Columns:
  query      string (length 9 to 9.05k)
  document   string (length 10 to 222k)
  negatives  list (19 to 20 items)
  metadata   dict
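The schema above can be loaded and inspected with the Hugging Face datasets library. The following is a minimal sketch, not part of the original preview: the dataset id "username/code-search-triplets" and the "train" split are placeholders, while the field names come from the column summary above.

from datasets import load_dataset

# Placeholder dataset id; substitute the real repository name.
ds = load_dataset("username/code-search-triplets", split="train")
print(ds.column_names)        # ['query', 'document', 'negatives', 'metadata']

row = ds[0]
print(row["query"][:200])     # natural-language / doctest-style query
print(row["document"][:200])  # the matching code snippet
print(len(row["negatives"]))  # 19 or 20 hard negatives per row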
Initializes the Theano shared variables. This should be called in the process where you want to do the updating. All further calls must be from the same process. The network.gparams must be created in the same process.
def initVars(self, network, net_param_deltas):
    assert not self.isInitialized
    self.pid = os.getpid()
    self.network = network
    if net_param_deltas is not None:
        self.update_on_device = True
        self.net_train_param_deltas = net_param_deltas
    else:
        self.update_on_device = False
        self.net_train_param_deltas = {p: theano.shared(numpy.zeros(p.get_value(borrow=True, return_internal_type=True).shape,
                                                                    dtype=theano.config.floatX))
                                       for p in network.train_params_vars}
        " :type: dict[theano.compile.sharedvalue.SharedVariable,theano.compile.sharedvalue.SharedVariable] "
    self.learning_rate_var = theano.shared(value=numpy.cast[theano.config.floatX](0), name="learning_rate")
    " :type: theano.compile.sharedvalue.SharedVariable "
    self.i = self.var(numpy.float32(0 if self.reset_update_params else network.update_step), name="updater_i")
    self.e = self.var(numpy.float32(0 if self.reset_update_params else network.update_step), name="updater_epoch")
    if self.momentum > 0:
        self.deltas = {p: self.var(p, zero=True, name="momentum_deltas_%s" % p.name)
                       for p in network.train_params_vars}
    if self.adagrad:
        self.accu = {p: self.var(p, zero=True, name="adagrad_accu_%s" % p.name)
                     for p in network.train_params_vars}
    if self.adadelta or self.adamdelta:
        # http://arxiv.org/pdf/1212.5701v1.pdf
        self.eg2 = {p: self.var(p, zero=True, name="adadelta_eg2_%s" % p.name)
                    for p in self.network.train_params_vars}  # E[g^2]
        self.edx2 = {p: self.var(p, zero=True, name="adadelta_edx2_%s" % p.name)
                     for p in self.network.train_params_vars}  # E[\delta x^2]
        self.dx = {p: self.var(p, zero=True, name="adadelta_dx_%s" % p.name)
                   for p in self.network.train_params_vars}  # \delta x
[ "def initialise_theano_functions(self):\n\n gradient_wrt_W = theano.tensor.grad(cost=self.get_cost(), wrt=self.W)\n gradient_wrt_b = theano.tensor.grad(cost=self.get_cost(), wrt=self.b)\n updates = [\n (self.W, self.W - self.learning_rate * gradient_wrt_W),\n (self.b, self.b - self.learning_rate * gradient_wrt_b)\n ]\n index = theano.tensor.lscalar()\n batch_size = theano.tensor.lscalar()\n\n self.train_model_once = theano.function(\n inputs=[index, batch_size, self.modulation],\n outputs=self.get_cost(),\n updates=updates,\n givens={\n self.x: self.input_batch[index*batch_size:(index+1)*batch_size],\n self.y: self.output_batch[index*batch_size:(index+1)*batch_size]\n }\n )", "def initShared(value):\n return theano.shared(np.array(value).astype(np.float32))", "def initialize(self):\n # self.gc1.reset_parameters()\n # self.gc2.reset_parameters()\n nn.init.xavier_uniform_(self.gc2.fc.weight)", "def _initialize_train(self):\n self._train_input = acme_utils.prefetch(self._build_train_input())\n\n # Check we haven't already restored params\n if self._byol_state is None:\n logging.info(\n 'Initializing parameters rather than restoring from checkpoint.')\n\n # initialize Byol and setup optimizer state\n inputs = next(self._train_input)\n init_byol = jax.pmap(self._make_initial_state, axis_name='i')\n\n # Init uses the same RNG key on all hosts+devices to ensure everyone\n # computes the same initial state and parameters.\n init_rng = jax.random.PRNGKey(self._random_seed)\n init_rng = helpers.bcast_local_devices(init_rng)\n\n self._byol_state = init_byol(rng=init_rng, dummy_input=inputs)", "def _init_graph(self):\n\n self.graph = tf.Graph()\n with self.graph.as_default():\n self._init_network_variables()\n self._init_network_functions()", "def _setup_network(self):\n self.network = moose.LIF( 'network', self.N );\n moose.le( '/network' )\n self.network.vec.Em = self.el\n self.network.vec.thresh = self.vt\n self.network.vec.refractoryPeriod = self.refrT\n self.network.vec.Rm = self.Rm\n self.network.vec.vReset = self.vr\n self.network.vec.Cm = self.Cm\n if not noiseInj:\n self.network.vec.inject = self.Iinject\n else:\n ## inject a constant + noisy current\n ## values are set in self.simulate()\n self.noiseTables = moose.StimulusTable('noiseTables',self.N)\n moose.connect( self.noiseTables, 'output', \\\n self.network, 'setInject', 'OneToOne')", "def init(self):\n # Support both distributed and non-distributed training\n local_rank = os.environ.get(\"LOCAL_RANK\")\n if local_rank is not None:\n dist.init_process_group(\n \"nccl\", timeout=timedelta(seconds=self.nccl_timeout)\n )\n assert (\n th.cuda.is_available()\n ), \"CUDA must be available for distributed training\"\n th.cuda.set_device(self.local_rank)", "def _initialize_network(self):\n self.model = self._get_model()\n if hasattr(self.model, 'fc'):\n self.optimizer = optim.Adam(self.model.fc.parameters(), lr=self.config['learning_rate'])\n else:\n self.optimizer = optim.Adam(self.model.classifier.parameters(), lr=self.config['learning_rate'])\n self.model.to(self.device)", "def _init_optimizer(self, params=None):\n pass", "def _setup(self):\n self.graph_convolution_1 = GCNConv(self.number_of_features, self.args.first_gcn_dimensions)\n self.graph_convolution_2 = GCNConv(self.args.first_gcn_dimensions, self.args.second_gcn_dimensions)\n self.fully_connected_1 = torch.nn.Linear(self.args.second_gcn_dimensions, self.args.first_dense_neurons)\n self.fully_connected_2 = torch.nn.Linear(self.args.first_dense_neurons, 
self.args.second_dense_neurons)", "def init_gm(self):\n self.data_x = self.tik_instance.Tensor(self.y_grad_dtype,\n [self.y_grad_gm_size],\n name=\"data_x\",\n scope=tik.scope_gm)\n self.data_y_grad = self.tik_instance.Tensor(self.y_grad_dtype,\n [self.y_grad_gm_size],\n name=\"data_y_grad\",\n scope=tik.scope_gm)\n self.data_weight = self.tik_instance.Tensor(self.weight_dtype,\n [self.weight_gm_size],\n name=\"data_weight\",\n scope=tik.scope_gm)\n self.data_target = self.tik_instance.Tensor(self.target_dtype,\n [self.target_gm_size],\n name=\"data_target\",\n scope=tik.scope_gm)\n self.data_total_weight = self.tik_instance.Tensor(\n self.x_dtype, [self.data_total_weight_size],\n name=\"data_total_weight\", scope=tik.scope_gm)\n self.output = self.tik_instance.Tensor(self.x_dtype,\n [self.output_gm_size],\n name=\"output\",\n scope=tik.scope_gm)", "def _init_network_variables(self):\n\n # Here we defind placeholders ...\n with tf.variable_scope('input'):\n # ... for the input of the syndrome increments\n self.x = tf.placeholder(tf.float32,\n [None, None, self.dim_syndr],\n name='x_input')\n # ... for the input of the final syndrome increments\n self.fx = tf.placeholder(tf.float32, [None, self.dim_fsyndr],\n name='fx_input')\n # ... for the parity of the bitflips\n self.y = tf.placeholder(tf.float32, [None, 1], name='y_input')\n # ... for the number of stabilizer measurement cycles in a sequence\n self.length = tf.placeholder(tf.int32, [None], name='length_input')\n\n with tf.variable_scope('training_parameters'):\n # ... for the learning rate\n self.lr = tf.placeholder(tf.float32, name='learning_rate')\n # ... for the weighing of the auxillary head\n self.alf = tf.placeholder(tf.float32, name='aux_loss_factor')\n\n # ... for the dropout (keep probabilities)\n self.lstm_kp = tf.placeholder(tf.float32, name='lstm_keep_probability')\n self.ff_kp = tf.placeholder(tf.float32, name='ff_keep_probability')\n\n with tf.variable_scope('summary_placeholders'):\n # ... 
for the tensorboard summaries\n self.plog = tf.placeholder(tf.float32, name='plog_train')\n self.plog_aux = tf.placeholder(tf.float32, name='plog_aux_train')\n self.tot_cost = tf.placeholder(tf.float32, name='tot_cost')", "def setup_optimizer(self):\n # The statistical model of our objective function\n init_param_point = dict(zip(self.hyper_param_names,\n self.init_hyper_param))\n init_param_point.update(self.fixed_params_dict)\n init_train_error, init_test_error, init_exec_time = \\\n self.get_obj(init_param_point)\n init_X = np.expand_dims(np.array(self.init_hyper_param), axis=0)\n self.best_obj = init_test_error\n self.train_erro_gp = GPy.models.GPRegression(init_X,\n init_train_error,\n self.train_error_kernel,\n noise_var=\n self.noise_level ** 2)\n self.train_erro_gp.optimize()\n\n self.test_erro_gp = GPy.models.GPRegression(init_X,\n init_test_error,\n self.test_error_kernel,\n noise_var=\n self.noise_level ** 2)\n self.test_erro_gp.optimize()\n\n self.exec_time_gp = GPy.models.GPRegression(init_X,\n init_exec_time,\n self.exec_time_kernel,\n noise_var=\n self.noise_level ** 2)\n self.exec_time_gp.optimize()", "def __init__(self, n_inputs=1024, n_classes=10, n_hidden_nodes=100, alpha=0.1, lr=0.05, n_epoch=200,\n activation='sigmoid'):\n self.activation = activation\n self.n_epoch = n_epoch\n self.n_hidden_nodes = n_hidden_nodes\n self.n_inputs = n_inputs\n self.n_classes = n_classes\n\n # Initialize Weights & Theano variables & symbolic equations\n X = T.matrix('X')\n y = T.matrix('y')\n\n self.layers = [\n theano.shared(name=\"W_hidden\", value=floatX(np.random.rand(self.n_inputs, self.n_hidden_nodes) - 0.5)),\n theano.shared(name=\"W_output\", value=floatX(np.random.rand(self.n_hidden_nodes, self.n_classes) - 0.5))]\n\n self.lr = theano.shared(floatX(lr))\n self.alpha = theano.shared(floatX(alpha))\n\n if self.activation == 'sigmoid':\n self.fprop = T.dot(T.nnet.sigmoid(T.dot(X, self.layers[0])), self.layers[1])\n elif self.activation == 'relu':\n self.fprop = T.dot(T.nnet.relu(T.dot(X, self.layers[0])), self.layers[1])\n else:\n self.fprop = T.dot(T.dot(X, self.layers[0]), self.layers[1])\n\n self.regularization = 0.5 * self.alpha * T.sum(T.power(self.layers[0], 2)) + \\\n 0.5 * self.alpha * T.sum(T.power(self.layers[1], 2)) # TODO check L2 formula\n\n self.loss = T.mean((T.nnet.softmax(self.fprop) - y) ** 2) + self.regularization\n\n gradient_hidden = T.grad(cost=self.loss, wrt=self.layers[0])\n gradient_output = T.grad(cost=self.loss, wrt=self.layers[1])\n self.update = [(self.layers[0], self.layers[0] - gradient_hidden * self.lr),\n (self.layers[1], self.layers[1] - gradient_output * self.lr)]\n\n self.fit = theano.function(inputs=[X, y], outputs=self.loss, updates=self.update, allow_input_downcast=True)\n\n self.predict_ = theano.function(inputs=[X], outputs=T.argmax(T.nnet.softmax(self.fprop), axis=1),\n allow_input_downcast=True)", "def _init_network(self):\n self.dqn = Brain(self.backbone_cfg, self.head_cfg).to(self.device)\n self.dqn_target = Brain(self.backbone_cfg, self.head_cfg).to(self.device)\n self.loss_fn = build_loss(self.loss_type)\n\n self.dqn_target.load_state_dict(self.dqn.state_dict())\n\n # create optimizer\n self.dqn_optim = optim.Adam(\n self.dqn.parameters(),\n lr=self.optim_cfg.lr_dqn,\n weight_decay=self.optim_cfg.weight_decay,\n eps=self.optim_cfg.adam_eps,\n )\n\n # load the optimizer and model parameters\n if self.load_from is not None:\n self.load_params(self.load_from)", "def _build_theano_graph(self):\n # define the theano variables\n parameters = 
theano.shared(\n value=np.zeros(len(self.free_parameters), dtype=np.float),\n name='J',\n borrow=True # still not sure what this does\n )\n parameters.set_value(self.initial_values)\n # multiply variables with matrix coefficients. This takes each element\n # of `self.matrices` and converts into bigreal form (and to a numpy\n # array if it wasn't alredy).\n bigreal_matrices = self._get_bigreal_matrices()\n theano_graph = T.tensordot(parameters, bigreal_matrices, axes=1)\n # from IPython.core.debugger import set_trace; set_trace()\n return [parameters, theano_graph]", "def __init__(self, model_params, training_params):\n # Set model and training parameters as instance variables\n\n\n self._model_params = deepcopy(model_params)\n self._training_params = deepcopy(training_params)", "def updates(model, params, global_lr1, global_lr2, moment_param1, moment_param2):\n\n # cost_ele is used in training elementary params (theta)\n # cost_valid is used in hyper / validation set, hyper params is denoted in (gamma)\n cost_ele = model.trainCost + model.penalty\n cost_valid = model.trainCost\n\n # dC/dtheta\n dele_dtheta = T.grad(cost_ele, model.paramsT1)\n dvalid_dtheta_temp = T.grad(cost_valid, model.paramsT1)\n \n # optimizers\n optimizer1 = adam() if params.opt1 in ['adam'] else None\n optimizer2 = adam() if params.opt2 in ['adam'] else None\n update_ele = [] if optimizer1 is None else optimizer1.initial_updates()\n update_hyper = [] if optimizer2 is None else optimizer2.initial_updates()\n\n update_valid, dvalid_dtheta, dvalid_dgamma, temp_ups, track_ele, track_hyper = [], [], [], [], [], []\n history_ele = {'grad': dict(), 'up': dict()}\n history_hyper = {'grad': dict(), 'up': dict()}\n learn_params = [global_lr1, global_lr2, moment_param1, moment_param2]\n\n \"\"\"\n Updating T1 params\n\n \"\"\"\n for param, grad in zip(model.paramsT1, dele_dtheta):\n\n grad = scale_norm(remove_nans(grad), threshold=3.) \n ups, track, _ = update_fun(param, grad, 'T1',\n history_ele, optimizer1, learn_params, params)\n update_ele += ups\n track_ele += [track]\n\n \"\"\"\n Updating T2 params\n\n \"\"\"\n if params.useT2: \n\n \"\"\"\n Save grads C2T1 for the T2 update:\n \"\"\"\n for param, grad in zip(model.paramsT1, dvalid_dtheta_temp):\n\n grad = scale_norm(remove_nans(grad), threshold=3.)\n grad = clip_grad(grad, threshold=10.)\n save_grad = theano.shared(np.asarray(param.get_value() * 0., dtype='float32'),\n broadcastable=param.broadcastable,\n name='gradC2T1_%s' % param.name)\n update_valid += [(save_grad, grad)]\n dvalid_dtheta += [save_grad]\n\n \"\"\"\n If gradient dC2/dT1 is also estimated with adam\n \"\"\"\n if params.avC2grad in ['adam', 'momentum']:\n #dvalid_dtheta = T.grad(cost_valid, mlp.paramsT1)\n if params.avC2grad == 'adam': opt3 = adam()\n else: opt3 = None\n temp_ups = [] if opt3 is None else opt3.initial_updates()\n\n newC2 = []\n grad = scale_norm(remove_nans(grad), threshold=3.)\n grad = clip_grad(grad, threshold=10.)\n for param, grad in zip(model.paramsT1, dvalid_dtheta):\n temp_up, _, newGrad = update_fun(param, T.reshape(grad, param.shape), 'T1',\n history_hyper, opt3, learn_params, params)\n temp_ups += temp_up[:-1]\n newC2 += newGrad\n dvalid_dtheta = newC2\n\n paramsT2, dvalid_dgamma = hypergrad(model.paramsT1, model.paramsT2, dvalid_dtheta,\n model.trainCost, model.trainCost, model.penalty)\n\n for param, grad in zip(model.paramsT2, dvalid_dgamma):\n paramName, _ = param.name.split('_')\n if params.decayT2 > 0. 
and paramName not in ['L2', 'L1']:\n grad += params.decayT2*param \n\n grad = scale_norm(remove_nans(grad), threshold=3.) \n grad = clip_grad(grad, threshold=10.) \n temp_up, track, _ = update_fun(param, T.reshape(grad, param.shape),'T2',\n {}, optimizer2, learn_params, params)\n update_hyper += temp_up\n track_hyper += [track]\n print \"Parameters \",\n print \", \".join([p.name for p in model.paramsT2]),\n print \"are trained on hyper set\"\n \n # monitored variables for output \n if (not params.useT2) and params.trackGrads:\n debugs = track_ele\n elif params.trackGrads:\n debugs = track_ele + track_hyper\n else:\n debugs = []\n\n return update_ele, update_valid, update_hyper+temp_ups, debugs", "def initialize(self):\n # setting the seed\n #pdb.set_trace()\n\n # create tf session\n self.sess = tf.Session()\n\n # tensorboard stuff\n self.add_summary()\n # initiliaze all variables\n init = tf.global_variables_initializer()\n self.sess.run(init)\n\n if self.config.use_baseline:\n self.baseline_network.set_session(self.sess)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
>>> x = [[4,8],[12,4],[4]]
>>> ll_sum(x)
32
>>> y = [[1,1,2],[4,4],[112],[44]]
>>> ll_sum(y)
168
>>> ll_sum([[1010],[502,301],[207,400],[24],[1500,500]])
4444
>>> ll_sum([[100],[10],[2]])
112
>>> ll_sum([[44444444444444444444444444444444,4040404040440404404040404040404]])
48484848484884848848484848484848
def ll_sum(list):
    x = 0
    for i in range(len(list)):
        x += sum(list[i])
    return x
[ "def ll_sum(lists_int):\n a = 0\n for L in lists_int:\n for i in L:\n a += i\n return a", "def ll_sum(t):\n result = 0\n for x in t:\n if type(x) == list:\n result += ll_sum(x)\n else:\n result += x\n return result", "def compute_sum(input_list):\n res = 0\n total_len = 0\n for (w, l) in input_list:\n total_len += l\n res += w * total_len\n return res", "def vectorsum(x, y):\n return [ sum(vec) for vec in zip(x, y) ]", "def sum(xs):\r\n return reduce(add, xs)", "def _list_sum(l):\n return reduce(lambda x, y: x+y, l, 0)", "def sum_list_lsd(first_list: LinkedList, second_list: LinkedList) -> int:\n result = 0\n carry = 0\n order = 1\n\n current_0 = first_list.head\n current_1 = second_list.head\n\n while current_0 and current_1:\n result += ((current_0.data + current_1.data + carry) % 10) * order\n carry = (current_0.data + current_1.data + carry) // 10\n order *= 10\n current_1 = current_1.next\n current_0 = current_0.next\n\n for current in [current_1, current_0]:\n while current:\n result += ((current.data + carry) % 10) * order\n\n carry = current.data + carry // 10\n order *= 10\n current = current.next\n\n return result", "def list_total(l: list):\n return sum(l)", "def sum_list(l):\n return reduce(operator.add, l)", "def list_sum(l):\n # replace the line below with your code\n return float(sum(l))", "def sum_lists(my_list1, my_list2):\n sum1 = sum(my_list1)\n sum2 = sum(my_list2)\n return sum1 + sum2", "def a_very_big_sum(length, long_list):\n return sum(long_list)", "def vectorsum(x, y):\n s = []\n for xx, yy in zip(x, y):\n s.append(xx + yy)\n return s", "def vec_sum(a, b):\n return [va + vb for va, vb in zip(a, b)]", "def sum_list(numbers: list) -> int:\n\n return sum(numbers)", "def sum_lists(arg1,arg2):\n try:\n assert len(arg1) == len(arg2), 'Lists not of equal length'\n except AssertionError as e:\n print(e)\n exit()\n list_of_sums = [x+y for x,y in zip(arg1,arg2)]\n print(list_of_sums)\n return list_of_sums", "def Total(listlikeArg):\n return np.sum(np.array(listlikeArg),axis = 0)", "def fold_sum(xs): # pylint: disable=invalid-name\n return reduce(lambda x, y: x+y, xs)", "def sum_numbers(seq_seq):\n # ------------------------------------------------------------------\n # Done: 4. Implement and test this function.\n # Note that you should write its TEST function first (above).\n # ------------------------------------------------------------------\n total = 0\n for k in range(len(seq_seq)):\n for j in range(len(seq_seq[k])):\n total = seq_seq[k][j] + total\n return total" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
>>> x = [1,9,8,12,65,4,4,36,88,9,112,69]
>>> middle(x)
[9, 8, 12, 65, 4, 4, 36, 88, 9, 112]
>>> y = [10,4,'^w^','r10']
>>> middle(y)
[4, '^w^']
>>> middle(['cut','show only me!','cut'])
['show only me!']
>>> middle(['r',10])
[]
>>> middle([1])
[]
>>> middle([])
[]
def middle(list):
    new = list[1:-1]
    return new
[ "def middle(list):\n new_list = list[1:-1]\n return new_list", "def middle_index(x):\n if len(x) % 2 == 0:\n middle_index = len(x)/2 - 1\n else:\n middle_index = len(x)/2\n return int(middle_index)", "def middle(t):\n return t[1:-1]", "def find_middle(self, nums):\n middle = float(len(nums))/2\n\n # If even numer of nums\n if len(nums) % 2 == 0:\n return nums[int(middle)], nums[int(middle-1)]\n # If odd number of nums\n else:\n return nums[int(middle - .5)]", "def get_middle(left_pointer, right_pointer):\n return (left_pointer + right_pointer) // 2", "def middle(X, Y, N):\n if N % 2 != 0:\n return X\n else:\n return (int(N/2) * Y) + X + (int(N/2) * Y)", "def deleteMiddle(head):\n pass", "def choose_next_middle(self, lower, upper):\n pass", "def get_middle_element(A):\n idx, b = divmod(len(A), 2)\n if b == 0:\n idx -= 1\n return A[idx], idx", "def median(args):\n total_nums = len(args)\n middle = total_nums/2 \n avg_middle = (int(args[middle]) + int(args[middle - 1]))/float(2)\n if total_nums % 2 == 0:\n print avg_middle\n else:\n print int(args[middle])", "def find_middle_item(self):\n if self.head is None:\n return None\n current, temp = self.head, self.head\n counter = 0\n while current is not None:\n counter += 1\n current = current['next']\n mid_index = (counter+1)//2 # same when for example, counter = 5 and 6\n while counter > mid_index:\n temp = temp['next']\n counter -= 1\n return temp", "def median_of_three_index(left, right, items):\n if right - left < 2:\n return left\n else:\n mid = left + (right - left) // 2\n return sorted([left, right, mid], key=lambda idx: items[idx])[1]", "def delete_middle(linked_list):\n\n if linked_list.head is None:\n return linked_list\n\n if linked_list.length() == 1:\n linked_list.head = None\n return linked_list.head\n\n middle_element = find_middle(linked_list)\n linked_list.delete(middle_element)\n return linked_list", "def GetMiddleStr(self, content, startStr, endStr):\r\n if '[' in startStr:\r\n startStr = startStr.replace('[', '\\[')\r\n if ']' in endStr:\r\n endStr = endStr.replace(']', '\\]')\r\n patternStr = r'%s(.+?)%s' % (startStr, endStr)\r\n p = re.compile(patternStr)\r\n res = p.search(content).groups()\r\n return res[0]", "def mid_linkedlist(linked):\n l = [linked]\n while l[-1].next:\n l.append(l[-1].next)\n return l[len(l) // 2]", "def move_bound_to_middle(self, lower, middle, upper):\n if self.function(middle) * self.function(lower) < 0:\n return lower, middle\n elif self.function(middle) * self.function(upper) < 0:\n return middle, upper\n elif self.function(middle) == 0:\n return middle, middle\n else:\n print(\"ERROR: unexpected situation at BisectionSolver.solve()\")\n print(\"f({}) = {}\".format(lower, self.function(lower)))\n print(\"f({}) = {}\".format(upper, self.function(upper)))\n print(\"f({}) = {}\".format(middle, self.function(middle)))\n\n # Error Code\n return lower - 1, upper + 1", "def FindMiddleAbove(self, leftHTrial, rightHTrial):\n middleY = leftHTrial.ptOrigin[1] - ConnectorSeparation\n while middleY >= 0:\n middleHTrial = HorizontalTrial((leftHTrial.rightBound, middleY), self)\n self.pfd.HorizontalLineLimits(middleHTrial)\n \n if middleHTrial.OverlapsLine(rightHTrial):\n vLeftTrial = self.FindVerticalTrial(leftHTrial, middleHTrial)\n if vLeftTrial:\n vRightTrial = self.FindVerticalTrial(middleHTrial, rightHTrial)\n if vRightTrial:\n middleHTrial.leftBound = vLeftTrial.ptOrigin[0]\n middleHTrial.rightBound = vRightTrial.ptOrigin[0]\n return middleHTrial\n \n if middleHTrial.rightTop == None:\n middleY -= 
ConnectorSeparation\n else:\n point = self.SnapPoint((middleHTrial.rightTop, middleY))\n middleY = point[1] - 2*ConnectorSeparation\n\n return None", "def mid(fa,fb):\n g =(fa+fb)/2\n return g", "def delete_middle(self):\n\t\tif self.head is None:\n\t\t\traise ValueError(\"Cannot find an element in an empty list\")\n\n\t\tcurrent = self.head\n\t\tmid_index = 0\n\t\tcount = 0\n\t\taux = None\n\t\tmid = self.head\n\n\t\twhile current is not None:\n\t\t\tif mid_index < int(count/2):\n\t\t\t\taux = mid\n\t\t\t\tmid = mid.next\n\t\t\t\tmid_index += 1\n\t\t\tcount += 1\n\t\t\tcurrent = current.next\n\n\t\tif aux is None:\n\t\t\tself.head = self.head.next\n\t\telse:\n\t\t\taux.next = mid.next\n\n\t\tdel mid" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
>>> x = ['eh!?','middle','again?']
>>> chop(x)
>>> x
['middle']
>>> y = ['chop','now','chop','now','chop','chop','chop']
>>> chop(y)
>>> y
['now', 'chop', 'now', 'chop', 'chop']
>>> z = ['Mr.Stark','I',"don't",'feel','so','good','.']
>>> chop(z)
>>> z
['I', "don't", 'feel', 'so', 'good']
>>> chop(z)
>>> z
["don't", 'feel', 'so']
>>> chop(z)
>>> z
['feel']
>>> chop(z)
>>> z
[]
>>> o = ['King','Farewell','false king','bye']
>>> chop(o)
>>> o
['Farewell', 'false king']
>>> t = ['my score is 100!!','Whoops! my score disappear!?','what happen??']
>>> chop(t)
>>> t
['Whoops! my score disappear!?']
def chop(list):
    new = list[1:-1]
    list[0:len(list)] = new
    return None
[ "def chop(lst):\n del lst[0] # Removes the first element\n del lst[-1] # Removes the last element", "def chop(lst):\r\n del lst[0] # Removes the first element\r\n del lst[-1] # Removes the last element\r", "def chop(list):\n if len(list) >= 2:\n list.pop(0)\n list.pop(-1)\n return None", "def chop(t):\n t.pop(0)\n t.pop()", "def trim_suggestions(word,suggs,maxlen,calcdist=None):\r\n if calcdist is None:\r\n calcdist = levenshtein\r\n decorated = [(calcdist(word,s),s) for s in suggs]\r\n decorated.sort()\r\n return [s for (l,s) in decorated[:maxlen]]", "def chop(s: Sequence[Any], k: int) -> list[Sequence[Any]]:\n return [s[i:i+k] for i in range(0, len(s) - k, k)]", "def chop_noun_phrases(noun_phrases):\n words = []\n for noun_phrase in noun_phrases:\n words = words + noun_phrase.split(' ')\n return words", "def call_popped(self, call, head):\n pass", "def cut(x, y, threshold, keep=\"before\"):\n\n import numpy as np\n\n if np.shape(x) != np.shape(y):\n raise ValueError\n print(\"x and y must have same length.\")\n if np.asarray(x).ndim != 1:\n raise ValueError\n print(\"x and y must have dimension = 1.\")\n\n if [i for i in sorted(x)] == [i for i in x]:\n if threshold < x[0]:\n raise ValueError\n print(\"Your threshold is to low. Not cutting list.\")\n if threshold > x[-1]:\n raise ValueError\n print(\"Your threshold is to high. Not cutting list.\")\n for i, item in enumerate(x):\n if item > threshold:\n if keep == \"before\":\n return x[:i], y[:i]\n elif keep == \"after\":\n return x[i:], y[i:]\n elif [i for i in sorted(x, reverse=True)] == [i for i in x]:\n if threshold > x[0]:\n raise ValueError\n print(\"Your threshold is to high. Not cutting list.\")\n if threshold < x[-1]:\n raise ValueError\n print(\"Your threshold is to low. Not cutting list.\")\n for i, item in enumerate(x):\n if item < threshold:\n if keep == \"before\":\n return x[:i], y[:i]\n elif keep == \"after\":\n return x[i:], y[i:]\n else:\n raise ValueError(\n \"Your series x is not sorted. 
Sort it either ascending or descending.\"\n )", "def chop(text, pattern, instance=0):\n pieces = pattern.split(text)\n last_piece_in_before = instance*2\n if len(pieces) - 1 < last_piece_in_before + 2:\n return None\n counter = 0\n befores = []\n afters = []\n for piece in pieces:\n if counter <= last_piece_in_before:\n befores.append(piece)\n elif counter > last_piece_in_before + 1:\n afters.append(piece)\n counter += 1\n if not afters or not befores:\n raise StandardError('Doh')\n before = ''.join(befores)\n after = ''.join(afters)\n return before, after", "def show_magicians(magicians, great_magicians):\n while magicians:\n current_magician = 'Great ' + magicians.pop().title()\n great_magicians.append(current_magician)\n print(great_magicians)\n print(magicians)", "def chop_at_now(ttlist):\n ttlist = list(ttlist)\n if not ttlist:\n return []\n first = ttlist[0]\n now = first.__class__()\n return [ttval for ttval in ttlist if ttval <= now]", "def cut(b, l):\n assert len(b) >= l, (len(b), l)\n return b[:-l], b[-l:]", "def exclude_by_length(self):\n longer_words = []\n\n user_input = int(input('Exclude words shorter than: '))\n\n for item in self:\n if len(item) >= user_input:\n longer_words.append(item)\n\n return longer_words", "def remove_last_fruit(list):\n list.pop()\n display_list(list)\n return list", "def remove_trailing_conjs_and_puncs(self, s, conjunction_list):\r\n\r\n if len(s) <= 1:\r\n return s\r\n while True:\r\n neither_conj = 0\r\n if s[0] in conjunction_list:\r\n s = s[1:]\r\n neither_conj += 1\r\n if s[-1] in conjunction_list:\r\n s = s[:-1]\r\n neither_conj += 2\r\n if neither_conj == 0 or len(s) <= 1:\r\n break\r\n return s", "def front_x(t):\n initial = []\n for x in t:\n if x.startswith(\"x\"):\n initial.append(x)\n t.remove(x)\n initial.sort()\n t.sort()\n new = initial + t\n return new", "def middle(list):\n new_list = list[1:-1]\n return new_list", "def front_x(list):\n xlist = []\n olist = []\n for s in list:\n if s[0] == 'x' or s[0] == 'X':\n xlist.append(s)\n else:\n olist.append(s)\n xlist.sort()\n olist.sort()\n return xlist + olist" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
>>> x = ['siam','ciao','xiao','nongt']
>>> front_x(x)
['xiao', 'ciao', 'nongt', 'siam']
>>> y = ['bandori','poppipa','roselia','paspale','afterglow','hellohappy']
>>> front_x(y)
['afterglow', 'bandori', 'hellohappy', 'paspale', 'poppipa', 'roselia']
>>> front_x(['coup_de_grace','phantasm','XD','x_mark'])
['XD', 'x_mark', 'coup_de_grace', 'phantasm']
>>> front_x(['BigB','smalls','x is small','X is big','something like that'])
['X is big', 'x is small', 'BigB', 'smalls', 'something like that']
>>> front_x(['sleepy','feel','I','really','zzzz~~~~','xxx then'])
['xxx then', 'I', 'feel', 'really', 'sleepy', 'zzzz~~~~']
def front_x(list):
    x_list = []
    non_x = []
    for i in range(len(list)):
        if (list[i])[0].lower() == 'x':
            x_list.append(list[i])
        else:
            non_x.append(list[i])
    x_list.sort()
    non_x.sort()
    x_list.extend(non_x)
    return (x_list)
[ "def front_x(list):\n xlist = []\n olist = []\n for s in list:\n if s[0] == 'x' or s[0] == 'X':\n xlist.append(s)\n else:\n olist.append(s)\n xlist.sort()\n olist.sort()\n return xlist + olist", "def front_x(t):\n initial = []\n for x in t:\n if x.startswith(\"x\"):\n initial.append(x)\n t.remove(x)\n initial.sort()\n t.sort()\n new = initial + t\n return new", "def subst(x, y, S):\n if core.first(S) == y:\n if len(S) > 1:\n return core.prefix(x, subst(x, y, core.rest(S)))\n else:\n return [x]\n else:\n if len(S) > 1:\n return core.prefix(core.first(S), subst(x, y, core.rest(S)))\n else:\n return S", "def generate_fronts(zipped):\r\n fronts = list()\r\n zipped = list(zipped)\r\n zipped.sort(key=lambda x: -x[1])\r\n while zipped:\r\n best_secondary = -999999\r\n front = list()\r\n new_zipped = collections.deque()\r\n for individual in zipped:\r\n if individual[2] > best_secondary or (individual[2] == best_secondary and\r\n individual[1] == front[-1][1]):\r\n best_secondary = individual[2]\r\n front.append(individual)\r\n else:\r\n new_zipped.append(individual)\r\n fronts.append(front)\r\n zipped = new_zipped\r\n return fronts", "def front_back(a, b): \n \n a_split = math.ceil(len(a) / 2)\n b_split = math.ceil(len(b) / 2)\n \n return a[:a_split] + b[:b_split] + a[a_split:] + b[b_split:]", "def blend(func, original, mix):\r\n mixIndex = 0\r\n build = []\r\n mixlen = len(mix)\r\n for item in original:\r\n if mixIndex < mixlen and func(item):\r\n build.append(mix[mixIndex])\r\n mixIndex = mixIndex + 1\r\n else:\r\n build.append(item)\r\n if mixIndex < mixlen:\r\n build.extend(list(mix[mixIndex:]))\r\n return build", "def move_to_front(list_a, index):\n print \"MOVE\"\n element = list_a[index]\n del list_a[index]\n return [element] + list(list_a)", "def split_x_y(mylist, x, y):\r\n return", "def filter_by_prefix(strings: List[str], prefix: str) -> List[str]:\n#[SOLUTION]\n return [x for x in strings if x.startswith(prefix)]", "def pareto_front(y):\n nds = non_dominated_set(y, return_mask=False)\n return y[nds]", "def getWordsWithSameStart(word, wordList, n):\n wordst=[]\n\n for name in wordList:\n \n if name[:n]==word[:n]:\n wordst.append(name)#appends words with first n letters\n \n return wordst", "def mix_lists(list1, list2):\n\n #Find out which list is shorter and store that length as n\n n = min(len(list1), len(list2))\n\n master_string = \"\"\n\n for i in range(n): \n master_string += list1[i] + \" \"\n master_string += list2[i] + \" \"\n\n #Disregard other tweets if one list is longer than the other so that one user \n #doesn't take over the markov tweet.\n\n return master_string", "def first_strings():\n pass", "def interleave(x, xs):\n for pos in range(len(xs) + 1):\n yield xs[:pos] + [x] + xs[pos:]", "def join_freq_list(self, x, nextLot):\n if self.chainMode == ChainMode.CHARS:\n return [x + s for s in nextLot]\n elif self.chainMode == ChainMode.WORDS:\n return [[x] + s for s in nextLot]", "def filter_list(words, starting_letter):\n if starting_letter:\n startswith_filter = lambda w : w.startswith(starting_letter)\n filtered_list = filter(startswith_filter, words)\n else:\n filtered_list = words\n \n return filtered_list[randint(0,len(filtered_list) - 1)]", "def _mixed_keys(y, x, random_effects, categorical, continuous_random_effects, offset):\n\n out = [y] + [v for v in x] + [v for v in categorical]\n res = [v for v in random_effects] + [v for v in continuous_random_effects] + [v for v in offset]\n out += [vv for v in res for vv in v.split(':') if len(vv) > 0]\n return out", "def 
buildArtistList(minimum=2,search=\"\"):\r\n\r\n \r\n \r\n library = MpGlobal.Player.library\r\n if search != \"\":\r\n so = SearchObject(search);\r\n library = so.search(library)\r\n \r\n g = lambda x : [x,]\r\n h = lambda x : [ item.strip() for item in x.replace(',',';').replace('\\\\',';').replace('/',';').split(';') ]\r\n \r\n MpGlobal.Player.quickList = buildQuickList(library,minimum,MpMusic.ARTIST,g)\r\n MpGlobal.Player.quickList_Genre = buildQuickList(library,0,MpMusic.GENRE,h)\r\n # sort the resulting list and update the quick selection tab\r\n MpGlobal.Window.tab_quickselect.sortData()", "def prefix_bytes(listx):\n\n listy = []\n for item in listx:\n item = \"0x\" + item\n listy.append(item)\n return listy" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
>>> even_only([1,4,9,7,11,15,4,37,69,4,21,33,4,99,87])
[4, 4, 4, 4]
>>> even_only([0,5,2,9,4,7,6,87,44])
[0, 2, 4, 6, 44]
>>> even_only([49,48,47,46,45,44,43,42,41,40])
[48, 46, 44, 42, 40]
>>> even_only([11,22,33,44,55,66,77,88,99])
[22, 44, 66, 88]
>>> even_only([99,88,77,66,55,44,33,22,11])
[88, 66, 44, 22]
def even_only(list):
    x = []
    for i in range(len(list)):
        if list[i] % 2 == 0:
            x.append(list[i])
    return x
[ "def even_only(list):\n new_list = []\n for i in list:\n if i % 2 == 0:\n new_list.append(i)\n return new_list", "def all_even(number_list):\n\n # do the opposite of the above function (this time, find positive #s)\n\n even_elements = [num for num in number_list if num % 2 == 0]\n return even_elements", "def all_even(number_list):\n even_numbers = []\n for item in number_list:\n if item % 2 == 0:\n #modulo: if you can divide it by two and there is no remainder\n even_numbers.append(item)\n\n return even_numbers", "def filter_even(iterable):\n return imap(lambda i: iterable[i],filter(lambda i: i%2 == 0,range(len(iterable))))", "def extract_even_numbers_in_list(alist):\r\n result = []\r\n for elem in alist:\r\n if elem%2 == 0:\r\n result.append(elem)\r\n return result", "def filter_positive_even_numbers(numbers):\n return [number for number in numbers if is_positive(number) and is_even(number)]", "def filter_positive_even_numbers(numbers):\n\n positive_even_numbers = [x for x in numbers if x > 0 and not x % 2]\n\n return positive_even_numbers", "def all_odd(number_list):\n odd_numbers = []\n for item in number_list:\n if item % 2 != 0:\n #modulo: if you can divide it by two but there is a remainder\n odd_numbers.append(item)\n\n return odd_numbers", "def filter_even_numbers(numbers):\n lista_numeros = []\n\n for element in numbers:\n lista_numeros.append(element)\n\n for element in lista_numeros:\n if not element%2 == 0:\n numbers.pop(numbers.index(element))\n\n #numbers.sort()", "def filter_odd(iterable):\n return imap(lambda i: iterable[i],filter(lambda i: i%2 == 1,range(len(iterable))))", "def only_odds(numbers):\n\tresult = []\n\tfor i in numbers:\n\t\tif i%2 == 0:\n\t\t\tcontinue\n\t\telse:\n\t\t\tresult.append(i)\n\treturn result", "def all_odd(number_list):\n # Find only the odd elements (not evenly divisible by 2) and return them\n # as a list\n\n odd_elements = [num for num in number_list if num % 2 != 0]\n\n return odd_elements", "def evens_using_list_comprehension(count):\n return [i for i in range(count) if i % 2 == 0]", "def is_even(values):\n return False", "def even_number_of_evens(numbers):\n\n # Check to see if the list is empty\n if numbers == []:\n return False\n else:\n # Set a `number_of_evens` variable that will be incremented each time\n # an even number is found\n evens = 0\n \n # Iterate of over each item and if it's an even number, increment the\n # `evens` variable\n for number in numbers:\n if number % 2 == 0:\n evens += 1\n \n if evens == 0:\n return False\n else:\n return evens % 2 == 0", "def every_other_item(my_list):\n # return a slice of the list that skips every 2nd number\n\n every_other_item = my_list[::2]\n \n return every_other_item", "def squares_of_even_elements_in_odd_positions(list_arg):\n\n new_list = []\n for i in range(len(list_arg)):\n if i % 2 != 0 and list_arg[i] % 2 == 0:\n new_list.append(list_arg[i] ** 2)\n return new_list", "def even_odd_partition(arr: List[int]) -> List[int]:\n def swap(i, j):\n temp = arr[j]\n arr[j] = arr[i]\n arr[i] = temp\n\n i = 0\n j = len(arr) - 1\n while i < j:\n if arr[i] % 2 == 0:\n # pointer is satisfied so increment\n i += 1\n else:\n # we need to swap an odd element to the end of the array\n # the odd pointer gets what it wanted so decrement\n swap(i, j)\n j -= 1\n\n if arr[j] % 2 != 0:\n # pointer gets what it wanted so decrement\n j -= 1\n else:\n # we need to swap the even element to the front of the array\n # the even pointer gets what it wanted so increment\n swap(i, j)\n i += 1\n\n return arr", "def 
odd_int2(list1):\n\t\n\twhile len(list1) > 0:\n\t\tstart_len = len(list1)\n\t\tcurr_value = list1[0]\n\t\t\n\t\tlist1 = list(filter(lambda elem: elem != curr_value, list1))\n\t\t\n\t\tif (start_len - len(list1)) % 2 == 0:\n\t\t\treturn curr_value" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
>>> love('i hate youuu!!!!!')
'i love youuu!!!!!'
>>> love('i wanna sleep.')
'i love sleep.'
>>> love('love hate love hate love hate love hate love')
'love hate love hate love hate love love love'
>>> love('Everyone hate 10')
'Everyone love 10'
>>> love('Everyone in jail')
'Everyone love jail'
def love(text):
    new1 = text.split()
    new1[-2] = 'love'
    new2 = ' '.join(new1)
    return new2
[ "def love():\n return \"Baby Don't hurt me!!\"", "def love(t):\n list_splitted = t.split()\n second_last = list_splitted[-2]\n changed = t.replace(second_last,\"love\")\n return changed", "def greet2(g,n):\n phrase = g+','+n+'!'\n print(phrase)\n print('How are you?')", "def lick():\n return \"*licks ice cream cone*\"", "def wut():\n return \"Whaddya mean, 'wut'? Humans...\"", "async def _love(self,ctx):\n mother = ctx.message.author\n\n if common.has_mon(str(mother)):\n pet = common.user_data['players'][str(mother)]['mon']\n\n love_msg = [\"*{0} glances at {1}, giving the desperate {2} just enough attention to feel something like love.*\\n{1} gained a little happiness.\".format(mother.mention, pet['name'], pet['type']),\n \"*{0} pats {1}, catching it off guard. It's not like {1} wanted to be patted, anyway...*\\n{1} gained a little happiness.\".format(mother.mention, pet['name'])]\n\n\n if common.add_hoh(pet, 'happy'):\n await self.bot.say(random.sample(love_msg,1)[0])\n else:\n await self.bot.say(pet['name'] + \" already knows how much you love them.<3\")", "def one_sentence():\n return \"Lorem ipsum dolor sit amet, consectetur adipiscing elit.\"", "def overall_feeling(mood):\n \n # Quote and parting words of advice for students based on \n # what mood they have chosen to be currently feeling, from:\n # happy, sad, frustrated, bored, overwhelmed, hopeful, excited,\n # relaxed, hungry, and silly.\n mood_happy = (\" \\n *** 'Create the highest, grandest vision possible for\" +\n \" your life, because you become what you believe.'\" +\n \" -Oprah Winfrey. \\n Never take your happiness for\" +\n \" granted. :D U r pawsome! *** \\n \")\n mood_sad = (\" \\n *** 'There are only two ways to live your life. One is\" +\n \" as though nothing is a miracle. The other is as though\" +\n \" everything is a miracle.' -Albert Einstein \\n When I\" +\n \" am sad, I ask hooman to play fetch with me. However,\" +\n \" I don't think know how effective that is for you.\" +\n \" Sadness is really a tough one, there are just so many\" +\n \" angles to it... if only I could make you feel better\" +\n \" with just one quote. This too shall pass, my\" +\n \" fur-end! *** \\n \")\n mood_frustrated = (\" \\n *** 'If you can't fly, then run, if you can't\" +\n \" run, then walk, if you can't walk, then crawl,\" +\n \" but whatever you do, you have to keep moving\" \n \" forward.' -Martin Luther King Jr. \\n\" +\n \" Frustration is extremely stressful, but keep\" +\n \" going! No need to terrier-self up about it.\" +\n \" The end is near! Soon you will find peace of\" +\n \" mind. I'm rooting for you! *** \\n \")\n mood_bored = (\" \\n *** 'The time is always right to do what is right.'\" + \n \" -Martin Luther King Jr. \\n Go out and get some\" +\n \" fresh air! Or take this time to educate yourself\" +\n \" on current worldwide issues. This is a perfect\" +\n \" op-paw-tunity! There is no such thing as being\" +\n \" overeducated! :D *** \\n \")\n mood_overwhelmed = (\" \\n *** Believe you can and you're halfway there.'\" +\n \" -Theodore Roosevelt \\n Don't stress\" +\n \" yourself out, Puppy believes in you! You have\" +\n \" so much pet-tential! :D *** \\n \")\n mood_hopeful = (\" \\n *** ' All of our dreams can come true if we have\" +\n \" the courage to pursue them.' -Walt Disney \\n\" +\n \" Anything is paw-sible! :-) *** \\n \")\n mood_excited = (\" \\n *** 'You're only given a little spark of madness.\" +\n \" You mustn't lose it.' 
-Robin Williams \\n Looks like\" +\n \" fun things are happening in your life! Must be\" +\n \" having the ulti-mutt time of your life!! :D *** \\n \")\n mood_relaxed = (\" \\n *** 'Rest and be thankful.' -William Wadsworth \\n\" +\n \" Good for you! Hope you live long and paws-per! :)\" +\n \" *** \\n \")\n mood_hungry = (\" \\n *** I see that you're hungry. I am always hungry, but\" +\n \" my hooman only feeds me three times a day. How\" +\n \" prepawsterous! I hope you realize you are lucky to\" +\n \" have such long legs and arms to walk to the fridge\" +\n \" and grab yourself some food! Might I recommend\" +\n \" pup-eroni pizza...? *** \\n \")\n mood_silly = (\" \\n *** 'Why did the man fall into the well? He couldn't\" +\n \" see that well!' \\n If you're feeling silly, you\" +\n \" probably like puns. Hope you got a good chuckle out\" +\n \" of that one! I thought it was howlarious! :D *** \\n \")\n \n # Based on what mood the student feels, will return the corresponding\n # statement through if statements.\n if mood == 'happy':\n return(mood_happy)\n elif mood == 'sad':\n return(mood_sad)\n elif mood == 'frustrated':\n return(mood_sad)\n elif mood == 'bored':\n return(mood_bored)\n elif mood == 'overwhelmed':\n return(mood_overwhelmed)\n elif mood == 'hopeful':\n return(mood_hopeful)\n elif mood == 'excited':\n return(mood_excited)\n elif mood == 'relaxed':\n return(mood_relaxed)\n elif mood == 'hungry':\n return(mood_hungry)\n elif mood == 'silly':\n return(mood_silly)", "def botfight(phenny, input):\n\n messages = [\"hits %s\", \"punches %s\", \"kicks %s\", \"hits %s with a rubber hose\", \"stabs %s with a clean kitchen knife\"]\n response = random.choice(messages)\n\n phenny.do(response % otherbot)", "def horatio(self, irc, msg, args):\n intros = ['looks like',\n 'sounds like',\n 'it appears that',\n 'on the other hand,',\n 'i guess you could say',\n 'you might want to consider that',]\n words = args\n if len(words) == 1:\n words = (' '.join(words)).split()\n r1 = u'( ಠ_ಠ) ' + choice(intros) + u' ' + words[0]\n irc.reply(r1.encode('utf-8', 'ignore'), prefixNick=False)\n irc.reply(u'( ಠ_ಠ)>--■-■'.encode('utf-8','ignore'), prefixNick=False)\n r2 = u'(-■_■) ' + ' '.join(words[1:])\n irc.reply(r2.encode('utf-8', 'ignore'), prefixNick=False)\n irc.reply('YEAAAAAAAAAAAAAAAAAAAAAAH', prefixNick=False)", "def verbing(some_text):\n string = \"\"\n if(not isinstance(some_text, str)):\n print(\"Error: only accepts strings\")\n return None\n elif(isinstance(some_text, int)):\n print(\"Error: only accepts strings\")\n return None\n elif (isinstance(some_text, float)):\n print(\"Error: only accepts strings\")\n return None\n elif(len(some_text) == 0):\n print(\"''\")\n return None\n else: \n words = some_text.split()\n for x in words:\n if len(x) >=3:\n words.insert(words.index(x), (x + \"ing\")) \n words.remove(x)\n elif len(x) < 3:\n words.insert(words.index(x),x + \"ly\" )\n words.remove(x)\n \n for x in words:\n string = string + x + ' '\n\n\n string = string[:-1]\n \n return string", "def say_hello(name):\n return 'Hello, {} '.format(name)", "def likes(names):\n\n name_len = len(names)\n if not name_len:\n return 'no one likes this'\n elif name_len == 1:\n return '{} likes this'.format(names[0])\n elif name_len == 2:\n return '{} and {} like this'.format(names[0], names[1])\n elif name_len == 3:\n return '{}, {} and {} like this'.format(names[0], names[1], names[2])\n else:\n return '{}, {} and {} others like this'.format(names[0], names[1], name_len - 2)", "def say_hello(self, 
message, args):\n if args.favorite_number is None:\n return f'Hello {args.name}.'\n else:\n return f'Hello {args.name}, I hear your favorite number is {args.favorite_number}.'", "def sentence_punctuation():\n check50.run(\"python3 readability.py\").stdin(\"Congratulations! Today is your day. You're off to Great Places! You're off and away!\").stdout(\"Grade\\D+3\", \"Grade 3\\n\").exit(0)", "def echo_word(word1, echo):\n words = word1 * echo\n print(words)\n return words", "def single_sentence_complex():\n check50.run(\"python3 readability.py\").stdin(\"Alice was beginning to get very tired of sitting by her sister on the bank, and of having nothing to do: once or twice she had peeped into the book her sister was reading, but it had no pictures or conversations in it, \\\"and what is the use of a book,\\\" thought Alice \\\"without pictures or conversation?\\\"\").stdout(\"Grade\\D+8\", \"Grade 8\\n\").exit(0)", "def hey(conversation):\n\tif type(conversation) != type(str):\n\t\traise ValueError(\"Conversation must be a valid string, not \" + str(type(conversation)))\n\t\n\tconversation = ''.join([i for i in conversation if not i in string.whitespace]) # remove whitespace\n\tif conversation == \"\":\n\t\treturn 'Fine. Be that way!'\n\t\n\tif conversation.upper() == conversation and\tnot [i for i in conversation if not i in string.digits and not i in string.punctuation] == []:\n\t\treturn 'Whoa, chill out!' \t\t\n\t\t# use pythons default caps function to deal with unicode, ETC. Convert every letter in the conversation to caps, \n\t\t# check if it is the same. Numbers and symbols aren't affected, nor should they be.\n\t\t# we also check to nake sure there are \"letters\" in the input (something not whitespace, digits, or punctuation) \n\t\t# otherwise we can't tell if the user is yelling and should assume user is not\n\n\telif conversation[-1] == \"?\": ## questions end with question marks\n\t\treturn 'Sure.'\n\telse:\n\t\treturn 'Whatever.' #default answer", "def hey(what):\r\n what = what.strip()\r\n what_alpha = ''.join([i for i in what if i.isalpha()])\r\n what_other = ''.join([i for i in what if not i.isalnum()])\r\n if what_alpha.isupper():\r\n return 'Whoa, chill out!'\r\n elif what == '':\r\n return 'Fine. Be that way!'\r\n elif what_other.endswith('?'):\r\n return 'Sure.'\r\n else:\r\n return 'Whatever.'" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
>>> centered_average([4,3,4])
4.0
>>> centered_average([4,1,9,7,44,32,569,4444])
110.83333333333333
>>> centered_average([1,23,456,78910])
239.5
>>> centered_average([1,3,5,7,9,11])
6.0
>>> centered_average([9,5,16,79,23])
16.0
def centered_average(nums):
    nums.sort()
    nums.remove(nums[0])
    nums.remove(nums[-1])
    return sum(nums) / len(nums)
[ "def centered_average(nums):\n nums.sort()\n new_list = nums[1:-1]\n sum_list = sum(new_list)\n avg = sum_list/len(new_list)\n return avg", "def centered_average(nums):\n return (sum(nums) - max(nums) - min(nums)) // (len(nums) - 2)", "def find_average(input_list):\r\n return sum(input_list)/len(input_list)", "def centered_average(nums):\n ourMax= nums[0]\n ourMin= nums[0]\n for x in nums:\n if ourMax<x:\n ourMax=x\n elif ourMin>x:\n ourMin=x\n nums.remove(ourMin)\n nums.remove(ourMax)\n return int(sum(nums)/len(nums))\n \n #another way of doing the same thing!\n #nums.remove(max(nums))\n #nums.remove(min(nums))\n #return sum(nums)/len(nums)", "def get_avg(input_list):\n return sum(input_list)/len(input_list)", "def __calculate_average(self, list):\n return reduce(lambda x, y: x + y, list) / len(list)", "def mean_average_precision(predictions_list):\n return mean(map(average_precision, predictions_list))", "def mean(lst):\n return sum(lst) / float(len(lst))", "def average(x):\r\n assert len(x) > 0\r\n return float(sum(x)) / len(x)", "def average(data):\n return 1.0*sum(data)/len(data)", "def mean(num_list):\n\n return sum(num_list) / len(num_list)", "def calculate_mean_on_range(start, end, list) -> float64:\n return float(sum(list[start:end]) / (end - start))", "def mean(a):\n return sum(a) / float(len(a))", "def average(data):\n counts = len(data)\n total = sum(data)\n return total / counts", "def findAverage(self, lis):\n sumcolor = 0\n sumpixels = 0\n for i in range(0, len(lis)-1):\n sumcolor += i*lis[i]\n sumpixels += lis[i]\n return sumcolor / sumpixels", "def mean(*list: float) -> float:\n # Get number of list elements.\n size = len(list)\n # Get sum of list elements.\n total = 0\n for e in list:\n total += e\n\n # Get mean of list elements.\n return total / size", "def mean(num_list):\n\tlist_mean=sum(num_list)/len(num_list)\n\n\treturn list_mean", "def center_vertices_avg(self):\n vertex_array = np.array(self.vertices_)\n centroid = np.mean(vertex_array, axis = 0)\n vertex_array_cent = vertex_array - centroid\n self.vertices_ = vertex_array_cent.tolist()", "def average_precision(predictions):\n precisions = []\n correct_predictions = 0\n for i in range(len(predictions)):\n if predictions[i]:\n correct_predictions += 1\n precisions.append(correct_predictions / (i + 1))\n if precisions:\n #return sum(precisions) / len(precisions)\n return mean(precisions)\n return 0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
>>> reverse_pair('lose or win')
'win or lose'
>>> reverse_pair('he is she')
'she is he'
>>> reverse_pair('i will sleep with you')
'you with sleep will i'
>>> reverse_pair('run in the hallway')
'hallway the in run'
>>> reverse_pair('back to the future')
'future the to back'
def reverse_pair(sentence):
    new = sentence.split(' ')[::-1]
    reversed = ' '.join(new)
    return reversed
[ "def reverse_pair(sentence):\n list = sentence.split()\n l = []\n for s in list:\n l = [s] + l\n return ' '.join(l)", "def reverse_pair(t):\n new = t.split()\n number = len(new)\n changed = []\n while number >=1:\n changed.append(new[number-1])\n number = number - 1\n changed = \" \".join(changed)\n return changed", "def print_reverse_pairs(wordlist):\n\n for word in wordlist:\n print (word[::-1])", "def main():\n t = []\n\n with open('words.txt') as fin:\n for line in fin.readlines():\n word = line.strip()\n t.append(word)\n \n for word in t:\n pair = reverse_pair(t, word)\n\n if not pair == -1:\n print(pair)", "def is_reverse_pair(s1, s2):\n \n return (s1[::-1] == s2)", "def reverse(name, surname):\n\n print(f\"{name[::-1]} {surname[::-1]}\")", "def reverse(self, s: List[str], l: int, r: int) -> None:\n while l < r:\n s[l], s[r] = s[r], s[l]\n l += 1\n r -= 1", "def reversible_pair(func1, func2, bind1={}, bind2={}):\n global _reverse\n # We need the function, since that's what's in the events\n func1 = getfunction(func1)\n func2 = getfunction(func2)\n _reverse[func1] = (func2, getargnames(func2), bind2)\n _reverse[func2] = (func1, getargnames(func1), bind1)", "def reverse(s):\n r = \"\".join(reversed(s))\n\n return r", "def reverseWords(self, s: List[str]) -> None:\n # reverse s\n l, r = 0, len(s) - 1\n while l < r:\n s[l], s[r] = s[r], s[l]\n l += 1\n r -= 1\n # reverse each word within s\n l, r = 0, 0\n while r < len(s):\n r += 1\n if r == len(s) or s[r] == ' ':\n i, j = l, r - 1\n while i < j:\n s[i], s[j] = s[j], s[i]\n i += 1\n j -= 1\n l = r + 1\n return None", "def string_reverse(s):\n if len(s) == 0:\n return s\n else:\n return string_reverse(s[1:]) + s[0]", "def word_flipper(our_string):\n\n word_list = our_string.split(\" \")\n\n for idx in range(len(word_list)):\n word_list[idx] = word_list[idx][::-1] # [index1:index2:step]\n\n return \" \".join(word_list)", "def reverse_words(sentence):\n return sentence.reverse()", "def mirrorWords( word_list, word_list_rev):\r\n if len(word_list) == 0:\r\n words_rev = ' '.join(word_list_rev)\r\n print(words_rev) #puts all the elements of the list in a string\r\n return words_rev\r\n\r\n else:\r\n word = word_list[0] \r\n print(word[::-1])\r\n word_list_rev.append(word[::-1])\r\n word_list.pop(0) #removes the first word so that when the funtion is called it starts with the second element\r\n\r\n return(mirrorWords(word_list, word_list_rev))", "def make_palindrome(s):\n return s[::-1]", "def pairname(insym, outsym):\n if insym == outsym:\n return(insym)\n else:\n return(insym + \":\" + outsym)", "def flip_case(phrase, to_swap):\n\n swap_test = {to_swap.lower(), to_swap.upper()}\n\n phrase_flip = [\n letter.swapcase() if letter in swap_test else letter for letter in phrase]\n\n return \"\".join(phrase_flip)", "def test_reversal(self):\n assert self.example_one.string_reversal() == \"Madam,I'm Adam\"[::-1]\n assert self.example_two.string_reversal() == \"howdy\"[::-1]\n assert self.example_three.string_reversal() == \"Third Times The ChArM\"[::-1]", "def ends_with_pair(s):\n if len(s) == 1:\n return 0\n elif len(s) == 0:\n return -1\n elif s[-2] == s[-1]:\n return len(s) - 2\n elif s[-2] != s[-1]:\n return len(s) - 1\n return -1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
>>> match_ends(['lingering','underwear','pant','tshirt'])
1
>>> match_ends(['sis','bro','dad','mom','papa','mama'])
3
>>> match_ends(['no','match','for','you'])
0
>>> match_ends(['roar','howl','bite','slash'])
1
>>> match_ends(['lol','lul','orz','meme'])
2
def match_ends(strings):
    count = 0
    for i in range(len(strings)):
        if strings[i][0].lower() == strings[i][-1]:
            count += 1
    return count
[ "def match_ends(list):\n count = 0\n for s in list:\n s = s.lower()\n if len(s) >= 2:\n if s[0] == s[-1]:\n count += 1\n return count", "def match_ends(t):\n num = 0\n for x in t:\n if len(x) >= 2:\n if x.startswith(x[0]) == x.endswith(x[0]):\n num += 1\n return num", "def match_any_end_reason(self, match):\n pass", "def match_any_end_date(self, match):\n pass", "def _find_end_of_match(self, rest, token, start):\n score, end = 1, start\n last_match_type = None\n\n for char in rest:\n end = token.find(char, start+1)\n if end < 0:\n return None, None\n\n if end == start+1:\n # This is a sequential match. These matches are worth 2\n # points only.\n if last_match_type != \"sequential\":\n last_match_type = \"sequential\"\n score += 1\n elif not token[end-1].isalnum():\n # This character follows a non-alphanumeric character. This\n # match is worth 2 points only.\n if last_match_type != \"boundary\":\n last_match_type = \"boundary\"\n score += 1\n else:\n last_match_type = \"normal\"\n score += (end - start)\n\n start = end\n\n return score, start+1", "def match_end_date(self, start, end, match):\n pass", "def test_search_end(self):\n seq = \"GCCTGGAAAGGC\"\n filler = \"A\" * 50\n big_seq = filler*2 + seq\n motif = [(104, 'CTGGAAAG')]\n self.assertEqual(stem.search(motif, big_seq), [seq])", "def ends_with(n, lst=['1', '2', '5', '9']):\n n = str(n) if not isinstance(n, str) else n\n return n[-1] in lst", "def matches(word, word_list):\n new_word_list = []\n for item in word_list:\n new_word_list.append(item[0])\n matches=get_close_matches(word, new_word_list)\n return matches", "def has_ending(neg_idx, guesses, pattern, extra=0):\n num = len(pattern) + extra\n at_position = neg_idx == num - 1\n return at_position and has_pattern(guesses[-num:], pattern)", "def max_match_from_last_noun(entity1, entity2):\n e1_last_n = noun_index_before_comma(entity1)\n e2_last_n = last_noun_index(entity2, len(entity2.lemmas)-1)\n matched = []\n while e1_last_n >= 0 and e2_last_n >= 0 and \\\n entity1.lemmas[e1_last_n] == entity2.lemmas[e2_last_n]:\n matched.append(entity1.lemmas[e1_last_n])\n e1_last_n -= 1\n e2_last_n -= 1\n matched_lemmas = list(reversed(matched))\n return matched_lemmas", "def test_valid_matches(self):\n tags = [\"%dummy\", \"%dumdum\", \"%mudmud\"]\n for i, m in enumerate(self.matches_list(\"%dummy, %dumdum & %mudmud\")):\n self.assertEqual(m.group(0), tags[i])", "def getWordsWithSameEnd(word, wordList, n):\n wordst=[]#initializes the list\n \n\n for name in wordList:\n \n if word=='existt':\n name[-n:],word[-n:],n,'gghhh'\n \n if name[-n:]==word[-n:]:\n wordst.append(name)#appends words with last n letters\n return wordst", "def match(string1, list2):\n len1 = len(string1)\n len2 = len(list2)\n s1 = string1.lower()\n l2 = [i.lower() for i in list2]\n scores = []\n for i in range(len2):\n score = 0\n s2 = l2[i]\n for j in range(len1):\n if s2.find(s1[j]) != -1:\n score += 1\n part = s2.partition(s1[j])\n s2 = part[0] + part[2]\n scores.append(score)\n return scores.index(max(scores))", "def find_substrs12_endchars(sidestr,mainstr,substr1,substr2,delay1=0,delay2=0):\n ## don't use regular expressions re module, which finds only non-overlapping matches\n ## we want to find overlapping matches too.\n substr2len = len(substr2)\n substr1len = len(substr1)\n abs_idx1 = 0 ## mainstr is getting chopped, but we maintain abs index on sidestr\n while True:\n idx2 = mainstr.find(substr2)\n ## find returns -1 if substr2 not found\n if idx2 != -1:\n endcharidx2 = idx2+substr2len+delay2\n ### NOTE: 
abs_startidx1 is one earlier than definition!!! I think necessary for causality.\n ## put +1 below to switch to definition in Quinn et al 2010\n abs_startidx1 = abs_idx1 + endcharidx2 - substr1len-delay1\n if endcharidx2<len(mainstr): # mainstr Y has characters left?\n if abs_startidx1 >= 0: # sidestr X has sufficient chars before?\n ## sidestr has substr1 before the char to be returned? and mainstr is not over\n ## IMP: below if's first term is the only place directed info enters.\n ## Remove first term below and you get just the entropy of mainstr Y: VERIFIED.\n #print sidestr[abs_startidx1:abs_startidx1+substr1len], substr1, abs_startidx1\n if sidestr[abs_startidx1:abs_startidx1+substr1len]==substr1:\n yield mainstr[endcharidx2]\n else: # reached end of string\n break\n ## chop the mainstr just after the start of substr2,\n ## not after the end, as we want overlapping strings also\n mainstr = mainstr[idx2+1:]\n ## don't chop sidestr as substr1len may be greater than substr2len\n ## in the next iteration, idx2 will be relative, but for sidestr we maintain abs_idx1\n abs_idx1 += idx2+1\n else: # substr2 not found\n break", "def calculate_score(s1, s2, l1, l2, startpoint):\n\n matched = \"\" # to hold string displaying alignements\n score = 0\n\n\n for i in range(l2):\n if (i + startpoint) < l1:\n if s1[i + startpoint] == s2[i]: # if the bases match\n matched = matched + \"*\"\n score = score + 1\n else:\n matched = matched + \"-\"\n\n # some formatted output\n print(\".\" * startpoint + matched) \n print(\".\" * startpoint + s2)\n print(s1)\n print(score) \n print(\" \")\n\n return score, matched", "def get_nends(ifiles):\n import re\n\n # Determine if dataset contains paired-end data\n paired_end = False\n nends_status = 1\n for file in ifiles:\n if file.endswith('.R2.fastq.gz'):\n paired_end = True\n nends_status = 2\n break # dataset is paired-end\n\n # Check to see if both mates (R1 and R2) are present paired-end data\n if paired_end:\n nends = {} # keep count of R1 and R2 for each sample\n for file in ifiles:\n # Split sample name on file extension\n sample = re.split('\\.R[12]\\.fastq\\.gz', os.path.basename(file))[0]\n if sample not in nends:\n nends[sample] = 0\n\n nends[sample] += 1\n\n # Check if samples contain both read mates\n missing_mates = [sample for sample, count in nends.items() if count == 1]\n if missing_mates:\n # Missing an R1 or R2 for a provided input sample\n raise NameError(\"\"\"\\n\\tFatal: Detected pair-end data but user failed to provide\n both mates (R1 and R2) for the following samples:\\n\\t\\t{}\\n\n Please check that the basename for each sample is consistent across mates.\n Here is an example of a consistent basename across mates:\n consistent_basename.R1.fastq.gz\n consistent_basename.R2.fastq.gz\n\n Please do not run the pipeline with a mixture of single-end and paired-end\n samples. This feature is currently not supported within {}, and it is\n not recommended either. 
If this is a priority for your project, please run\n paired-end samples and single-end samples separately (in two separate output directories).\n If you feel like this functionality should exist, feel free to open an issue on Github.\n \"\"\".format(missing_mates, sys.argv[0])\n )\n\n return nends_status", "def _add_interval(\n self, interval: Interval, alignment: MSA, end: bool = False\n ) -> Optional[Interval]:\n if interval.type is IntervalType.Match:\n # The +1 is because we also extend the non_match interval\n if len(interval) < self.mml:\n try:\n last_non_match = self._pop(IntervalType.NonMatch)\n last_non_match.modify_by(0, len(interval) + 1)\n except IndexError:\n last_non_match = Interval(\n IntervalType.NonMatch, interval.start, interval.stop + 1\n )\n if end: # If this is final call, go to append the interval\n last_non_match.modify_by(0, -1)\n self._append(last_non_match)\n return last_non_match\n else:\n if len(self._match_intervals) > 0 and has_empty_sequence(\n alignment, (interval.start, interval.stop)\n ):\n # Pad interval with sequence to avoid empty alleles\n len_match = len(self._match_intervals[-1])\n if len_match - 1 < self.mml:\n # Case: match is now too small, converted to non_match\n self._match_intervals.pop()\n interval.modify_by(-1 * len_match, 0)\n if len(self._non_match_intervals) > 0:\n # Case: merge previous non_match with this non_match\n self._non_match_intervals[-1].modify_by(0, len(interval))\n return None\n else:\n self._match_intervals[-1].modify_by(0, -1)\n interval.modify_by(-1, 0)\n\n self._append(interval)\n return None", "def find_last_line_matching(lines, text, end):\r\n for i in range(end, 0, -1):\r\n line = lines[i].strip()\r\n if line == text:\r\n return i\r\n return -1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The privacy settings for the album
def getPrivacy(self): return self.base.get("privacy", [])
[ "def getPrivacy(self):\n return FacebookPrivacy(self.base.get(\"privacy\", []))", "def privacy(self):\n return 'all_users' if self.q(css=PROFILE_VISIBILITY_SELECTOR.format('all_users')).selected else 'private'", "def privacy(self, privacy):\n self.wait_for_element_visibility('select#u-field-select-account_privacy', 'Privacy dropdown is visible')\n\n if privacy != self.privacy:\n query = self.q(css=PROFILE_VISIBILITY_INPUT)\n select_option_by_value(query, privacy)\n EmptyPromise(lambda: privacy == self.privacy, f'Privacy is set to {privacy}').fulfill()\n self.q(css='.btn-change-privacy').first.click()\n self.wait_for_ajax()\n\n if privacy == 'all_users':\n self.wait_for_public_fields()", "def click_privacy_settings_btn(self):\n self.driver.click(\"privacy_settings_btn\")", "async def _viewperms(self, ctx: commands.Context):\n settings = await self.config.guild(ctx.guild).all()\n color = discord.Color.dark_purple()\n statuschannel = ctx.guild.get_channel(settings['statuschannel'])\n if settings['statuschannel']:\n statuschannel = statuschannel.mention\n if not settings['statuschannel']:\n statuschannel = \"Not Set\"\n try:\n embed = discord.Embed(\n title=f\"Permission Settings\",\n color=color,\n description=f\"**Full Access Role:** {settings['fullaccessrole']}\\n\"\n f\"**Mod Roles:** {settings['modroles']}\\n\"\n f\"**Mod Commands:** {settings['modcommands']}\\n\"\n f\"**Blacklisted Names:** {settings['badnames']}\\n\"\n f\"**Status Channel:** {statuschannel}\"\n )\n return await ctx.send(embed=embed)\n except KeyError:\n await ctx.send(f\"Setup permissions first.\")", "def show_privacy():\n return _run_speedify_cmd([\"show\", \"privacy\"])", "def save(self, *args, **kwargs):\n for picture in self.get_pictures():\n picture.is_protected = self.is_protected\n picture.save()\n for album in Album.objects.filter(parent_album__exact=self):\n album.is_protected = self.is_protected\n album.update()\n super().save(*args, **kwargs)", "def item_visibility_prefs(self):\n settings = QtCore.QSettings()\n if settings.value(const.PREF_VISIBILITY):\n return const.DEFAULT_HIDDEN_ITEMS\n\n size = settings.beginReadArray(const.PREF_VISIBILITY)\n visibility_prefs = {}\n for index in range(size):\n settings.setArrayIndex(index)\n index_key = settings.childKeys()[0]\n visibility_prefs.update({index_key: int(settings.value(index_key))})\n settings.endArray()\n return visibility_prefs", "def privacy(self):\n return render('/privacy.html')", "def privacy_policy():\r\n\treturn render_template(\"privacy_policy.html\", page_title=misc.page_title(\"pp\"))", "def get_folder_prefs(self,secret,throw_exceptions=True):\r\n\t\tparams = { 'method': 'get_folder_prefs', 'secret' : secret }\r\n\t\treturn self._request(params,throw_exceptions)", "def get_privacy_dict(obj, field_name):\n kwargs = {\n 'content_type': ContentType.objects.get_for_model(obj),\n 'object_id': obj.pk,\n }\n if field_name:\n kwargs.update({'field_name': field_name})\n else:\n kwargs.update({'field_name': ''})\n privacy_levels = PrivacyLevel.objects.all()\n try:\n selected_setting = PrivacySetting.objects.get(**kwargs)\n selected_level = selected_setting.level.clearance_level\n except PrivacySetting.DoesNotExist:\n selected_setting = None\n selected_level = getattr(\n settings, 'PRIVACY_DEFAULT_CLEARANCE_LEVEL', 1)\n return {\n 'field_name': field_name,\n 'privacy_levels': privacy_levels,\n 'selected_level': selected_level,\n 'selected_setting': selected_setting,\n }", "def album(self):\r\n return self.content.get('album')", "def get_embed_perms():\n 
return discord.Permissions(embed_links=True, read_messages=True)", "def preferences(self) -> Preferences:\r\n return self.__api_request('GET', '/api/v1/preferences')", "def getAvatarConfiguration(self): \n return self.__avatarConfiguration", "def extra_authorize_data(self) -> dict[str, Any]:\n return {\"scope\": \"basic devices_read\"}", "def get_block_public_access_configuration():\n pass", "def write_allow():\n return 'write-allow', PermissionConfig" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A view to return the FAQ page
def faq(request): return render(request, "faq.html")
[ "def test_faq_template(self):\n res = self.testapp.get('/faq', status=200)\n self.failUnless('Why is it that C3S wants me to sign?' in res.body)\n self.failUnless(\n 'Copyright 2013, OpenMusicContest.org e.V.' in res.body)", "def quiz():\n return render_template('questions.html')", "def quiz_view(request, quiz_id):\n quiz = get_object_or_404(Quiz, id=quiz_id)\n questions = quiz.question_set.all()\n return render_to_response('quiz/view_quiz.html', {\n \"quizobj\" : quiz,\n \"questions\" : questions,\n \"selected_button\" : \"details\",\n }, context_instance=RequestContext(request))", "def see_answers():\n return render_template('answers.html')", "def how_to(request):\n\n return render(request, 'howto.html', {})", "def question(request, id):\r\n q = Question.objects.get(pk = id)\r\n a = q.answer_set.filter(deleted = False).order_by('-created')[:20]\r\n if request.method == 'GET':\r\n aform = aforms.AnswerForm()\r\n payload = {'question':q, 'answers':a, 'aform':aform, }\r\n return render_to_response('moments/question.html', payload, RequestContext(request))", "def questionnaire_detail(request, quid):\r\n mq = get_object_or_404(Questionnaire, id=quid)\r\n\r\n template_name = 'alumni/memory/detail.html'\r\n\r\n return render(request, template_name, {'data': mq})", "def quizzes():\n return render_template('quiz.html')", "async def faq(self, ctx, *, query: str = None):\n if not hasattr(self, 'faq_entries'):\n await self.refresh_faq_cache()\n\n if query is None:\n return await ctx.send('https://discordpy.readthedocs.io/en/latest/faq.html')\n\n matches = fuzzy.extract_matches(query, self.faq_entries, scorer=fuzzy.partial_ratio, score_cutoff=40)\n if len(matches) == 0:\n return await ctx.send('Nothing found...')\n\n paginator = commands.Paginator(suffix='', prefix='')\n for key, _, value in matches:\n paginator.add_line(f'**{key}**\\n{value}')\n page = paginator.pages[0]\n await ctx.send(page)", "def about(request):\n\n\treturn render(request, 'about/about.html')", "def get_faq(self, code):\n endpoint = f'{self.endpoint_base}/faqs/{code}/'\n return self._api.search(endpoint)", "def view_question(request):\n if authenticated_userid(request) == None or 'user' not in request.session.keys():\n return HTTPFound(location='/')\n main = get_renderer('templates/master.pt').implementation()\n ###load the question number and test id###\n test_id = int(request.GET[\"id\"])\n quesiton_num = 1\n try:\n question_num = int(request.GET[\"question\"])\n except:\n question_num = 1\n\n ###load the test and it's questions and their answers from the database###\n dbsession = DBSession()\n test = dbsession.query(Test).filter(Test.id==test_id).first()\n if attempts_remaining(dbsession, test.id, request.session['user']['name']) <= 0:\n return HTTPFound(location='/') #if no more attempts left\n if (test.start_time - datetime.datetime.now()) > (datetime.timedelta(0)):\n return HTTPFound(location='/') #if it's too early to take\n if (test.end_time - datetime.datetime.now()) < (datetime.timedelta(0)):\n return HTTPFound(location='/') #if it's too late to take\n all_questions = dbsession.query(Question).filter(\n Question.test_id==test.id).all()\n total_questions = len(all_questions)\n for q in all_questions:\n \n if q.question_num == question_num:\n question = q\n\n ###create \"current_test\" in the session object###\n session = request.session\n user_choice = ''\n if \"current_test\" not in session.keys() or (\n session[\"current_test\"][\"name\"] != test.name):\n session[\"current_test\"] = {\"name\": test.name}\n\n ###load 
any previously selected answer to this question###\n if str(question_num) in session['current_test'].keys():\n user_choice = session['current_test'][str(question_num)]\n\n ###check if a question was submited and put the answer in the session###\n post = request.POST\n if 'review test' in post or 'next question' in post:\n controls = post.items()\n answer = \"na\"\n i = 0\n if question.question_type == \"shortAnswer\":\n for control in controls:\n if control[0] == 'answer':\n answer = str(control[1])\n if question.question_type == \"multipleChoice\":\n for control in controls:\n if control[0] == 'deformField1':\n answer = str(control[1])\n if question.question_type == \"selectTrue\":\n answer = []\n for control in controls:\n if control[0] == 'checkbox':\n answer.append(control[1])\n session[\"current_test\"][str(question_num)]=answer \n #store selected answer\n if 'next question' in post:\n return HTTPFound(location='/question?id='+str(test.id)+\n ';question='+str(question_num+1))\n if 'review test' in post: #check if it was the last question\n #if so redirect to the test's submit page\n return HTTPFound(location='/test?id='+str(test.id))\n\n ###create the question's form###\n if question.question_type == \"multipleChoice\":\n schema = create_multiple_choice_form(question, \n dbsession, user_choice)\n if question.question_type == \"selectTrue\":\n schema = create_select_all_form(question,\n dbsession, user_choice)\n if question.question_type == \"shortAnswer\":\n schema = create_short_answer_form(question, \n dbsession, user_choice)\n if question_num == total_questions: #check if this is the last question\n form = deform.Form(schema[0],\n buttons=('review test',))\n else:\n form = deform.Form(schema[0],\n buttons=('next question',))\n if question.question_type == \"shortAnswer\":\n return {\"test\":test,'form':form.render(schema[1]),\n 'link':'/test?id='+str(test.id), 'main': main} #if it's a short answer question (returns default value)\n return {\"test\":test,'form':form.render(), 'link':'/test?id='+str(test.id), 'main': main}", "def about(self):\n return render('/about.html')", "async def site_faq_alias(self, ctx: Context) -> None:\n await self.invoke(ctx, \"site faq\")", "def index(request):\n return render(request, \"encyclopedia/index.html\", {\"entries\": util.list_entries()})", "def help(self):\n return render('/help.html')", "def question(request):\n\n # first check if we should be showing the results page instead\n # (if they have answered all the questions)\n if 'show_results' in request.COOKIES.keys() and (\n request.COOKIES['show_results'] == 'yes'):\n return show_results(request)\n\n current_q = None\n visitor = None\n new_visitor = False\n\n if request.method == 'POST':\n # The user has submitted an answer to a question.\n # Get User ID\n if 'visitor' in request.COOKIES.keys():\n visitor = Visitor.objects.get(id=request.COOKIES['visitor'])\n else: # if this is a new visitor\n visitor = Visitor()\n visitor.save()\n new_visitor = True\n\n is_yes = request.POST.get(\"yesno\") == 'yes'\n is_skip = request.POST.get(\"yesno\") not in (\"yes\", \"no\")\n current_q = Question.objects.get(\n id=request.POST.get(\"qnumber\"))\n if not Answer.objects.filter(\n question=current_q.id, visitor=visitor\n ).exists() and not is_skip:\n # The user has not answered this question yet. 
Count the response.\n answer = Answer(yes=is_yes, question=current_q, visitor=visitor)\n answer.save()\n\n question = get_next_question(current_q.id if current_q else 0)\n if question:\n # Display the new question to the user.\n question_number = get_question_number(question.id)\n question_total = get_question_total()\n context = {\n 'question': question,\n 'percent_no': get_percent_no(question),\n 'question_number': question_number,\n 'question_total': question_total,\n }\n response = render(request, 'question.html', context)\n if new_visitor:\n response.set_cookie(\"visitor\", visitor.id)\n return response\n\n else:\n # We have iterated through all questions.\n # Set a cookie for question completion and display the results page.\n response = show_results(request)\n response.set_cookie('show_results', 'yes')\n if new_visitor:\n response.set_cookie(\"visitor\", visitor.id)\n return response", "def question_page(question_num):\n num_length = len(session[\"responses\"])\n if question_num != num_length:\n flash(\"Trying to access an invalid question!\", \"warning\")\n return redirect(f\"/questions/{num_length}\")\n question = satisfaction_survey.questions[question_num]\n return render_template(\"question.html\", question=question)", "def test_detail_view_with_a_future_question(self):\n future_question = self.create_question(question_text='Future question.', days=5)\n url = reverse('polls:detail', args=(future_question.id,))\n response = self.client.get(url)\n self.assertEqual(response.status_code, 404)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A view to return the Judging Criteria page
def judging_criteria(request): return render(request, "judging-criteria.html")
[ "def devderby_rules(request):\n return jingo.render(request, 'demos/devderby_rules.html', {})", "def as_html(self, request, position):\n\n categories = []\n self_categories = self.categories.all()\n for category in Category.objects.all():\n if category in self_categories:\n selected = True\n else:\n selected = False\n\n categories.append({\n \"id\": category.id,\n \"name\": category.name,\n \"selected\": selected,\n \"level\": category.level,\n })\n\n return render_to_string(\"manage/criteria/category_criterion.html\",\n RequestContext(request, {\n \"id\": \"ex%s\" % self.id,\n \"operator\": self.operator,\n \"value\": self.value,\n \"position\": position,\n \"categories\": categories,\n \"content_type\": self.content_type,\n \"types\": CriterionRegistrator.items(),\n }))", "def show_impact():\n url_for('static', filename='zips_us_topo.json')\n url_for('static', filename='pov_levels.csv')\n query = modelsession.query(model.Project).order_by(func.random()).limit(1000).all()\n #creates a json object to be passed to javascript\n json_list = []\n for i in range(len(query)):\n json_list.append({\"id\": query[i].id,\n \"title\": query[i].title, \n \"latitude\": query[i].school.latitude,\n \"longitude\": query[i].school.longitude,\n \"location\": query[i].school.city + \", \" + query[i].school.state,\n \"grade\": query[i].grade_level,\n \"needs\": query[i].fulfillment_trailer})\n results = json.dumps(json_list)\n print \"results sending\"\n return render_template(\"impact.html\", results=results)", "def ProjectGrooming(request):\n kwargs = {\n 'storyType': 'Project Grooming',\n }\n myord = [\n 'track',\n 'module',\n 'rallyNumber',\n ]\n story=Story.objects.filter(**kwargs).order_by(*myord)\n c = {'story': story,\n 'header': 'Project grooming: ' + _text(len(story)),\n 'exception': 'No project grooming stories'}\n return render(request,'radabo/grooming.html',c)", "def objective():\n return render_template('objective.html')", "def as_html(self, request, position):\n\n compositions = CompositionCategory.objects.filter(criterion=self)\\\n .values('amount',\n 'category')\n formset = CompositionCategoryFormSet(initial=compositions)\n\n template = \"manage/criteria/composition_category_criterion.html\"\n return render_to_string(template, RequestContext(request, {\n \"id\": \"ex%s\" % self.id,\n \"operator\": self.operator,\n \"position\": position,\n \"compositions\": compositions,\n \"formset\": formset,\n \"content_type\": self.content_type,\n \"types\": CriterionRegistrator.items(),\n }))", "def terms_and_conditions_view(request, *args, **kwargs):\n return render(request, \"terms_and_conditions.html\")", "def expenses_view():\n expense = None\n expenses_tab = expenses_table()\n form = ExpenseViewForm()\n form.category.choices = category_choice()\n if form.validate_on_submit():\n if form.category.data != \"\" and form.expense_type.data != \"\": # when user select category and type\n expense = Expenses.query.filter_by(budget_id=selected_budget()). \\\n filter_by(category=form.category.data). \\\n filter_by(expense_type=form.expense_type.data).all()\n elif form.category.data != \"\": # when user select category only\n expense = Expenses.query.filter_by(budget_id=selected_budget()). \\\n filter_by(category=form.category.data).all()\n elif form.expense_type.data != \"\": # when user select type only\n expense = Expenses.query.filter_by(budget_id=selected_budget()). 
\\\n filter_by(expense_type=form.expense_type.data).all()\n expenses_tab = expenses_table(expense)\n return render_template('expenses_view.html', form=form, expenses_tab=Markup(expenses_tab))\n\n return render_template('expenses_view.html', form=form, expenses_tab=Markup(expenses_tab))", "def query():\n return render_template(\"dashboard/query.html\", tagname = 'query', form = QueryForm())", "def producers(request):\n producers = Producer.objects.filter(highlight=True)\n context = {\n 'producers': producers,\n }\n\n template = ('producers/producers.html')\n return render(request, template, context)", "def search(request):\n req_id = None\n try:\n req_id = int(request)\n except ValueError:\n req_id = None\n\n sql_str = \"title='{req}' OR description='{req}'\"\n if req_id:\n sql_str = \"id={req} OR title='{req}' OR description='{req}'\"\n sql_str = sql_str.format(req=request)\n res = db.select(\"CMPS\", sql_str)\n\n return res", "def createQueryDisplayString(self, criteria):\n query = self.createQuery(criteria)\n return str(query)", "def view_set_listings(request):\n return", "def index():\n results = models.Journal.select()\n return render_template('index.html', journals=results)", "def theory():\n return render_template('theory.html')", "def Priority(request):\n\n header = \"Prioritization and grooming status\"\n exc = \"Something has gone horribly wrong!\"\n\n # Get everything not prioritized\n kwargs = {\n 'status': 'B',\n 'ready': 'N',\n }\n myOrd = [\n '-storyType',\n '-businessValue',\n 'theme',\n 'rallyNumber',\n ]\n snp = Story.objects.filter(**kwargs).order_by(*myOrd)\n\n # Get prioritized enhancements not yet groomed\n kwargs = {\n 'status': 'B',\n 'ready': 'Y',\n 'storyType': 'Enhancement',\n }\n myOrd = [\n '-businessValue',\n 'theme',\n 'rallyNumber',\n ]\n eng = Story.objects.filter(**kwargs).order_by(*myOrd)\n\n # Get prioritized project groomings not yet completed - this takes two steps\n # 1. Status = Backlog, Ready = Yes\n # 2. Status in Defined, In-Progress\n\n # 1. 
Update storyType, keep same ordering\n kwargs['storyType'] = 'Project Grooming'\n pgns = Story.objects.filter(**kwargs).order_by(*myOrd)\n\n # 2.\n kwargs = {\n 'status__in': ['D','P'],\n 'storyType': 'Project Grooming',\n }\n pgip = Story.objects.filter(**kwargs).order_by(*myOrd)\n\n # Merge and sort the two project lists with the enhancements not groomed.\n inprogress = sorted(\n chain(eng, pgns, pgip),\n key=lambda x: x.status_sort())\n\n c = {\n 'stories_not_started': snp,\n 'stories_not_done': inprogress,\n 'header': header,\n 'exception': exc,\n 'story': 'Y', # Ugly hack to make export.html show the export button!\n }\n return render(request, 'radabo/priority.html', c)", "def index(request):\n oils_list = EssentialOil.objects.all().order_by('name')\n metabolites_list = Metabolite.objects.all().order_by('name')\n # For storing the retrieved results.\n result = {}\n if request.method == 'POST':\n # If submit button for oil was pressed-\n # Retrieve all related metabolites.\n if 'oil' in request.POST:\n result['oil'] = EssentialOil.objects.get(name=request.POST['oil'])\n result['metabolites'] = Through.objects.filter(oil=result['oil']).order_by('metabolite__name')\n # for metabolite in result['metabolites']:\n # through['metabolite'] = Through.objects.get()\n # If submit button for metabolite was pressed-\n # Retrieve all related oils.\n if 'metabolite' in request.POST:\n result['metabolite'] = Metabolite.objects.get(name=request.POST['metabolite'])\n result['oils'] = Through.objects.filter(metabolite=result['metabolite']).order_by('oil__name')\n return render(request, 'index.html', {'oils_list': oils_list, 'metabolites_list': metabolites_list, 'result': result})", "def termsConditions(request):\n return render(request, 'termsconditions.html')", "def search_result_view(request):\n\n job_list = Job.objects.order_by('-timestamp')\n\n # Keywords\n if 'job_title_or_company_name' in request.GET:\n job_title_or_company_name = request.GET['job_title_or_company_name']\n\n if job_title_or_company_name:\n job_list = job_list.filter(title__icontains=job_title_or_company_name) | job_list.filter(\n company_name__icontains=job_title_or_company_name)\n\n # location\n if 'location' in request.GET:\n location = request.GET['location']\n if location:\n job_list = job_list.filter(location__icontains=location)\n\n # Job Type\n if 'job_type' in request.GET:\n job_type = request.GET['job_type']\n if job_type:\n job_list = job_list.filter(job_type__iexact=job_type)\n\n # job_title_or_company_name = request.GET.get('text')\n # location = request.GET.get('location')\n # job_type = request.GET.get('type')\n\n # job_list = Job.objects.all()\n # job_list = job_list.filter(\n # Q(job_type__iexact=job_type) |\n # Q(title__icontains=job_title_or_company_name) |\n # Q(location__icontains=location)\n # ).distinct()\n\n # job_list = Job.objects.filter(job_type__iexact=job_type) | Job.objects.filter(\n # location__icontains=location) | Job.objects.filter(title__icontains=text) | Job.objects.filter(company_name__icontains=text)\n\n paginator = Paginator(job_list, 10)\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n context = {\n\n 'page_obj': page_obj,\n\n }\n return render(request, 'jobapp/result.html', context)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A view to return the Plagiarism Policy page
def plagiarism_policy(request): return render(request, "plagiarism-policy.html")
[ "def privacy_policy_view(request, *args, **kwargs):\n return render(request, \"privacy_policy.html\")", "def privacy_policy():\r\n\treturn render_template(\"privacy_policy.html\", page_title=misc.page_title(\"pp\"))", "def policy():\n return render_template('dashboard/policy.html', tagname = 'policy')", "def objective():\n return render_template('objective.html')", "def privacy(self):\n return render('/privacy.html')", "def advisors(request):\n return render(request, \"advisors.html\", {})", "def plan_proposal(request, project_slug, proposal_slug):\n try:\n org_settings = OrganizationSetting.on_site.all()[0]\n except IndexError:\n org_settings = {}\n\n try:\n proposal = Proposal.objects.select_related().get(\n project__slug=project_slug,\n slug=proposal_slug)\n except Proposal.DoesNotExist:\n raise Http404\n \n proposal_image = Image.objects.filter(proposal = proposal.id)\n\n return render_to_response('proposal_feedback.html',\n {'proposal_details': proposal,\n 'proposal_image': proposal_image,\n 'project_name' : project_slug,\n 'proposal_name' : proposal_slug,\n 'org_settings': org_settings},\n context_instance = RequestContext(request))", "def show_policy_mitigation(self,policy_name):\n self.show_policy_basic(policy_name)\n panel_xpath = u\"//a[starts-with(@class,'submenu_')]//span[normalize-space(.)='%s']\" % u\"Mitigation設定\"\n self._selenium.wait_until_page_contains_element(panel_xpath)\n self._selenium.click_element(panel_xpath)\n time.sleep(1)\n self._selenium.wait_until_page_contains_element(u\"//button[.='キャンセル']\")\n self.verbose_capture()\n BuiltIn().log(\"Showed mitigation setting of the policy `%s`\" % policy_name)", "def petition(request):\n return render(request, 'base/petition.html')", "def professor_dashboard(request):\n assignments = list(Assignment.objects.filter(owner = request.user.professor).all())\n\n return render_to_response(\"professor/index.html\", \\\n {'user': request.user, 'assignments': assignments})", "def theory():\n return render_template('theory.html')", "def show_approvals(request):\r\n \r\n result_limit = getattr(settings, 'UPLOADED_SONG_COUNT', 150)\r\n songs = SongApprovals.objects.order_by('-approved')[:result_limit]\r\n \r\n return render_to_response('webview/recent_approvals.html', { 'songs': songs , 'settings' : settings }, context_instance=RequestContext(request))", "def payslips(request):\n\n return render(request, 'payslips/payslips.html')", "def PLATFORM_OBJECTIVE_VIEW_RESPONSE(self):\n\t\treturn \"\"\"Here are your objectives (Redirecting to the radar chart page).\n\t\t Click on any objective to update them.\"\"\"", "def makePolicy(mdp,Q):\r\n # A policy is an action-valued dictionary P[s] where s is a state\r\n P = dict()\r\n ### YOUR CODE HERE\r\n ### YOUR CODE HERE\r\n ### YOUR CODE HERE\r\n return P", "def review(self, request, access_type,\n page_name=None, params=None, **kwargs):\n\n try:\n entity = self._logic.getFromKeyFieldsOr404(kwargs)\n except out_of_band.Error, error:\n return helper.responses.errorResponse(\n error, request, template=params['error_public'])\n\n # get the context for this webpage\n context = responses.getUniversalContext(request)\n responses.useJavaScript(context, params['js_uses_all'])\n context['page_name'] = '%s \"%s\" from %s' % (page_name, entity.title,\n entity.scope.name())\n context['entity'] = entity\n context['entity_type'] = params['name']\n context['entity_type_url'] = params['url_name']\n if self._show_review_not_appeared_msg:\n context['header_msg'] = self.DEF_REVIEW_NOT_APPEARED_MSG\n 
self._show_review_not_appeared_msg = False\n\n # get the roles important for reviewing an application\n filter = {\n 'user': user_logic.logic.getCurrentUser(),\n 'scope': entity.org,\n 'status': 'active'\n }\n\n org_admin_entity = org_admin_logic.logic.getForFields(filter, unique=True)\n mentor_entity = mentor_logic.logic.getForFields(filter, unique=True)\n\n # decide which form to use\n if org_admin_entity:\n # create a form for admin review\n # it is done here, because of the dynamic choices list for the mentors\n\n # firstly, get the list of all possible mentors to assign\n choices = []\n choices.append(('', 'No mentor'))\n\n # prefer those mentors who volunteer to mentor this proposal \n filter = {\n '__key__': entity.possible_mentors\n }\n order = ['name_on_documents']\n possible_mentors = mentor_logic.logic.getForFields(filter, order=order)\n for mentor in possible_mentors:\n choices.append((mentor.link_id, mentor.document_name()))\n\n # also list the rest of the mentors\n filter = {\n 'scope': entity.org\n }\n all_mentors = mentor_logic.logic.getForFields(filter, order=order)\n for mentor in all_mentors:\n if mentor.key() in entity.possible_mentors:\n continue\n choices.append((mentor.link_id, mentor.document_name()))\n\n dynafields = [\n {'name': 'rank',\n 'base': forms.IntegerField,\n 'label': 'Set to rank',\n 'help_text':\n 'Set this proposal to the given rank (ignores the given score)',\n 'min_value': 1,\n 'required': False,\n 'passthrough': ['min_value', 'required', 'help_text'],\n },\n {'name': 'mentor',\n 'base': forms.ChoiceField,\n 'passthrough': ['initial', 'required', 'choices'],\n 'label': 'Assign Mentor',\n 'choices': choices,\n 'required': False,\n 'help_text': 'Choose the mentor you would like to assign to this '\n 'Proposal. Choose \"No mentor\" if you don\\'t want any '\n 'mentor assigned.'\n },\n ]\n\n dynaproperties = params_helper.getDynaFields(dynafields)\n dynaproperties['clean_comment'] = cleaning.clean_html_content('comment')\n\n form = dynaform.extendDynaForm(\n dynaform=params['mentor_review_form'], \n dynaproperties=dynaproperties)\n\n else:\n # the current user is not an org admin\n if entity.org.scoring_disabled:\n # reviews are disabled, don't show score field\n form = params['locked_review_form']\n else:\n # reviews are enabled, show the score field\n form = params['mentor_review_form']\n\n if request.method == 'POST':\n return self.reviewPost(request, context, params, entity,\n form, org_admin_entity, mentor_entity, **kwargs)\n else:\n # request.method == 'GET'\n return self.reviewGet(request, context, params, entity,\n form, org_admin_entity, mentor_entity, **kwargs)", "def manage_assessments():\n if not validate_using_assessment():\n return redirect(redirect_url())\n\n return render_template('admin/presentations/manage.html')", "def primerDetailsPage(params):\n # retrieve batch information\n batchId, pamId, pam = params[\"batchId\"], params[\"pamId\"], params[\"pam\"]\n setupPamInfo(pam)\n\n inSeq, genome, pamSeq, position, extSeq = readBatchParams(batchId)\n seqLen = len(inSeq)\n batchBase = join(batchDir, batchId)\n\n guideSeq, pamSeq, pamPlusSeq, guideSeqWPam, guideStrand, guideSeqHtml, guideStart, guideEnd \\\n = findGuideSeq(inSeq, pam, pamId)\n\n # search for restriction enzymes that overlap the mutation site\n allEnzymes = readEnzymes()\n mutEnzymes = matchRestrEnz(allEnzymes, guideSeq.upper(), pamSeq.upper(), pamPlusSeq)\n\n # create a more human readable name of this guide\n guidePos = int(pamId.strip(\"s+-\"))+1\n guideStrand = pamId[-1]\n 
if guideStrand==\"+\":\n primerGuideName = str(guidePos)+\"forw\"\n else:\n primerGuideName = str(guidePos)+\"rev\"\n\n # primer helper\n print \"\"\"\n <style>\n table.primerTable {\n border-width: 1px;\n border-color: #DDDDDD;\n border-collapse: collapse;\n }\n table.primerTable td {\n border-width: 1px;\n border-color: #DDDDDD;\n border-collapse: collapse;\n }\n\n table.libTable td {\n border-width: 1px;\n table-layout: fixed;\n border-collapse: collapse;\n }\n table.libTable td {\n border-color: #DDDDDD;\n }\n </style>\n \"\"\"\n\n # output the page header\n print '''<div style='width: 80%; margin-left:10%; margin-right:10%; text-align:left;'>'''\n printBackLink()\n print \"<h2>\"\n if batchName!=\"\":\n print batchName+\":\"\n print \"Guide sequence: %s</h2>\" % (guideSeqHtml)\n\n print(\"Contents:<br>\")\n print(\"<ul>\")\n print(\"<li><a href='#cloning'>Cloning or expression of guide RNA</a>\")\n print(\"<ul><li><a href='#t7plasmid'>T7 <i>in vitro</i> expression from a plasmid</a></li></ul>\")\n print(\"<ul><li><a href='#t7oligo'>T7 <i>in vitro</i> expression from overlapping oligonucleotides</a></li></ul>\")\n print(\"<ul><li><a href='#u6plasmid'>U6 expression from an Addgene plasmid</a></li></ul>\")\n print(\"<ul><li><a href='#ciona'>Direct PCR for <i>C. intestinalis</i></a></li></ul>\")\n print(\"<ul><li><a href='#gibson'>Lentiviral vectors: Cloning with Gibson assembly</a></li></ul>\")\n print(\"<ul><li><a href='#primerSummary'>Summary of main cloning/expression primers</a></li></ul>\")\n print(\"<li><a href='#ontargetPcr'>PCR to amplify the on-target site</a></li>\")\n if len(mutEnzymes)!=0:\n print(\"<li><a href='#restrSites'>Restriction sites for PCR validation</a></li>\")\n print(\"<li><a href='#offtargetPcr'>PCR to amplify off-target sites</a></li>\")\n print(\"<li><a href='#satMut'>Saturating mutagenesis using all guides</a></li>\")\n print(\"</ul>\")\n print(\"<hr>\")\n\n printCloningSection(batchId, primerGuideName, guideSeq, params)\n print \"<hr>\"\n\n targetSeq, guideStartOnTarget, guideEndOnTarget = printValidationPcrSection(batchId, genome, pamId, position, params, \\\n guideStart, guideEnd, primerGuideName, guideSeq)\n print \"<hr>\"\n\n if len(mutEnzymes)!=0:\n printEnzymeSection(mutEnzymes, targetSeq, guideSeqWPam, guideStartOnTarget, guideEndOnTarget)\n print \"<hr>\"\n\n print(\"<h2 id='offtargetPcr'>PCR to amplify off-target sites</h2>\")\n offtUrl = cgiGetSelfUrl({\"otPrimers\":\"1\"}, onlyParams=[\"batchId\", \"pamId\"])\n print(\"<p>Primers for all off-targets can be downloaded from the <a href='%s'>Off-target PCR</a> page.</p>\" % offtUrl)\n\n print(\"<h2 id='satMut'>Saturating mutagenesis using all guides</h2>\")\n satMutUrl = cgiGetSelfUrl({\"satMut\":\"1\"}, onlyParams=[\"batchId\"])\n print(\"<p>Oligonucleotides of all guides for pooled cloning into a lentiviral vector can be downloaded from the <a href='%s'>Saturating mutagenesis page</a>.</p>\" % satMutUrl)\n\n print \"<hr>\"\n\n print '</div>'", "def planning_project(request, project_slug):\n # This view is currently not working, use only plan_proposal view as\n # only one proposal per project is possible\n project = Project.on_site.get(slug = project_slug)\n return render_to_response('planning_project.html',\n {'project_name' : project.name},\n context_instance = RequestContext(request))", "def portal_main_page(request):\n return render_to_response('GestioneUtenti/profilo.html',{'request':request})" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A view to return the Privacy Policy page
def privacy_policy(request): return render(request, "privacy-policy.html")
[ "def privacy_policy_view(request, *args, **kwargs):\n return render(request, \"privacy_policy.html\")", "def privacy_policy():\r\n\treturn render_template(\"privacy_policy.html\", page_title=misc.page_title(\"pp\"))", "def privacy(self):\n return render('/privacy.html')", "def policy():\n return render_template('dashboard/policy.html', tagname = 'policy')", "def pdp_protect(view):\n\n @functools.wraps(view)\n def wrapped(request, *args, **kwargs):\n pdp = getPDP()\n if pdp:\n try:\n authorized = pdp.authorize(request)\n message = \"Not authorized\"\n code = \"NotAuthorized\"\n except AuthorisationException as e:\n authorized = False\n message = str(e)\n code = e.code\n\n if not authorized:\n encoder = OWS20ExceptionXMLEncoder()\n return HttpResponse(\n encoder.serialize(\n encoder.encode_exception(message, \"2.0.0\", code)\n ),\n encoder.content_type, status=403\n )\n\n return view(request, *args, **kwargs)\n\n return wrapped", "def show_privacy():\n return _run_speedify_cmd([\"show\", \"privacy\"])", "def getPrivacy(self):\n return self.base.get(\"privacy\", [])", "def custom_privacy_policy_url(self) -> str:\n return pulumi.get(self, \"custom_privacy_policy_url\")", "def legal_notice(request):\n return render(request, \"search/legal_notice.html\")", "def capture_policy(url):\n t_maker = html2text.HTML2Text()\n t_maker.ignore_links = True\n t_maker.ignore_images = True\n t_maker.ignore_emphasis = True\n html = requests.get(url)\n text = t_maker.handle(html.text)\n privacy_source = open(args.privacy_source, \"a\")\n privacy_source.write(text)", "def show_policy_mitigation(self,policy_name):\n self.show_policy_basic(policy_name)\n panel_xpath = u\"//a[starts-with(@class,'submenu_')]//span[normalize-space(.)='%s']\" % u\"Mitigation設定\"\n self._selenium.wait_until_page_contains_element(panel_xpath)\n self._selenium.click_element(panel_xpath)\n time.sleep(1)\n self._selenium.wait_until_page_contains_element(u\"//button[.='キャンセル']\")\n self.verbose_capture()\n BuiltIn().log(\"Showed mitigation setting of the policy `%s`\" % policy_name)", "def getPrivacy(self):\n return FacebookPrivacy(self.base.get(\"privacy\", []))", "def do_printPolicy(self,args):\n parser = CommandArgumentParser(\"printPolicy\")\n args = vars(parser.parse_args(args))\n\n policy = self.client.describe_policies(AutoScalingGroupName=self.scalingGroup)\n pprint(policy)", "def protected_view(request):\n return render(request, 'companies/protected.html', {'current_user': request.user})", "def show_policy_display(self,policy_name):\n self.show_policy_basic(policy_name)\n panel_xpath = u\"//a[starts-with(@class,'submenu_')]//span[normalize-space(.)='%s']\" % u\"閲覧設定\"\n self._selenium.wait_until_page_contains_element(panel_xpath)\n self._selenium.click_element(panel_xpath)\n time.sleep(1)\n self._selenium.wait_until_page_contains_element(u\"//button[.='キャンセル']\")\n self.verbose_capture()\n BuiltIn().log(\"Showed NW monitoring setting of the policy `%s`\" % policy_name)", "def professor_dashboard(request):\n assignments = list(Assignment.objects.filter(owner = request.user.professor).all())\n\n return render_to_response(\"professor/index.html\", \\\n {'user': request.user, 'assignments': assignments})", "def advisors(request):\n return render(request, \"advisors.html\", {})", "def sendPolicy():\n return send_from_directory('publicpolicy', 'publicpolicy.py')", "def test_07_privacy_preference_screen(self):\n self.__load_app_first_screen_welcome()\n self.web_welcome.click_manage_options()\n 
self.privacy_preference.verify_privacy_preference_screen()\n self.privacy_preference.verify_toggles(self.privacy_preference.APP_ANALYTICS)\n self.privacy_preference.verify_toggles(self.privacy_preference.ADVERTISING)\n self.privacy_preference.verify_toggles(self.privacy_preference.PERSONALIZED_SUGGESTIONS)\n self.privacy_preference.verify_continue_btn()\n self.privacy_preference.verify_back_btn()", "def public_view3(request):\n return HttpResponse(\"PublicView\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A view to return the Useful Resources page
def useful_resources(request): return render(request, "useful-resources.html")
[ "def resources():\n return render_template('resources.html', title='Resources')", "def resources():", "def help(self):\n return render('/help.html')", "def get_static_web_help_page():\n ...", "def test_resource_details(self):\r\n\r\n # Check details page\r\n resp = self.client['maintainer'].get(self.urls['resource'])\r\n self.assertEqual(resp.status_code, 200)\r\n self.assertTemplateUsed(resp, 'resources/resource_detail.html')\r\n # Test if RLStats was created automatically\r\n self.assertTrue(self.team.language.name.encode('utf-8') in resp.content)\r\n\r\n # response.context[-1] holds our extra_context. maybe we should check\r\n # some of these to make sure they're there?\r", "def help():\n#TODO Complete for people using the interface\n return render_template('help.html')", "def resource_detailed_view(request, id='1'):\n\n try:\n resource = Resource.objects.get(global_id=id)\n except ObjectDoesNotExist, e:\n # create into local database the resource\n metadata = get_resource_metadata(id)\n # look for resource owner, if he exists locally\n try:\n resource_owner = User.objects.get(username=metadata['author'])\n # TODO create a particular type of resource rather than a Resource\n if str(metadata['type']).lower() == \"workflow\":\n resource = Workflow(global_id=id, owner=resource_owner)\n else:\n resource = Resource(global_id=id, owner=resource_owner)\n resource.save(metadata=metadata)\n\n except ObjectDoesNotExist, e:\n # TODO create a new user or assign resource temporarly to the President :-)\n resource = Resource(global_id=id, owner=User.objects.get(username='mbalasso'))\n resource.save(metadata=metadata)\n\n finally:\n resource.metadata = metadata\n # TODO set resource workflow\n # set_workflow(resource, ResourceWorkflow)\n\n except MultipleObjectsReturned:\n\n # seems like the President has stolen something :-)\n resources = Resource.objects.filter(global_id=id)\n metadata = get_resource_metadata(global_id=id)\n for r in resources:\n if r.owner.username != metadata['author']:\n r.delete()\n\n resource = Resource.objects.get(global_id=id)\n resource.metadata = metadata\n\n # Count visit hit\n resource.metadata['views'] = resource.update_views_counter()\n\n # INJECT DEFAULT VALUES\n resource.citations = [{'citation': \"STH2013 VPH-Share Dataset CVBRU 2011\", \"link\": get_random_citation_link()}]\n resource.status = \"Published\"\n resource.language = \"English\"\n resource.version = \"1.0\"\n resource.related = []\n\n # check if the resource has been already requested by user\n if not request.user.is_anonymous(): # and not has_permission(resource, request.user, 'can_read_resource'):\n try:\n resource_request = ResourceRequest.objects.get(resource=resource, requestor=request.user)\n resource_request_state = get_state(resource_request)\n if resource_request_state.name in ['Pending', 'Refused']:\n resource.already_requested = True\n resource.request_status = resource_request_state.name\n except ObjectDoesNotExist, e:\n resource.already_requested = False\n\n try:\n workflow = Workflow.objects.get(global_id=id)\n if str(workflow.metadata['name']).lower().count('aneurist'):\n resource.related = ['<a href=\"http://www.onlinehpc.net/\" target=\"_blank\">Taverna Online tool</a>']\n except ObjectDoesNotExist, e:\n workflow = None\n\n return render_to_response(\n 'scs_resources/resource_details.html',\n {'resource': resource,\n 'workflow': workflow,\n 'requests': []},\n RequestContext(request)\n )", "def about_us(request):\n return render(request, './Venter/about_us.html')", "def Info(request):\n 
return render_to_response('radabo/info.html', {})", "def display_resources(self, env):\n inventory, resources, statuses = self.get_environment_info(env)\n\n display(\" resources: \", color='green', stderr=False)\n for r in resources:\n if statuses[r]['state'] != 'deployed':\n color='red'\n else: \n color='green'\n display(\" %s:\" % r, color=color, stderr=False)\n display(\" %s\" % json.dumps(statuses[r], indent=10), color=color, stderr=False)", "def solvationIndex(request):\n return render(request, 'solvationTools.html')", "def furtherreadings():\n return render_template('furtherreadings.html')", "def hr_mgmt(request, ):\n title = \"HR Management\"\n hrmgmt = HRManagement.objects.all()\n return render(request, 'hr_mgmt.html', locals())", "def test_release_details_resources(self):\r\n resp = self.client['anonymous'].get(self.urls['release'])\r\n\r\n # The list at the top of the page should include this resource.\r\n self.assertContains(resp, \"Test Project: Resource1\")\r\n\r\n # One of the languages is totally untranslated.\r\n self.assertContains(resp, \"Untranslated: %s\" % self.resource.source_entities.count())", "def api_help():\r\n\tdata = {\"base_url\": os.getenv(\"WWW2PNG_BASE_URL\")}\r\n\treturn render_template(\"api_help.html\", page_title=misc.page_title(\"api_help\"), data=data)", "def api_help():\n docs = [{'name': route.__name__, 'value': route.__doc__}\n for route in [aliases, connections, databases, fetch, now, reports]]\n return render_template('help.html', docs=docs, version=__version__, url_root=request.url_root)", "def admin_guide(request):\n return HttpResponse(\"Admin Guide\")", "def index(request):\n return render(request, 'inventory/index.html')", "def how_to(request):\n\n return render(request, 'howto.html', {})", "def about(request):\n\n\treturn render(request, 'about/about.html')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Submits a pyspark job to your dataproc cluster. Specify the main script with main_python_file. You can add script parameters with the script_parameters parameter.
def submit_pyspark_job(self, cluster_name, main_python_file, script_parameters=[]):
    job_details = {
        'projectId': self.project_id,
        'job': {
            'placement': {
                'clusterName': cluster_name
            },
            'pysparkJob': {
                'mainPythonFileUri': main_python_file,
                'args': script_parameters
            }
        }
    }
    result = self.client.projects().regions().jobs().submit(
        projectId=self.project_id,
        region=self.region,
        body=job_details).execute()
    job_id = result['reference']['jobId']
    logging.info('Submitted job with ID: {}'.format(job_id))
    return job_id
[ "def submit_pyspark_job(self, job_config):\n\n job_config = {\n \"args\": [\n \"gs://fynd-new-bucket/a/README.txt\"\n ],\n \"pythonFileUris\": [\n job_config.get('files.zip')\n ],\n \"mainPythonFileUri\": job_config.get('main.py')\n }\n\n project = self.project_config.get('project', 'fynd-1088')\n region = self.project_config.get('region', 'asia-south1')\n cluster_name = self.cluster_config.get('cluster_name', 'test')\n\n pythonFileUris = job_config.get('pythonFileUris')\n mainPythonFileUri = job_config.get('mainPythonFileUri')\n subm_args = job_config.get('args', [])\n\n job_details = {\n 'projectId': project,\n 'job': {\n 'placement': {\n 'clusterName': cluster_name\n },\n \"pysparkJob\": {\n \"mainPythonFileUri\": mainPythonFileUri,\n \"args\": subm_args,\n \"pythonFileUris\": pythonFileUris\n }\n }\n }\n result = self.dataproc.projects().regions().jobs().submit(\n projectId=project,\n region=region,\n body=job_details).execute()\n job_id = result['reference']['jobId']\n print('Submitted job ID {}'.format(job_id))\n return job_id", "def submit_pyspark_job(dataproc, project, region, cluster_name, bucket_name, filename):\n job_details = {\n 'projectId': project,\n 'job': {\n 'placement': {\n 'clusterName': cluster_name\n },\n 'pysparkJob': {\n 'mainPythonFileUri': 'gs://{}/{}'.format(bucket_name, filename)\n }\n }\n }\n result = dataproc.projects().regions().jobs().submit(\n projectId=project,\n region=region,\n body=job_details).execute()\n job_id = result['reference']['jobId']\n print('Submitted job ID {}'.format(job_id))\n return job_id", "def _spark_submit(self, application_class):\n\n # get spark submit String\n spark_str = self._get_spark_submit_str()\n\n logging.info(\"Executing Spark: {}\".format(spark_str))\n # Add step to EMR cluster.\n step_name = \"EMR Step: Running Spark Application Class {}\".format(application_class)\n\n # execute Spark submit\n self._execution_system.run_command_in_cluster(spark_str, step_name)", "def start_spark(app_name='my_spark_app', master='local[*]', jar_packages=[],\n files=[], spark_config={}):\n\n # detect execution environment\n flag_repl = not(hasattr(__main__, '__file__'))\n flag_debug = 'DEBUG' in environ.keys()\n\n if not (flag_repl or flag_debug):\n # get Spark session factory\n spark_builder = (\n SparkSession\n .builder\n .appName(app_name))\n else:\n # get Spark session factory\n spark_builder = (\n SparkSession\n .builder\n .master(master)\n .appName(app_name))\n\n # create Spark JAR packages string\n spark_jars_packages = ','.join(list(jar_packages))\n spark_builder.config('spark.jars.packages', spark_jars_packages)\n\n spark_files = ','.join(list(files))\n spark_builder.config('spark.files', spark_files)\n\n # add other config params\n for key, val in spark_config.items():\n spark_builder.config(key, val)\n\n # create session and retrieve Spark logger object\n spark_sess = spark_builder.getOrCreate()\n spark_logger = Log4j(spark_sess)\n\n # get config file if sent to cluster with --files\n spark_files_dir = SparkFiles.getRootDirectory()\n config_files = [filename\n for filename in listdir(spark_files_dir)\n if filename.endswith('config.json')]\n\n if config_files:\n path_to_config_file = path.join(spark_files_dir, config_files[0])\n with open(path_to_config_file, 'r') as config_file:\n config_dict = json.load(config_file)\n spark_logger.warn('loaded config from ' + config_files[0])\n else:\n spark_logger.warn('no config file found')\n config_dict = None\n\n return spark_sess, spark_logger, config_dict", "def job(\n ctx,\n 
cluster_id,\n step_name,\n bucket,\n prefix,\n packages,\n action_on_failure,\n job_name,\n job_kwargs,\n):\n # Get Python code assets from S3\n distribution_prefix = join(\"s3://\", bucket, prefix)\n python_major_version = sys.version[:3]\n egg_key = join(\n distribution_prefix,\n \"{{cookiecutter.package_name}}-\"\n + __version__\n + \"-py\"\n + python_major_version\n + \".egg\",\n )\n pyspark_entrypoint_key = join(distribution_prefix, \"pyspark_entrypoint.py\")\n\n # Build the arguments to send to command-runner.jar\n spark_submit_cmd = [\"spark-submit\"]\n if packages:\n spark_submit_cmd.extend([\"--packages\", packages])\n spark_submit_cmd.extend([\"--py-files\", egg_key])\n spark_submit_cmd.append(pyspark_entrypoint_key)\n spark_submit_cmd.extend([\"--job-name\", job_name])\n if job_kwargs:\n spark_submit_cmd.extend([\"--job-kwargs\"] + list(job_kwargs))\n msg = f\"Will execute the following spark-submit command on EMR Master:\\n\\t{spark_submit_cmd}\\n\\n\"\n logger.info(msg)\n\n # Submit the EMR Step through the API\n client = boto3.client(\"emr\")\n response = client.add_job_flow_steps(\n JobFlowId=cluster_id,\n Steps=[\n {\n \"Name\": step_name,\n \"ActionOnFailure\": action_on_failure,\n \"HadoopJarStep\": {\n \"Jar\": \"command-runner.jar\",\n \"Args\": spark_submit_cmd,\n },\n },\n ],\n )\n logger.info(response)", "def initialize_spark(appName=\"MyApp\", submit_args=None, memory=12):\n import os\n import findspark\n from textwrap import dedent\n\n if \"SPARK_HOME\" not in os.environ:\n raise Exception(\"SPARK_HOME environmental variable not set.\")\n if \"PYSPARK_SUBMIT_ARGS\" not in os.environ:\n os.environ[\n \"PYSPARK_SUBMIT_ARGS\"\n ] = f\"--master local[12] --driver-memory {memory}g --executor-memory {memory}g pyspark-shell\"\n if \"PYSPARK_SUBMIT_ARGS\" not in os.environ:\n raise Exception(\n dedent(\n \"\"\"\\\n PYSPARK_SUNBMIT_ARGS environmental variable not set.\n \n As an example:\n export PYSPARK_SUBMIT_ARGS = \" --master local[8] --driver-memory 8g --executor-memory 8g pyspark-shell\"\n \"\"\"\n )\n )\n findspark.init(os.environ[\"SPARK_HOME\"])\n spark_master = _parse_master(os.environ[\"PYSPARK_SUBMIT_ARGS\"])\n from pyspark.sql import SparkSession\n\n spark = SparkSession.builder.master(spark_master).appName(\"MyApp\").getOrCreate()\n return spark", "def shell_train(global_profile, profiles):\n run_shell(global_profile, profiles, 'pyspark_train')", "def main():\n \n spark = create_spark_session()\n input_data = \"s3a://udacity-dend/\"\n output_data = f\"s3a://{OUTPUT_BUCKET}/\"\n \n song_df = read_song_data(spark, input_data)\n process_song_data(spark, song_df, input_data, output_data) \n process_log_data(spark, song_df, input_data, output_data)\n spark.stop()", "def run(self):\n ssc = StreamingContext(sc, 5)\n from OneTestScenario import SparkApp\n SparkApp(model=model, Context=sc, streamingContext=ssc)\n print \"\\nSpark thread ended.\\n\"", "def main(): \n \n #Print S3 bucket location\n s3_bucket=os.environ[\"s3_bucket\"]\n s3_bucket = s3_bucket.replace(\"'\", \"\")\n \n print (s3_bucket)\n\n spark = create_spark_session()\n print(\"Spark Session Created\")\n\n #Invoke Functions to check data \n check(s3_bucket + \"datalake/\", \"country_table\",spark,\"SELECT count(code_2digit) total_country FROM temp_table\")\n check(s3_bucket + \"datalake/\", \"airport_table\",spark,\"SELECT count(iata_code) total_airport FROM temp_table\")", "def test_sagemaker_scala_jar_multinode(role, image_uri, configuration, sagemaker_session, sagemaker_client):\n spark = 
SparkJarProcessor(\n base_job_name=\"sm-spark-scala\",\n image_uri=image_uri,\n role=role,\n instance_count=2,\n instance_type=\"ml.c5.xlarge\",\n max_runtime_in_seconds=1200,\n sagemaker_session=sagemaker_session,\n )\n\n bucket = spark.sagemaker_session.default_bucket()\n with open(\"test/resources/data/files/data.jsonl\") as data:\n body = data.read()\n input_data_uri = \"s3://{}/spark/input/data.jsonl\".format(bucket)\n S3Uploader.upload_string_as_file_body(\n body=body, desired_s3_uri=input_data_uri, sagemaker_session=sagemaker_session\n )\n output_data_uri = \"s3://{}/spark/output/sales/{}\".format(bucket, datetime.now().isoformat())\n\n scala_project_dir = \"test/resources/code/scala/hello-scala-spark\"\n spark.run(\n submit_app=\"{}/target/scala-2.12/hello-scala-spark_2.12-1.0.jar\".format(scala_project_dir),\n submit_class=\"com.amazonaws.sagemaker.spark.test.HelloScalaSparkApp\",\n submit_jars=[\n \"{}/lib_managed/jars/org.json4s/json4s-native_2.12/json4s-native_2.12-3.6.9.jar\".format(scala_project_dir)\n ],\n arguments=[\"--input\", input_data_uri, \"--output\", output_data_uri],\n configuration=configuration,\n )\n processing_job = spark.latest_job\n\n waiter = sagemaker_client.get_waiter(\"processing_job_completed_or_stopped\")\n waiter.wait(\n ProcessingJobName=processing_job.job_name,\n # poll every 15 seconds. timeout after 15 minutes.\n WaiterConfig={\"Delay\": 15, \"MaxAttempts\": 60},\n )\n\n output_contents = S3Downloader.list(output_data_uri, sagemaker_session=sagemaker_session)\n assert len(output_contents) != 0", "def spark(ctx): # noqa: B902\n pass", "def process_input_data(spark, input_source, output_data, username, password):\n\n # Read data from happiness dimension table from Amazon Redshift\n df_country = spark.read \\\n .format(\"jdbc\") \\\n .option(\"url\", input_source) \\\n .option(\"dbtable\", \"happiness_dim\") \\\n .option(\"user\", username) \\\n .option(\"password\", password) \\\n .option(\"driver\", \"org.postgresql.Driver\") \\\n .load()\n\n # Read data from mode dimension table from Amazon Redshift\n df_mode = spark.read \\\n .format(\"jdbc\") \\\n .option(\"url\", input_source) \\\n .option(\"dbtable\", \"mode_dim\") \\\n .option(\"user\", username) \\\n .option(\"password\", password) \\\n .option(\"driver\", \"org.postgresql.Driver\") \\\n .load()\n\n # Read data from visa dimension table from Amazon Redshift\n df_visa = spark.read \\\n .format(\"jdbc\") \\\n .option(\"url\", input_source) \\\n .option(\"dbtable\", \"visa_dim\") \\\n .option(\"user\", username) \\\n .option(\"password\", password) \\\n .option(\"driver\", \"org.postgresql.Driver\") \\\n .load()\n\n # Read data from immigration fact table from Amazon Redshift\n df_spark = spark.read \\\n .format(\"jdbc\") \\\n .option(\"url\", input_source) \\\n .option(\"dbtable\", \"immigration_fact\") \\\n .option(\"user\", username) \\\n .option(\"password\", password) \\\n .option(\"driver\", \"org.postgresql.Driver\") \\\n .load()\n\n # Join all the dataframes \n df_final = df_spark.join(df_visa, df_spark.visa_type == df_visa.id)\\\n .join(df_mode, df_spark.mode == df_mode.id) \\\n .join(df_country, df_spark.arrival_country == df_country.id) \\\n .select(\n [df_spark.id,\n 'arrival_date',\n 'departure_date',\n 'age',\n 'gender',\n 'visa',\n df_mode.mode,\n 'airline',\n 'duration',\n 'country',\n 'region',\n 'happiness_score',\n 'economy'\n ]\n )\n\n\n\n df_final = df_final.na.fill('NA')\n\n # Extract happiness_score, economy score of united states for comparison\n x = 
df_country.select(['country', 'happiness_score', 'economy']).where(df_country.country == 'United States')\n\n h_score = x.head()[1]\n e_score = x.head()[2]\n\n # Create new columns based on comparison of happiness score and economy score\n df_final = df_final\\\n .withColumn('is_more_happy', when(df_final.happiness_score <= h_score, 'False').otherwise('True')) \\\n .withColumn('is_better_economy', when(df_final.economy <= e_score, 'False').otherwise('True')) \\\n .withColumn('day', dayofweek(df_final.arrival_date))\n\n # Transform date format\n df_final = df_final.withColumn('happiness_score', col('happiness_score').cast('double')) \\\n .withColumn('economy', col('economy').cast('double')) \\\n .withColumn(\"arrival_date\", date_format(col(\"arrival_date\"), \"yyyy-MM-dd'T'HH:mm:ss\")) \\\n .withColumn(\"departure_date\", date_format(col(\"departure_date\"), \"yyyy-MM-dd'T'HH:mm:ss\"))\n\n # Push data to elastic\n df_final.write.format('org.elasticsearch.spark.sql') \\\n .option('es.nodes', output_data) \\\n .option('es.port', 9200) \\\n .option('es.resource', '%s/%s' % ('immigration-', 'immigration')) \\\n .mode('append') \\\n .save()", "def main(overwrite, input_path, target_path):\n spark = get_spark_session()\n data_frame = spark.read.parquet(input_path)\n results = process_data(data_frame)\n save_results(results, target_path, overwrite)", "def test_wait_for_spark_app_id(sdc_builder, sdc_executor, cluster):\n # STF-1156: STF Does not properly configure Spark Executor for Secured Cluster\n if cluster.hdfs.is_kerberized:\n pytest.skip('Spark Executor tests on secured cluster are not supported.')\n\n submit_timeout_secs = 10\n python_data = 'print(\"Hello World!\")'\n tmp_directory = '/tmp/out/{}'.format(get_random_string(string.ascii_letters, 10))\n python_suffix = 'py'\n application_name = ''.join(['stf_', get_random_string(string.ascii_letters, 10)])\n\n # build the 1st pipeline - file generator\n builder = sdc_builder.get_pipeline_builder()\n\n dev_raw_data_source = builder.add_stage('Dev Raw Data Source').set_attributes(data_format='TEXT',\n raw_data=python_data)\n local_fs = builder.add_stage('Local FS', type='destination')\n local_fs.set_attributes(data_format='TEXT', directory_template=tmp_directory,\n files_prefix='sdc-${sdc:id()}', files_suffix=python_suffix, max_records_in_file=1)\n # we use the finisher so as local_fs can generate event with file_path being generated\n pipeline_finisher_executor = builder.add_stage('Pipeline Finisher Executor')\n events_wiretap = builder.add_wiretap()\n\n dev_raw_data_source >> local_fs >= [pipeline_finisher_executor, events_wiretap.destination]\n\n pipeline = builder.build(title='To File pipeline').configure_for_environment(cluster)\n sdc_executor.add_pipeline(pipeline)\n\n # run the pipeline and capture the file path\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n file_path = events_wiretap.output_records[0].field['filepath'].value\n\n # build the 2nd pipeline - spark executor\n builder = sdc_builder.get_pipeline_builder()\n dev_raw_data_source = builder.add_stage('Dev Raw Data Source').set_attributes(data_format='TEXT',\n raw_data='dummy',\n stop_after_first_batch=True)\n\n spark_executor = builder.add_stage(name=SPARK_EXECUTOR_STAGE_NAME)\n spark_executor.set_attributes(minimum_number_of_worker_nodes=1,\n maximum_number_of_worker_nodes=1,\n application_name=application_name,\n deploy_mode='CLUSTER',\n driver_memory='10m',\n executor_memory='10m',\n application_resource=file_path,\n language='PYTHON',\n 
spark_app_submission_time_in_s=submit_timeout_secs)\n\n wiretap = builder.add_wiretap()\n dev_raw_data_source >> spark_executor >= wiretap.destination\n\n pipeline = builder.build(title='Spark executor pipeline').configure_for_environment(cluster)\n sdc_executor.add_pipeline(pipeline)\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n assert 'default user (sdc)' == wiretap.output_records[0].field['submitter'].value\n assert wiretap.output_records[0].field['timestamp'].value\n # We can't directly assert app-id since we retrieve that field on a best effort basis. Spark might or might not\n # give us the generated ID by the time we need to exit the stage execution.\n# assert wiretap.output_records[0].field['app-id'].value\n\n # assert Spark executor has triggered the YARN job\n assert cluster.yarn.wait_for_app_to_register(application_name)", "def test_sagemaker_java_jar_multinode(tag, role, image_uri, configuration, sagemaker_session, sagemaker_client):\n spark = SparkJarProcessor(\n base_job_name=\"sm-spark-java\",\n framework_version=tag,\n image_uri=image_uri,\n role=role,\n instance_count=2,\n instance_type=\"ml.c5.xlarge\",\n max_runtime_in_seconds=1200,\n sagemaker_session=sagemaker_session,\n )\n\n bucket = spark.sagemaker_session.default_bucket()\n with open(\"test/resources/data/files/data.jsonl\") as data:\n body = data.read()\n input_data_uri = \"s3://{}/spark/input/data.jsonl\".format(bucket)\n S3Uploader.upload_string_as_file_body(\n body=body, desired_s3_uri=input_data_uri, sagemaker_session=sagemaker_session\n )\n output_data_uri = \"s3://{}/spark/output/sales/{}\".format(bucket, datetime.now().isoformat())\n\n java_project_dir = \"test/resources/code/java/hello-java-spark\"\n spark.run(\n submit_app=\"{}/target/hello-java-spark-1.0-SNAPSHOT.jar\".format(java_project_dir),\n submit_class=\"com.amazonaws.sagemaker.spark.test.HelloJavaSparkApp\",\n arguments=[\"--input\", input_data_uri, \"--output\", output_data_uri],\n configuration=configuration,\n )\n processing_job = spark.latest_job\n\n waiter = sagemaker_client.get_waiter(\"processing_job_completed_or_stopped\")\n waiter.wait(\n ProcessingJobName=processing_job.job_name,\n # poll every 15 seconds. 
timeout after 15 minutes.\n WaiterConfig={\"Delay\": 15, \"MaxAttempts\": 60},\n )\n\n output_contents = S3Downloader.list(output_data_uri, sagemaker_session=sagemaker_session)\n assert len(output_contents) != 0", "def main():\n\n # Create Spark session\n # spark = s.CreateSession.create_spark_session('Immigration')\n spark, logger = s.CreateSession.create_spark_session('ProcessImmigrationRecords')\n\n\n # Create object of ExtractData to read data from S3\n # ext = e.ExtractData(spark)\n ext = e.ExtractData(spark)\n\n\n # Initialize CleanAndTransformDim to process the data\n # which will be uploaded in S3, this class creates dimension dfs\n # dim = d.CleanAndTransformDim()\n dim = d.CleanAndTransformDim()\n\n\n # Initialize CleanAndTransformFact to process the data\n # which will be uploaded in S3, this class creates fact data frame\n # fact = f.CleanAndTransformFact()\n fact = f.CleanAndTransformFact()\n\n\n # Initialize the class to write data in S3\n # in parquet format\n load = w.CreateSchema()\n\n\n \"\"\" MODE DIM \"\"\"\n # Load mode of travel information into\n # a dimension table in s3\n mode_data = ext.get_mode_info()\n mode_dim = dim.dim_mode(mode_data)\n load.load_mode_dim(mode_dim)\n\n\n \"\"\" COUNTRY DIM \"\"\"\n # Load list of country name along with\n # codes into dimension table in s3\n country_data = ext.get_country_info()\n country_dim = dim.dim_country(country_data)\n load.load_country_dim(country_dim)\n\n\n \"\"\" PORTS DIM \"\"\"\n # Load list of ports along with their\n # code and load into ports dim in s3\n ports_data = ext.get_ports_info()\n ports_dim = dim.dim_ports(ports_data)\n load.load_ports_dim(ports_dim)\n\n\n \"\"\" AIRPORTS DIM \"\"\"\n # Clean and load the airport data into\n # airports dimension table in s3\n airports_data = ext.get_airport_info()\n airports_dim = dim.dim_airports(airports_data)\n load.load_airports_dim(airports_dim)\n\n\n \"\"\" STATES DIM \"\"\"\n # Combine the city data along with states\n # create a dimension with details of city along\n # with state codes store in dim table in s3\n states_data = ext.get_states_info()\n city_data = ext.get_city_info()\n states_dim = dim.dim_states(states_data, city_data)\n load.load_states_dim(states_dim)\n\n\n \"\"\" DATE DIM \"\"\"\n # Creates time dimension from distinct arrival and\n # departure dates provided with immigration data\n # store the dim in s3\n imm_data = ext.get_immigration_info()\n date_dim = dim.dim_time(imm_data)\n load.load_date_dim(date_dim)\n\n\n \"\"\" VISA DIM \"\"\"\n # Combine the visa code with the visa types provided\n # with immigration data and create visa dimension\n # and store in s3\n visa_data = ext.get_visa_info()\n visa_dim = dim.dim_visa(visa_data, imm_data)\n load.load_visa_dim(visa_dim)\n\n\n \"\"\" NON IMMIGRANT DIM \"\"\"\n # Create a separate dim table for attributes related to non immigrant\n # that are not event related and can be put into a separate\n # dimension table (SCDs)\n non_imm_data = dim.dim_non_imm(imm_data)\n load.load_non_imm_dim(non_imm_data)\n\n\n \"\"\" IMMIGRATION FACT \"\"\"\n # Extract, transform and load the immigration information\n # into fact table, data is validated against required dim tables\n # to filter out the invalid records, save fact table in s3\n imm_fact = fact.fact_immigration(imm_data, ports_dim, airports_dim,\n mode_dim, country_dim, visa_dim,\n states_dim)\n load.load_immigration_fact(imm_fact)", "def main(args):\n\n logger.info(f\"Received arguments {args}\")\n DATABASE, TABLE, region = args.database, args.table, 
args.region\n\n boto3.setup_default_session(region_name=f\"{region}\")\n df = wr.athena.read_sql_query(\n f'SELECT * FROM \"{TABLE}\"', database=DATABASE, ctas_approach=False\n )\n\n df = df[columns]\n df = df.astype(col_type)\n logger.info(df.dtypes)\n\n df = df.drop([\"area code\", \"phone\"], 1)\n df = df.dropna()\n\n if args.coxph:\n del df[\"account length\"]\n\n # no fit predict method currently supported for DenseClus\n # See: https://github.com/awslabs/amazon-denseclus/issues/4\n if args.cluster:\n\n logger.info(\"Clustering data\")\n clf = DenseClus()\n clf.fit(df)\n logger.info(\"Clusters fit\")\n\n df[\"segments\"] = clf.score()\n df[\"segments\"] = df[\"segments\"].astype(str)\n\n logger.info(\"Load Preprocessing Model\")\n preprocess = joblib.load(\"/opt/ml/processing/transformer/preprocessor.joblib\")\n\n logger.info(\"Running feature engineering transformations\")\n test_features = preprocess.transform(df)\n\n logger.info(f\"Infer data shape after preprocessing: {test_features.shape}\")\n\n test_features_output_path = os.path.join(\n \"/opt/ml/processing/infer\", \"infer_features.csv\"\n )\n if isinstance(test_features, pd.DataFrame):\n test_features.to_csv(test_features_output_path, header=False, index=False)\n else:\n pd.DataFrame(test_features).to_csv(\n test_features_output_path, header=False, index=False\n )", "def shell(global_profile, profiles):\n run_shell(global_profile, profiles, 'pyspark')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Performs inference for the given objects with model_name. For each object in objects, returns the topN best predictions. The retry parameter determines whether to retry on HTTP errors indicated by
def create_inference_request( self, model_name: str, objects: List[dict], top_n: int = TOP_N, retry: bool = True, ) -> dict: self.log.debug( "Submitting Inference request for model '%s' with '%s'" " objects and top_n '%s' ", model_name, len(objects), top_n, ) endpoint = InferencePaths.format_inference_endpoint_by_name(model_name) response = self.session.post_to_endpoint( endpoint, payload={"topN": top_n, "objects": objects}, retry=retry ) as_json = response.json() self.log.debug("Inference response ID: %s", as_json["id"]) return as_json
[ "def do_bulk_inference(\n self,\n model_name: str,\n objects: List[dict],\n top_n: int = TOP_N,\n retry: bool = True,\n worker_count: int = 4,\n ) -> List[Union[dict, None]]:\n\n if worker_count is None:\n raise InvalidWorkerCount(\"worker_count cannot be None!\")\n\n if worker_count > 4:\n msg = \"worker_count too high: %s. Up to 4 allowed.\" % worker_count\n raise InvalidWorkerCount(msg)\n\n if worker_count <= 0:\n msg = \"worker_count must be greater than 0!\"\n raise InvalidWorkerCount(msg)\n\n def predict_call(work_package):\n try:\n response = self.create_inference_request(\n model_name, work_package, top_n=top_n, retry=retry\n )\n return response[\"predictions\"]\n except (DARHTTPException, RequestException) as exc:\n self.log.warning(\n \"Caught %s during bulk inference. \"\n \"Setting results to None for this batch!\",\n exc,\n exc_info=True,\n )\n\n prediction_error = [\n {\n \"objectId\": inference_object.get(\"objectId\", None),\n \"labels\": None,\n \"_sdk_error\": \"{}: {}\".format(exc.__class__.__name__, str(exc)),\n }\n for inference_object in work_package\n ]\n return prediction_error\n\n results = []\n\n with ThreadPoolExecutor(max_workers=worker_count) as pool:\n results_iterator = pool.map(\n predict_call, split_list(objects, LIMIT_OBJECTS_PER_CALL)\n )\n\n for predictions in results_iterator:\n results.extend(predictions)\n\n return results", "def create_inference_request_with_url(\n self,\n url: str,\n objects: List[dict],\n top_n: int = TOP_N,\n retry: bool = True,\n ) -> dict:\n self.log.debug(\n \"Submitting Inference request with '%s'\"\n \" objects and top_n '%s' to url %s\",\n len(objects),\n top_n,\n url,\n )\n response = self.session.post_to_url(\n url, payload={\"topN\": top_n, \"objects\": objects}, retry=retry\n )\n as_json = response.json()\n self.log.debug(\"Inference response ID: %s\", as_json[\"id\"])\n return as_json", "def test_predict_topk(self):\n model = self.model\n for output_type in [\"rank\", \"probability\"]:\n preds = model.predict_topk(\n self.data, output_type=output_type, output_frequency=\"per_window\"\n )\n expected_len = self._calc_expected_predictions_length(self.data, top_k=3)\n self.assertEqual(len(preds), expected_len)\n\n preds = model.predict_topk(\n self.data.head(100), k=5, output_frequency=\"per_window\"\n )\n expected_len = self._calc_expected_predictions_length(\n self.data.head(100), top_k=5\n )\n self.assertEqual(len(preds), expected_len)", "def get_predictions(model: AbstractEmbeddingModel,\n rerank: AbstractRerankStrategy,\n labelmap,\n num_to_rerank,\n top_k,\n distance_func='cosine'):\n train_image_paths = [\n x for x in pathlib.Path(\n const.INFER_TRAIN_IMAGE_DIR).rglob('*.jpg')]\n test_image_paths = [\n x for x in pathlib.Path(\n const.INFER_TEST_IMAGE_DIR).rglob('*.jpg')]\n\n test_ids, test_embeddings = \\\n model.extract_global_features(test_image_paths)\n train_ids, train_embeddings = \\\n model.extract_global_features(train_image_paths)\n train_ids_labels_and_scores = [None] * test_embeddings.shape[0]\n\n # Using (slow) for-loop, as distance matrix doesn't fit in memory.\n for test_index in range(test_embeddings.shape[0]):\n distances = spatial.distance.cdist(\n test_embeddings[np.newaxis, test_index, :], train_embeddings,\n distance_func)[0]\n\n partition = np.argpartition(distances, num_to_rerank)[:num_to_rerank]\n nearest = sorted([(train_ids[p], distances[p]) for p in partition],\n key=lambda x: x[1])\n\n train_ids_labels_and_scores[test_index] = [\n (train_id, labelmap[utils.to_hex(train_id)], 1. 
- cosine_distance)\n for train_id, cosine_distance in nearest\n ]\n\n del test_embeddings\n del train_embeddings\n gc.collect()\n\n pre_verification_predictions = get_prediction_map(\n test_ids, train_ids_labels_and_scores)\n\n for test_index, test_id in enumerate(test_ids):\n train_ids_labels_and_scores[test_index] = \\\n rerank.rescore_and_rerank(\n test_id, train_ids_labels_and_scores[test_index])\n\n post_verification_predictions = get_prediction_map(\n test_ids, train_ids_labels_and_scores, top_k)\n\n return pre_verification_predictions, post_verification_predictions", "def train_models(dataset, ev, n_folds, n_clusters):\n best = np.zeros((4, n_clusters), dtype=int)\n for i in range(n_folds):\n print(\"Training models for fold {}-th\".format(i))\n urm, tg_tracks, tg_playlist = ev.get_fold(dataset)\n\n maps_cbf = results_cbf(dataset, ev, urm,\n tg_tracks, tg_playlist, n_clusters)\n\n maps_ibf = results_ibf(dataset, ev, urm,\n tg_tracks, tg_playlist, n_clusters)\n\n maps_ubf = results_ubf(dataset, ev, urm,\n tg_tracks, tg_playlist, n_clusters)\n\n # maps_mf = results_mf(dataset, ev, urm,\n # tg_tracks, tg_playlist, n_clusters)\n\n maps_pop = result_pop_cbf(dataset, ev, urm,\n tg_tracks, tg_playlist, n_clusters)\n\n print(\"Computing best model for each cluster...\")\n maps = [maps_cbf, maps_ibf, maps_ubf, maps_pop]\n\n curr_best = [x.index(max(x)) for x in zip(*maps)]\n for cluster, model in enumerate(curr_best):\n best[model, cluster] += 1\n return best", "def determine_best_top_n_model(db_name, img_num, list_model, n):\n if n not in [1, 5]:\n print(str(n) + \"is not a valid number, must be 1 or 5\")\n print(\"Exiting...\")\n sys.exit()\n \n connection = connect_db(db_name)\n cursor = connection.cursor()\n\n query = \"SELECT model_name, top_\" + str(n)\n query += \", performance FROM exec_data WHERE img_num=(%s)\"\n\n potential = list()\n \n cursor.execute(query, (img_num,))\n for row in cursor.fetchall():\n model_name, top_n, performance = row\n \n if model_name in list_model and top_n == 1:\n potential.append((model_name, performance))\n \n if potential == list():\n return 'failed'\n\n return min(potential, key=lambda x: x[1])[0]", "def get_predictions(args):\n ## List of .json file inside the args.path (waiting to be predicted by the testing model)\n datasets = sorted([f.split('.')[-2] for f in os.listdir(args.path.replace('_pred', '')) if not f.startswith('.') and f.endswith('.ndjson')])\n\n ## Extract Model names from arguments and create its own folder in 'test_pred' for storing predictions\n ## WARNING: If Model predictions already exist from previous run, this process SKIPS WRITING\n for model in args.output:\n model_name = model.split('/')[-1].replace('.pkl', '')\n model_name = model_name + '_modes' + str(args.modes)\n\n ## Check if model predictions already exist\n if not os.path.exists(args.path):\n os.makedirs(args.path)\n if not os.path.exists(args.path + model_name):\n os.makedirs(args.path + model_name)\n else:\n print('Predictions corresponding to {} already exist.'.format(model_name))\n print('Loading the saved predictions')\n continue\n\n print(\"Model Name: \", model_name)\n predictor = load_predictor(model)\n goal_flag = predictor.model.generator.goal_flag\n\n # Iterate over test datasets\n for dataset in datasets:\n # Load dataset\n dataset_name, scenes, scene_goals = load_test_datasets(dataset, goal_flag, args)\n\n # Get all predictions in parallel. 
Faster!\n scenes = tqdm(scenes)\n pred_list = Parallel(n_jobs=12)(delayed(predict_scene)(predictor, model_name, paths, scene_goal, args)\n for (_, _, paths), scene_goal in zip(scenes, scene_goals))\n \n # Write all predictions\n write_predictions(pred_list, scenes, model_name, dataset_name, args)", "def make_predictions(chosen_model=MobileNetV2,\n tiles=image_tile_slicer()):\n model = chosen_model(weights='imagenet', include_top=True)\n model.compile(optimizer='rmsprop',loss='categorical_crossentropy',metrics=['accuracy'])\n tiles = np.array(tiles)\n predictions = decode_predictions(model.predict(tiles))\n return predictions", "def get_trial_best_models(trial_name):\n trial_dir = args.saved_models + '/{0}/'.format(trial_name)\n run_dirs = [r[0] for r in os.walk(trial_dir)][1:]\n best_models_dir = trial_dir+'best_models/'\n if not os.path.exists(best_models_dir):\n os.makedirs(best_models_dir)\n for j, run_dir in enumerate(run_dirs):\n test_accs = pickle.load(Path(run_dir+'/test_accuracies.pkl').open('rb'))\n best_acc, idx = 0.0, -1\n for i, a in enumerate(test_accs):\n if a > best_acc:\n best_acc = a\n idx = i\n best_model = torch.load(run_dir+'/{0}.pt'.format(idx+1))\n torch.save(best_model, best_models_dir+'{0}.pt'.format(j+1))\n else:\n print('you have already collected the best models for this trial')", "def ensemble(dict_model_acc, test_design, method='vote'):\n pred_models_dict = {}\n pred_models_lst = []\n prob_models_dict = {}\n prob_models_lst = []\n prob1_models_lst = []\n acc_lst = []\n test_design = np.array(test_design)\n\n for name_model, (model, acc) in dict_model_acc.items():\n pred_model = model.predict(test_design).tolist()\n pred_models_dict[name_model] = pred_model\n pred_models_lst.append(pred_model)\n\n acc_lst.append(acc)\n\n pred_models_df = pd.DataFrame(pred_models_lst)\n\n if method == 'vote':\n pred_vote_df = pred_models_df.mode()\n pred_vote_lst = list(pred_vote_df.loc[0, :])\n\n return pred_vote_lst\n\n prob_models_dict = {}\n prob_models_lst = []\n prob1_models_lst = []\n acc_lst = []\n\n for name_model, (model, acc) in dict_model_acc.items():\n prob_model = model.predict_proba(test_design)\n prob1_model = np.array(prob_model)[:, 1].tolist()\n prob_models_dict[name_model] = prob_model\n prob1_models_lst.append(prob1_model)\n prob_models_lst.append(prob_model)\n\n acc_lst.append(acc)\n\n prob1_models_df = pd.DataFrame(prob1_models_lst)\n\n if method == 'avg_unif':\n prob1_avgunif_lst = list(prob1_models_df.mean())\n pred_avgunif_lst = [int(score > 0.5) for score in prob1_avgunif_lst]\n\n return pred_avgunif_lst, prob1_avgunif_lst\n elif method == 'avg_softmax':\n sum_exp_acc = sum(np.exp(acc_lst))\n acc_softmax = [np.exp(item) / sum_exp_acc for item in acc_lst]\n prob1_weighted_df = prob1_models_df.multiply(acc_softmax, axis='rows')\n prob1_softmax_lst = list(prob1_weighted_df.sum())\n pred_softmax_lst = [int(score > 0.5) for score in prob1_softmax_lst]\n\n return pred_softmax_lst, prob1_softmax_lst\n\n #elif method == 'grid_search':", "def evaluate_models(num_splits=10):\n models = {\"Decision Tree\": tree.DecisionTreeClassifier(),\n \"Nearest Neighbor\": neighbors.KNeighborsClassifier(),\n \"Random Forest\": ensemble.RandomForestClassifier(),\n \"Linear SVM\": svm.SVC(kernel=\"linear\"), # the linear kernel shows best performance\n \"LDA\": discriminant_analysis.LinearDiscriminantAnalysis(),\n \"Neural Net\": neural_network.MLPClassifier(solver=\"lbfgs\")} # small datasets favor an lbfgs solver\n\n data = pd.read_csv(f\"features_{num_splits}_splits.csv\", 
index_col=[0])\n # All the models can achieve near perfect accuracy without normalization except for neural networks\n for feature in [\"Mean\", \"Variance\", \"Up\"]:\n data[feature] = (data[feature] - data[feature].mean())/data[feature].std()\n y = data[\"Class\"]\n x = data.drop([\"Class\"], axis=1)\n\n # performing the model testing\n x_train, x_test, y_train, y_test = model_selection.train_test_split(x, y, test_size=0.3, random_state=1)\n performance = {}\n sources = [\"droneA\", \"droneB\", \"wifi\", \"random-signal\"]\n for name, model in models.items():\n model.fit(x_train, y_train)\n predictions = model.predict(x_test)\n report = metrics.classification_report(predictions, y_test, output_dict=True, zero_division=0)\n # The report gives summary results that are not used, so those are filtered out\n performance[name] = {source: report[source] for source in sources}\n\n return performance", "def get_model_top_n(db_name, img_num, model_name, n):\n #print(\"Connecting to database...\")\n connection = connect_db(db_name)\n cursor = connection.cursor()\n \n cmd = 'SELECT top_' + str(n)\n cmd += ' FROM exec_data WHERE model_name=\\''+model_name+'\\' and img_num=\\''+str(img_num)+'\\''\n cursor.execute(cmd)\n result = cursor.fetchall()\n return result[0][0]", "def get_classifiers(conn, n_neighbors = 4, weights = \"uniform\"):\n # 1. get all objects in the database\n objects = database.get_all_objects(conn)\n predictors = []\n for o in objects:\n df = pd.DataFrame()\n # get the locations associated with this object\n ids = database.get_locations_id_for_object(conn, o[0])\n # 2. for each location, get the measuremenets done there\n # and add them 'processed' to the training dataset\n for loc in ids:\n\n measurements = database.get_measurements_for_location(conn, loc[0])\n n_measurements = len(measurements)\n\n print(\"DEBUG: creating classifier for object {0}, processing {1} measurements of location: {2}\".format(o, n_measurements, loc[0]))\n\n measurements = np.array(measurements).reshape(n_measurements,-1)\n\n df_loc = pd.DataFrame(data=np.zeros(shape = (len(measurements),2)), columns=[\"loc\", \"vals\"])\n # add to the frame the features we care about \n df_loc[\"loc\"] = loc[0]\n df_loc[\"vals\"] = measurements.tolist()\n # getting the min and the argmin over the first 50 elements\n df_loc[\"amin\"] = df_loc.vals.apply(lambda x: np.array(x)[:50].argmin())\n df_loc[\"min\"] = df_loc.vals.apply(lambda x: np.array(x)[:50].min())\n df = df.append(df_loc, ignore_index = True)\n\n\n # 3. normalize some features here\n min_mean = df[\"min\"].mean() \n min_std = df[\"min\"].std()\n amin_mean = df[\"amin\"].mean() \n amin_std = df[\"amin\"].std()\n df[\"min\"] = (df[\"min\"] - min_mean) / min_std \n df[\"amin\"] = (df[\"amin\"] - amin_mean) / amin_std \n X = df[[\"amin\", \"min\"]].values\n y = df[\"loc\"].values\n\n if np.count_nonzero(df[\"amin\"].isna()):\n # --> object not valid\n predictors.append((o[0], None))\n continue \n\n # --> object is valid\n # 4. make the classification AND the classifier\n # a. construct the classifier\n clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights) \n clf.fit(X, y)\n\n\n # b. 
make a function to call the classifier and add it to array of function to returns\n def predictor_function(x, obj_id = o[0], \n min_mean = min_mean, min_std = min_std,\n amin_mean = amin_mean, amin_std = amin_std,\n clf = clf):\n \"\"\"Given a list x, it will compute the predictors and run the prediction\n on the apprioated classifier for this object.\"\"\"\n #print(\"Going to make prediction for object \", obj_id)\n x = np.array(x)\n m = (x[0:50].min() - min_mean) / min_std\n am = (x[0:50].argmin() - amin_mean ) / amin_std\n return clf.predict(np.array([[am, m]]))\n\n\n # add the created function to returned array\n predictors.append((o[0], predictor_function))\n return predictors", "def find_best_models(table_prefix, metric='precision_score_at_600', sort_by='min', n=5):\n test_results, _, _ = get_test_results_over_time(table_prefix)\n metrics = list(test_results[0].columns)\n assert metric in metrics\n\n filter_metric = f'num_labeled_samples_at_{metric.rsplit(\"_\", 1)[-1]}'\n assert filter_metric in metrics\n\n\n def get_maximin_values(my_metric):\n # Create results matrix for our metric of shape (model, time)\n results_matrix = np.zeros((test_results[0].shape[0], len(test_results)))\n for i, result in enumerate(test_results):\n results_matrix[:, i] = result[my_metric].to_numpy()\n\n # Calculate mininum-values for our metric over time\n if sort_by == 'min':\n values = results_matrix.min(axis=-1)\n elif sort_by == 'average':\n values = results_matrix.mean(axis=-1)\n elif sort_by == 'last':\n values = results_matrix[:, -1]\n\n return values\n\n\n values = get_maximin_values(metric)\n\n # Filter out values where num_labeled_samples is below some threshold\n num_labeled_samples_min_threshold = 75\n num_labeled_samples_values = get_maximin_values(filter_metric)\n filter_idx = num_labeled_samples_values < num_labeled_samples_min_threshold\n values[filter_idx] = -1\n\n # Find the indices of the best models\n best_model_idx = values.argsort()[::-1]\n best_model_idx = best_model_idx[:n]\n\n return best_model_idx", "def evaluate_models_1():\n df = prepare_general_dataset()\n X = df.loc[:, df.columns != 'bikes']\n Y = df['bikes']\n scores = []\n\n for key, value in MODELS.items():\n X = df.loc[:, df.columns != 'bikes']\n temp_scores = []\n for file in tqdm(glob.glob(value)):\n model = pd.read_csv(file)\n features = model['feature']\n\n # weights\n intercept = model['weight'].values[0]\n weights = model['weight'][1:]\n\n features_used = features.values[1:]\n X = X.filter(items=features_used)\n\n # reindex to perform series multiplication\n weights.index = X.iloc[1, :].index\n\n predictions = X.apply(lambda row: intercept + row.dot(weights), axis=1).astype('int64')\n temp_scores.append(mean_absolute_error(predictions, Y))\n\n print(f'\\nModel {key} performance:')\n print(mean(temp_scores))\n print(min(temp_scores))\n print('\\n')\n\n scores.append(temp_scores)\n\n with open('scores.txt', 'wb') as file:\n pickle.dump(scores, file)\n\n plot_scores_1(scores)", "def models():\n\n # Retrieve the name from url parameter\n language = request.args.get(\"language\", None)\n return_model = request.args.get(\"return_model\", False)\n\n client_project_metrics = [\n float(request.args.get(\"comments_ratio\", 0)),\n float(request.args.get(\"commit_frequency\", 0)),\n float(request.args.get(\"core_contributors\", 0)),\n int(request.args.get(\"has_ci\", False)),\n int(request.args.get(\"has_license\", False)),\n float(request.args.get(\"iac_ratio\", 0)),\n float(request.args.get(\"issue_frequency\", 0)),\n 
float(request.args.get(\"repository_size\", 0))\n ]\n\n response = {}\n\n if language == 'ansible':\n metadata = ansible_models_metadata\n elif language == 'tosca':\n metadata = tosca_models_metadata\n else:\n response[\"ERROR\"] = \"Set a valid language.\"\n return response\n\n path_to_model = None\n most_similar_score = 0\n\n for project in metadata:\n project_metrics = [\n project[\"comments_ratio\"],\n project[\"commit_frequency\"],\n project[\"core_contributors\"],\n int(project[\"has_ci\"]),\n int(project[\"has_license\"]),\n project[\"iac_ratio\"],\n project[\"issue_frequency\"],\n project[\"repository_size\"]\n ]\n\n sim = 1 - distance.cosine(project_metrics, client_project_metrics)\n if sim > most_similar_score:\n \n response['model_id'] = project['id']\n response['similarity'] = sim\n response['models'] = []\n \n most_similar_score = sim\n\n for defect_type in ('conditional', 'configuration_data', 'service', 'general'):\n path_to_model = project['models'].get(defect_type)\n\n if not path_to_model:\n continue\n\n path_to_model = str(Path(path_to_model))\n model = joblib.load(path_to_model, mmap_mode='r')\n \n response['models'].append(\n {\n 'type': defect_type,\n 'rules': export_text(model['estimator'].named_steps['classification'], feature_names=model['selected_features'])\n })\n\n\n return send_file(path_to_model, as_attachment=True) if return_model else response", "def ensemble_models(\n model_paths: List[str],\n cxr_filepath: str,\n cxr_labels: List[str],\n cxr_pair_template: Tuple[str],\n cache_dir: str = None,\n save_name: str = None,\n) -> Tuple[List[np.ndarray], np.ndarray]:\n\n predictions = []\n model_paths = sorted(model_paths) # ensure consistency of\n for path in model_paths: # for each model\n model_name = Path(path).stem\n\n # load in model and `torch.DataLoader`\n model, loader = make(\n model_path=path,\n cxr_filepath=cxr_filepath,\n )\n\n # path to the cached prediction\n if cache_dir is not None:\n if save_name is not None:\n cache_path = Path(cache_dir) / f\"{save_name}_{model_name}.npy\"\n else:\n cache_path = Path(cache_dir) / f\"{model_name}.npy\"\n\n # # if prediction already cached, don't recompute prediction\n # if cache_dir is not None and os.path.exists(cache_path):\n # print(\"Loading cached prediction for {}\".format(model_name))\n # y_pred = np.load(cache_path)\n # else: # cached prediction not found, compute preds\n print(\"Inferring model {}\".format(path))\n test_true = make_true_labels(cxr_true_labels_path=cxr_true_labels_path, cxr_labels=cxr_labels)\n\n y_pred = run_softmax_eval(model, test_true, loader, cxr_labels, cxr_pair_template)\n if cache_dir is not None:\n Path(cache_dir).mkdir(exist_ok=True, parents=True)\n np.save(file=cache_path, arr=y_pred)\n predictions.append(y_pred)\n\n # compute average predictions\n y_pred_avg = np.mean(predictions, axis=0)\n\n return predictions, y_pred_avg", "def process_predict(self):\n rn50_model = rn50()\n iV3_model = iV3(self.img_path)\n dog_filters = df(self.img_path)\n faces, BGR_img = self.Improved_OpenCV_face_detector()\n dogs = rn50_model.dog_detector(self.img_path)\n #if dog and human in the same image, model predicts dog breeds will always based on the dog\n #so we have to cropped the human image from the dog\n if(dogs != 0):\n print('Hello, dog!')\n u.show_upload_image(self.img_path)\n iV3_model.show_top5_result()\n if(len(faces) > 0):\n cropped_imgs = u.crop_detected_faces(BGR_img, faces)\n self.detect_face_on_cropped_imgs(cropped_imgs)\n u.delete_cropped_images()\n #if more than one people in 
the same image, model predicts dog breeds will always show one result\n #so we have to crop the human image to individuals\n else:\n if(len(faces) > 1):\n cropped_imgs = u.crop_detected_faces(BGR_img, faces)\n self.detect_face_on_cropped_imgs(cropped_imgs)\n u.delete_cropped_images()\n elif(len(faces) == 1):\n print('Hello, human!')\n dog_filters.apply_snapchat_filter()\n iV3_model.show_top5_result()\n else:\n print('No human. No dog.')\n u.show_test_image(self.img_path)", "def infer_model():\n # Setup training/testing environment\n setup_env()\n # Construct the model\n model = setup_model()\n # Load model weights\n cp.load_checkpoint(cfg.TEST.WEIGHTS, model)\n logger.info(\"Loaded model weights from: {}\".format(cfg.TEST.WEIGHTS))\n # Create data loaders and meters\n test_loader = data_loader.construct_test_loader()\n test_meter = meters.TestMeter(len(test_loader))\n filename = cfg.OUT_DIR + 'predict.txt'\n loss_fun = builders.build_loss_fun(\"cross_entropy\", \"none\").cuda()\n # Evaluate the model\n infer_epoch(test_loader, model, test_meter, 0, loss_fun, filename, writer=None)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Performs bulk inference for larger collections. For objects collections larger than LIMIT_OBJECTS_PER_CALL, splits the data into several smaller inference requests. Requests are executed in parallel. Returns the aggregated prediction values from the original API responses
def do_bulk_inference( self, model_name: str, objects: List[dict], top_n: int = TOP_N, retry: bool = True, worker_count: int = 4, ) -> List[Union[dict, None]]: if worker_count is None: raise InvalidWorkerCount("worker_count cannot be None!") if worker_count > 4: msg = "worker_count too high: %s. Up to 4 allowed." % worker_count raise InvalidWorkerCount(msg) if worker_count <= 0: msg = "worker_count must be greater than 0!" raise InvalidWorkerCount(msg) def predict_call(work_package): try: response = self.create_inference_request( model_name, work_package, top_n=top_n, retry=retry ) return response["predictions"] except (DARHTTPException, RequestException) as exc: self.log.warning( "Caught %s during bulk inference. " "Setting results to None for this batch!", exc, exc_info=True, ) prediction_error = [ { "objectId": inference_object.get("objectId", None), "labels": None, "_sdk_error": "{}: {}".format(exc.__class__.__name__, str(exc)), } for inference_object in work_package ] return prediction_error results = [] with ThreadPoolExecutor(max_workers=worker_count) as pool: results_iterator = pool.map( predict_call, split_list(objects, LIMIT_OBJECTS_PER_CALL) ) for predictions in results_iterator: results.extend(predictions) return results
[ "def execute(self, requests):\n\n responses = []\n\n # Every Python backend must iterate over everyone of the requests\n # and create a pb_utils.InferenceResponse for each of them.\n for idx, request in enumerate(requests):\n # Get input tensors \n query = pb_utils.get_input_tensor_by_name(request, 'QUERY').as_numpy()\n request_output_len = pb_utils.get_input_tensor_by_name(request, 'REQUEST_OUTPUT_LEN').as_numpy()\n\n bad_words_dict = pb_utils.get_input_tensor_by_name(request, 'BAD_WORDS_DICT').as_numpy()\n stop_words_dict = pb_utils.get_input_tensor_by_name(request, 'STOP_WORDS_DICT').as_numpy()\n\n # Preprocessing input data.\n input_id, request_input_len = self._create_request(query)\n bad_words = to_word_list_format(bad_words_dict)\n stop_words = to_word_list_format(stop_words_dict)\n\n # Create output tensors. You need pb_utils.Tensor\n # objects to create pb_utils.InferenceResponse.\n input_id_tensor = pb_utils.Tensor(\n 'INPUT_ID',\n np.array(input_id).astype(self.input_id_dtype))\n request_input_len_tensor = pb_utils.Tensor(\n 'REQUEST_INPUT_LEN',\n np.array(request_input_len).astype(self.request_input_len_dtype))\n request_output_len_tensor = pb_utils.Tensor(\n 'REQUEST_OUTPUT_LEN',\n request_output_len)\n bad_words_ids_tensor = pb_utils.Tensor(\n 'BAD_WORDS_IDS',\n bad_words)\n stop_words_ids_tensor = pb_utils.Tensor(\n 'STOP_WORDS_IDS',\n stop_words)\n\n\n # Create InferenceResponse. You can set an error here in case\n # there was a problem with handling this inference request.\n # Below is an example of how you can set errors in inference\n # response:\n #\n # pb_utils.InferenceResponse(\n # output_tensors=..., TritonError(\"An error occurred\"))\n inference_response = pb_utils.InferenceResponse(output_tensors=[\n input_id_tensor,\n bad_words_ids_tensor,\n stop_words_ids_tensor,\n request_input_len_tensor,\n request_output_len_tensor])\n responses.append(inference_response)\n\n # You should return a list of pb_utils.InferenceResponse. 
Length\n # of this list must match the length of `requests` list.\n return responses", "def _perform_batch_inference(self, input_data, output_data, **kwargs):\n batch_strategy = kwargs[\"BatchStrategy\"]\n max_payload = int(kwargs[\"MaxPayloadInMB\"])\n data_source, batch_provider = self._prepare_data_transformation(input_data, batch_strategy)\n\n # Output settings\n accept = output_data[\"Accept\"] if \"Accept\" in output_data else None\n\n working_dir = self._get_working_directory()\n dataset_dir = data_source.get_root_dir()\n\n for fn in data_source.get_file_list():\n\n relative_path = os.path.dirname(os.path.relpath(fn, dataset_dir))\n filename = os.path.basename(fn)\n copy_directory_structure(working_dir, relative_path)\n destination_path = os.path.join(working_dir, relative_path, filename + \".out\")\n\n with open(destination_path, \"wb\") as f:\n for item in batch_provider.pad(fn, max_payload):\n # call the container and add the result to inference.\n response = self.local_session.sagemaker_runtime_client.invoke_endpoint(\n item, \"\", input_data[\"ContentType\"], accept\n )\n\n response_body = response[\"Body\"]\n data = response_body.read()\n response_body.close()\n f.write(data)\n if \"AssembleWith\" in output_data and output_data[\"AssembleWith\"] == \"Line\":\n f.write(b\"\\n\")\n\n move_to_destination(working_dir, output_data[\"S3OutputPath\"], self.name, self.local_session)\n self.container.stop_serving()", "def batched_inference(models,\n coverage_models,\n embeddings,\n rays,\n N_samples,\n N_importance,\n use_disp,\n chunk,\n point_transform_func=None,\n topk=0):\n B = rays.shape[0]\n results = defaultdict(list)\n for i in range(0, B, chunk):\n rendered_ray_chunks = \\\n render_rays(models,\n coverage_models,\n embeddings,\n rays[i:i+chunk],\n N_samples,\n use_disp,\n 0,\n 0,\n N_importance,\n chunk,\n dataset.white_back,\n test_time=True,\n point_transform_func=point_transform_func,\n topk=topk)\n\n for k, v in rendered_ray_chunks.items():\n results[k] += [v.cpu()]\n\n for k, v in results.items():\n results[k] = torch.cat(v, 0)\n return results", "def batched_inference(models, embeddings,\n rays, N_samples, N_importance, use_disp,\n chunk,\n white_back):\n B = rays.shape[0]\n chunk = 1024*32\n results = defaultdict(list)\n for i in range(0, B, chunk):\n rendered_ray_chunks = \\\n render_rays(models,\n embeddings,\n rays[i:i+chunk],\n N_samples,\n use_disp,\n 0,\n 0,\n N_importance,\n chunk,\n dataset.white_back,\n test_time=True)\n\n for k, v in rendered_ray_chunks.items():\n results[k] += [v]\n\n for k, v in results.items():\n results[k] = torch.cat(v, 0)\n return results", "def _batch_inference(self, batched_inputs):\n outputs = []\n inputs = []\n for idx, input in zip(count(), batched_inputs):\n inputs.append(input)\n if len(inputs) == self.batch_size or idx == len(batched_inputs) - 1:\n outputs.extend(self.model.inference(inputs, do_postprocess=False))\n inputs = []\n return outputs", "def launch_inference(self):\n\n self.logger.info('Beginning to submit inference tasks')\n # Make a folder for the models\n model_folder = self.output_dir.joinpath('models')\n model_folder.mkdir(exist_ok=True)\n \n # Submit the chunks to the workflow engine\n for mid in range(len(self.mpnns)):\n # Get a model that is ready for inference\n model = self.ready_models.get()\n \n # Convert it to a pickle-able message\n model_msg = MPNNMessage(model)\n \n # Proxy it once, to be used by all inference tasks\n model_msg_proxy = ps.store.get_store(self.ps_names['infer']).proxy(model_msg, 
key=f'model-{mid}-{self.inference_batch}')\n \n # Run inference with all segements available\n for cid, (chunk, chunk_msg) in enumerate(zip(self.inference_chunks, self.inference_proxies)):\n self.queues.send_inputs([model_msg_proxy], chunk_msg,\n topic='infer', method='evaluate_mpnn',\n keep_inputs=False,\n task_info={'chunk_id': cid, 'chunk_size': len(chunk), 'model_id': mid})\n self.logger.info('Finished submitting molecules for inference')", "def inference_function(smiles, model_dir, **other_cols):\n # Launch the process pool if this is the first invocation\n # Note: The pool will stay alive until the host process dies\n # OK for HPC (host dies when job completes) but be very careful\n # running this function on persistent servers.\n global pool, models, feat\n import os\n core_count = len(os.sched_getaffinity(0))\n # I use the affinity rather than `os.cpu_count()` to work with aprun's\n # protocol for specifying the affinity of each MPI PE and all its\n # child processes (including those spawned by multiprocessing)\n if pool is None:\n from multiprocessing import Pool\n pool = Pool(core_count)\n\n # Measure the start time and record host name\n from datetime import datetime\n from platform import node\n start_time = datetime.utcnow().isoformat()\n hostname = node()\n\n # Load models\n from glob import glob\n import pickle as pkl\n if models is None:\n model_files = glob(os.path.join(model_dir, '*.pkl'))\n models = {}\n for path in model_files:\n with open(path, 'rb') as fp:\n models[os.path.basename(path)[:-4]] = pkl.load(fp)\n\n # Remove the first step from the pipeline (feature generation from the SMILES)\n feat = None\n for m in models.values():\n feat = m.steps.pop(0)[1]\n\n # Compute features in parallel\n import numpy as np\n n_splits = min(core_count * 4, len(smiles))\n smiles_chunks = np.array_split(smiles, n_splits)\n feature_chunks = pool.map(feat.transform, smiles_chunks)\n\n # Pull in the inference function and run it\n from functools import partial\n result = {'smiles': smiles}\n for name, model in models.items():\n func = partial(run_inference, model)\n result[name] = np.concatenate(pool.map(func, feature_chunks))\n result.update(other_cols)\n\n # Measure the end time\n end_time = datetime.utcnow().isoformat()\n return {\n 'start': start_time,\n 'result': result,\n 'end': end_time,\n 'core_count': core_count,\n 'hostname': hostname\n }", "def inference(self):\n result = run_swift_benchmark(name=self.benchmark_name, variety='inference', backend='eager')\n self.report_benchmark(**result)", "def inference(tasks, name, convnet_model, convnet_weight_path, input_patch_size,\n output_patch_size, output_patch_overlap, output_crop_margin, patch_num,\n num_output_channels, dtype, framework, batch_size, bump, mask_output_chunk,\n mask_myelin_threshold, input_chunk_name, output_chunk_name):\n with Inferencer(\n convnet_model,\n convnet_weight_path,\n input_patch_size=input_patch_size,\n output_patch_size=output_patch_size,\n num_output_channels=num_output_channels,\n output_patch_overlap=output_patch_overlap,\n output_crop_margin=output_crop_margin,\n patch_num=patch_num,\n framework=framework,\n dtype=dtype,\n batch_size=batch_size,\n bump=bump,\n mask_output_chunk=mask_output_chunk,\n mask_myelin_threshold=mask_myelin_threshold,\n dry_run=state['dry_run'],\n verbose=state['verbose']) as inferencer:\n \n state['operators'][name] = inferencer \n\n for task in tasks:\n handle_task_skip(task, name)\n if not task['skip']:\n if 'log' not in task:\n task['log'] = {'timer': {}}\n start = 
time()\n\n task[output_chunk_name] = state['operators'][name](\n task[input_chunk_name])\n\n task['log']['timer'][name] = time() - start\n task['log']['compute_device'] = state[\n 'operators'][name].compute_device\n yield task", "def predict_all():\n \n # Loads the serialised analytic models. \n lrm = joblib.load(\"app/mod_stat/model_linear.pkl\") \n log = joblib.load(\"app/mod_stat/model_binary.pkl\")\n \n # Queries each unique associated count value from the database.\n results = Counts.select(Counts.counts_associated).distinct()\n \n count_values = []\n for result in results:\n if result.get_result()[\"counts_associated\"] != \"None\":\n count_values.append(result.get_result()[\"counts_associated\"])\n\n # For each unique associated count value:\n for count in count_values:\n # Updates every row of the database having that value with a corresponding predicted count. \n query = Counts.update(counts_predicted=int(lrm.predict(int(count))[0])).where(Counts.counts_associated == count)\n query.execute()\n\n # Updates every row of the database having that value with a corresponding binary estimation. \n query = Counts.update(counts_predicted_is_occupied=log.predict(int(count))[0]).where(Counts.counts_associated == count)\n query.execute()", "def __call__(self, smiles: list, **kwargs):\n results = [{'smiles': smi} for smi in smiles]\n\n # Load smiles\n full_data = cp.data.get_data_from_smiles(\n smiles=[[smi] for smi in smiles],\n skip_invalid_smiles=False,\n features_generator=self.args.features_generator\n )\n # Valid data\n full_to_valid_indices = {}\n valid_index = 0\n for full_index in range(len(full_data)):\n if all(mol is not None for mol in full_data[full_index].mol):\n full_to_valid_indices[full_index] = valid_index\n valid_index += 1\n\n test_data = cp.data.MoleculeDataset([full_data[i] for i in sorted(full_to_valid_indices.keys())])\n\n # Edge case if empty list of smiles is provided\n if len(test_data) == 0:\n return [None] * len(full_data)\n\n #print(f'Test size = {len(test_data):,}')\n\n # Predict with each model individually and sum predictions\n if self.args.dataset_type == 'multiclass':\n sum_preds = np.zeros((len(test_data), self.num_tasks, self.args.multiclass_num_classes))\n else:\n sum_preds = np.zeros((len(test_data), self.num_tasks))\n\n # Create data loader\n test_data_loader = cp.data.MoleculeDataLoader(\n dataset=test_data,\n batch_size=self.args.batch_size,\n num_workers=self.args.num_workers\n )\n\n # Partial results for variance robust calculation.\n if self.args.ensemble_variance:\n all_preds = np.zeros((len(test_data), self.num_tasks, len(self.args.checkpoint_paths)))\n\n # Predict\n # print(f'Predicting with an ensemble of {len(self.args.checkpoint_paths)} models')\n for index, checkpoint_path in enumerate(self.args.checkpoint_paths):\n # Load model and scalers\n model = cp.utils.load_checkpoint(checkpoint_path, device=self.args.device)\n scaler, features_scaler, atom_descriptor_scaler, bond_feature_scaler = cp.utils.load_scalers(checkpoint_path)\n\n # Normalize features\n if self.args.features_scaling or self.train_args.atom_descriptor_scaling or self.train_args.bond_feature_scaling:\n test_data.reset_features_and_targets()\n if self.args.features_scaling:\n test_data.normalize_features(features_scaler)\n if self.train_args.atom_descriptor_scaling and self.args.atom_descriptors is not None:\n test_data.normalize_features(atom_descriptor_scaler, scale_atom_descriptors=True)\n if self.train_args.bond_feature_scaling and self.args.bond_features_size > 0:\n 
test_data.normalize_features(bond_feature_scaler, scale_bond_features=True)\n\n # Make predictions\n model_preds = cp.train.predict(\n model=model,\n data_loader=test_data_loader,\n scaler=scaler\n )\n sum_preds += np.array(model_preds)\n if self.args.ensemble_variance:\n all_preds[:, :, index] = model_preds\n\n # Ensemble predictions\n avg_preds = sum_preds / len(self.args.checkpoint_paths)\n avg_preds = avg_preds.tolist()\n\n # if self.args.ensemble_variance:\n # all_epi_uncs = np.var(all_preds, axis=2)\n # all_epi_uncs = all_epi_uncs.tolist()\n\n # Get prediction column names\n # if self.args.dataset_type == 'multiclass':\n # task_names = [f'{name}_class_{i}' for name in self.task_names\n # for i in range(self.args.multiclass_num_classes)]\n # else:\n # task_names = self.task_names\n\n for i in range(len(smiles)):\n if i in full_to_valid_indices.keys():\n results[i].update({f'{self.prefix}_pred_proba': avg_preds[full_to_valid_indices[i]][0]})\n else:\n results[i].update({f'{self.prefix}_pred_proba': 0.0})\n return results", "def inference_x10(self):\n result = run_swift_benchmark(name=self.benchmark_name, variety='inference', backend='x10')\n self.report_benchmark(**result)", "def create_inference_request(\n self,\n model_name: str,\n objects: List[dict],\n top_n: int = TOP_N,\n retry: bool = True,\n ) -> dict:\n self.log.debug(\n \"Submitting Inference request for model '%s' with '%s'\"\n \" objects and top_n '%s' \",\n model_name,\n len(objects),\n top_n,\n )\n endpoint = InferencePaths.format_inference_endpoint_by_name(model_name)\n response = self.session.post_to_endpoint(\n endpoint, payload={\"topN\": top_n, \"objects\": objects}, retry=retry\n )\n as_json = response.json()\n self.log.debug(\"Inference response ID: %s\", as_json[\"id\"])\n return as_json", "def get_predictions(self, inferences):\n return inferences", "def loop_api_requests(location_id, num_articles):\n\n print \"location_id: %d\" % location_id\n\n # get location_id for location_name\n loc = Location.query.filter(Location.location_id == location_id).one()\n location_name = loc.location_name\n print \"location_name: %s\" % location_name\n\n initial_articles = Article.query.filter(Article.location_id == location_id).count()\n print \"initial_articles: %d\" % initial_articles\n\n # a single API request returns one 'page' with 10 articles\n pages_to_request = (int(num_articles) / 10) - 1\n\n page_number = 0\n\n while page_number <= pages_to_request:\n articles_list = send_api_request(location_name, page_number)\n load_articles(articles_list, location_name)\n page_number += 1\n\n current_articles = Article.query.filter(Article.location_id == location_id).count()\n\n print \"current_articles: %d\" % current_articles", "def process_batch(self, batch: List[Dict[str, Any]]) -> List[Response]:\n pass", "def incremental_inference(self, layers, node_dataset, edges, adjacency_lists, sizes, max_arieties,\n infer_with_posterior, unibigram, add_self_arc,\n exp_name, tr_val_test, architecture=None, prev_stats_path=None,\n ckpt_name=None, aggregation_type='mean', all_combinations=False):\n unigrams_folderpath = 'unigrams_' + tr_val_test\n unigram_filename = 'unigrams'\n inferred_states_folderpath = 'states_' + tr_val_test\n inferred_states_filename = 'states'\n stats_folderpath = 'stats_' + tr_val_test\n stats_filename = 'stats'\n store_stats_folderpath = os.path.join(exp_name, stats_folderpath)\n load_stats_folderpath = store_stats_folderpath\n inferred_states_folderpath = os.path.join(exp_name, inferred_states_folderpath)\n 
unigrams_folderpath = os.path.join(exp_name, unigrams_folderpath)\n\n if not os.path.exists(unigrams_folderpath):\n os.makedirs(unigrams_folderpath)\n if not os.path.exists(store_stats_folderpath):\n os.makedirs(store_stats_folderpath)\n if not os.path.exists(inferred_states_folderpath):\n os.makedirs(inferred_states_folderpath)\n\n if architecture is None:\n ckpt = torch.load(os.path.join(exp_name, checkpoint_folder, ckpt_name))\n architecture = CGMM_Layer.build_architecture(ckpt)\n else:\n if prev_stats_path is not None:\n load_stats_folderpath = os.path.join(prev_stats_path, stats_folderpath)\n else:\n load_stats_folderpath = os.path.join(exp_name, stats_folderpath)\n\n print('Inference for', layers, 'layers on', tr_val_test)\n\n likelihood_by_layer = []\n\n start_layer = 0 if prev_stats_path is None else len(architecture)-1 # else I need to infer only the last layer\n\n for layer in range(start_layer, layers):\n cgmm_layer = architecture[layer]\n\n layer_wise_statistics = [(layer - x + 1) for x in self.use_statistics if (layer - x) >= 0]\n\n if cgmm_layer.is_layer_0:\n stats_nodes = None\n else:\n stats_to_load = [os.path.join(load_stats_folderpath, stats_filename + '_node_' + str(level) + '.npy')\n for level in layer_wise_statistics]\n stats_nodes = load_to_ZipDataset(stats_to_load)\n\n train_set = LabelAndStatsDataset(node_dataset, stats_nodes)\n train_set = DataLoader(train_set, batch_size=4000, shuffle=False, num_workers=2)\n\n # Accumulate the likelihood of all batches\n l = 0.\n\n inferred_states = None\n posteriors = None\n\n for input_batch in train_set:\n\n posterior_batch, batch_likelihood = cgmm_layer(*input_batch)\n\n l += batch_likelihood\n\n # Always used for statistics\n prediction_batch = torch.argmax(posterior_batch, dim=1).detach().numpy()\n\n if inferred_states is None:\n inferred_states = prediction_batch\n else:\n inferred_states = np.append(inferred_states, prediction_batch)\n\n if posteriors is None:\n posteriors = posterior_batch.detach().numpy()\n else:\n posteriors = np.concatenate((posteriors, posterior_batch.detach().numpy()), axis=0)\n\n stats_nodes, stats_edges = save_statistics(self,\n adjacency_lists, posteriors, edges, sizes, max_arieties,\n store_stats_folderpath, stats_filename, layer+1, add_self_arc)\n\n if not all_combinations:\n fingerprints = self.aggregate_states(adjacency_lists, sizes,\n posteriors if infer_with_posterior else inferred_states,\n stats_nodes=stats_nodes if unibigram else None,\n aggregate_posteriors=infer_with_posterior, unibigram=unibigram,\n aggregation_type=aggregation_type)\n\n save_tensor(fingerprints, unigrams_folderpath, unigram_filename, layer + 1)\n\n else:\n for infer in [True, False]:\n for unib in [True, False]:\n fingerprints = self.aggregate_states(adjacency_lists, sizes,\n posteriors if infer else inferred_states,\n stats_nodes=stats_nodes if unib else None,\n aggregate_posteriors=infer,\n unibigram=unib,\n aggregation_type=aggregation_type)\n\n comb_unig_filename = 'unigrams_' + str(infer) + '_' + str(unib)\n save_tensor(fingerprints, unigrams_folderpath, comb_unig_filename, layer + 1)\n\n inferred_states = posteriors\n\n save_tensor(inferred_states, inferred_states_folderpath, inferred_states_filename, layer+1)\n\n likelihood_by_layer.append(l)\n\n return likelihood_by_layer", "def create_inference_request_with_url(\n self,\n url: str,\n objects: List[dict],\n top_n: int = TOP_N,\n retry: bool = True,\n ) -> dict:\n self.log.debug(\n \"Submitting Inference request with '%s'\"\n \" objects and top_n '%s' to url 
%s\",\n len(objects),\n top_n,\n url,\n )\n response = self.session.post_to_url(\n url, payload={\"topN\": top_n, \"objects\": objects}, retry=retry\n )\n as_json = response.json()\n self.log.debug(\"Inference response ID: %s\", as_json[\"id\"])\n return as_json", "def infer(self):\r\n counter = 0\r\n output = {}\r\n while True:\r\n batch = self._batcher.next_batch() # 1 example repeated across batch\r\n if batch is None: # finished decoding dataset in single_pass mode\r\n print(\"Decoder has finished reading dataset for single_pass.\")\r\n # log original information\r\n with open(os.path.join(self._decode_dir, \"output.json\"), 'w', encoding='utf-8') as w:\r\n json.dump(output, w)\r\n print(\"Output has been saved in %s.\" % self._decode_dir)\r\n\r\n #start evaluation\r\n evaluate.main(self.ckpt_path, FLAGS.log_root, self._decode_dir, FLAGS.mode, FLAGS.multi_label_eval)\r\n return\r\n\r\n background_span = data.show_background_span(batch.original_backgrounds_token[0], batch.original_b_starts[0], batch.original_b_ends[0])\r\n response_span = data.show_background_span(batch.original_responses_token[0], batch.original_r_starts[0], batch.original_r_ends[0])\r\n # Run greed search to get best Hypothesis\r\n best_hyp = greed_search.run_greed_search(self._sess, self._model, self._vocab, batch)\r\n best_hyp.tokens = [token for token in best_hyp.tokens if token not in [None]]\r\n # Extract the output ids from the hypothesis and convert back to words\r\n output_ids = best_hyp.tokens[1:]\r\n decoded_token, highlights_decoded_token, spans = data.outputids2words(output_ids, self._vocab, batch.bac_oovs[0], batch.original_backgrounds_token[0])\r\n\r\n if output_ids[-1] == 3:\r\n output_ids_semantic = output_ids[:(len(output_ids)-1)]\r\n else:\r\n output_ids_semantic = output_ids\r\n\r\n ids_for_print = [str(i)for i in output_ids_semantic]\r\n ids_for_print = ' '.join(ids_for_print)\r\n\r\n switch_ref_probs = best_hyp.switch_ref_probs\r\n switch_ref_probs = [str(i) for i in switch_ref_probs]\r\n switch_ref_probs = ' '.join(switch_ref_probs)\r\n\r\n switch_gen_probs = best_hyp.switch_gen_probs\r\n switch_gen_probs = [str(i) for i in switch_gen_probs]\r\n switch_gen_probs = ' '.join(switch_gen_probs)\r\n\r\n switch_gen_pred_probs = best_hyp.switch_gen_pred_probs\r\n switch_gen_pred_probs = [str(i) for i in switch_gen_pred_probs]\r\n switch_gen_pred_probs = ' '.join(switch_gen_pred_probs)\r\n\r\n switch_gen_copy_probs = best_hyp.switch_gen_copy_probs\r\n switch_gen_copy_probs = [str(i) for i in switch_gen_copy_probs]\r\n switch_gen_copy_probs = ' '.join(switch_gen_copy_probs)\r\n\r\n # Remove the [STOP] token from decoded_words, if necessary\r\n try:\r\n fst_stop_idx = decoded_token.index(data.STOP_DECODING) # index of the (first) [STOP] symbol\r\n fst_stop_idx1 = highlights_decoded_token.index(data.STOP_DECODING)\r\n decoded_token = decoded_token[:fst_stop_idx]\r\n highlights_decoded_token = highlights_decoded_token[:fst_stop_idx1]\r\n\r\n if len(decoded_token) == 0:\r\n decoded_token.append(\".\")\r\n\r\n except ValueError:\r\n decoded_token = decoded_token\r\n highlights_decoded_token = highlights_decoded_token\r\n\r\n spans_output = ' '.join(spans)\r\n decoded_output = ' '.join(decoded_token)\r\n highlights_decoded_output = ' '.join(highlights_decoded_token)\r\n\r\n output[batch.original_example_ids[0]] = {\"background\": background_span, \"context\": batch.original_contexts[0], \"highlights_ref_response\": response_span,\r\n \"highlights_inferred_response\": highlights_decoded_output, \"ref_response\": 
batch.original_responses[0],\r\n \"inferred_response\": decoded_output, \"ref_span\": batch.original_spans[0],\"inferred_spans\": spans_output, \"output_index\": output_ids_semantic,\r\n \"switch_ref_probs\": switch_ref_probs, \"switch_gen_probs\": switch_gen_probs,\r\n \"switch_gen_pred_probs\": switch_gen_pred_probs,\"switch_gen_copy_probs\": switch_gen_copy_probs}\r\n\r\n self.write_for_observation(batch.original_example_ids[0], background_span, batch.original_contexts[0], response_span, highlights_decoded_output, ids_for_print, switch_ref_probs, switch_gen_probs, switch_gen_pred_probs, switch_gen_copy_probs, counter)\r\n counter += 1 # this is how many examples we've decoded\r" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Performs inference for the given objects against a fully qualified URL. A complete inference URL can be passed to this method instead of constructing the URL from the base URL and model name
def create_inference_request_with_url( self, url: str, objects: List[dict], top_n: int = TOP_N, retry: bool = True, ) -> dict: self.log.debug( "Submitting Inference request with '%s'" " objects and top_n '%s' to url %s", len(objects), top_n, url, ) response = self.session.post_to_url( url, payload={"topN": top_n, "objects": objects}, retry=retry ) as_json = response.json() self.log.debug("Inference response ID: %s", as_json["id"]) return as_json
[ "def inference(self, inputs, sess, mode):\n fetches = {}\n if mode == 'depth':\n fetches['depth'] = self.est_depth\n inputs_ph = self.inputs_depth\n if mode == 'egomotion':\n fetches['egomotion'] = self.est_egomotion\n inputs_ph = self.inputs_egomotion\n results = sess.run(fetches, feed_dict={inputs_ph: inputs})\n return results", "def inference(path, model_inf):\n inference_dataset = ImageDetectionDataset()\n inference_dataset.load_inference_classes()\n class_names = inference_dataset.get_class_names()\n\n define_path(path, model_inf, class_names)", "def infer_model():\n # Setup training/testing environment\n setup_env()\n # Construct the model\n model = setup_model()\n # Load model weights\n cp.load_checkpoint(cfg.TEST.WEIGHTS, model)\n logger.info(\"Loaded model weights from: {}\".format(cfg.TEST.WEIGHTS))\n # Create data loaders and meters\n test_loader = data_loader.construct_test_loader()\n test_meter = meters.TestMeter(len(test_loader))\n filename = cfg.OUT_DIR + 'predict.txt'\n loss_fun = builders.build_loss_fun(\"cross_entropy\", \"none\").cuda()\n # Evaluate the model\n infer_epoch(test_loader, model, test_meter, 0, loss_fun, filename, writer=None)", "def create_inference_request(\n self,\n model_name: str,\n objects: List[dict],\n top_n: int = TOP_N,\n retry: bool = True,\n ) -> dict:\n self.log.debug(\n \"Submitting Inference request for model '%s' with '%s'\"\n \" objects and top_n '%s' \",\n model_name,\n len(objects),\n top_n,\n )\n endpoint = InferencePaths.format_inference_endpoint_by_name(model_name)\n response = self.session.post_to_endpoint(\n endpoint, payload={\"topN\": top_n, \"objects\": objects}, retry=retry\n )\n as_json = response.json()\n self.log.debug(\"Inference response ID: %s\", as_json[\"id\"])\n return as_json", "def infer(self, method=\"MLE\", **kwargs) :\n\n\t\treturn eval(\"inference.{}(self, **kwargs)\".format(method))", "def launch_inference(self):\n\n self.logger.info('Beginning to submit inference tasks')\n # Make a folder for the models\n model_folder = self.output_dir.joinpath('models')\n model_folder.mkdir(exist_ok=True)\n \n # Submit the chunks to the workflow engine\n for mid in range(len(self.mpnns)):\n # Get a model that is ready for inference\n model = self.ready_models.get()\n \n # Convert it to a pickle-able message\n model_msg = MPNNMessage(model)\n \n # Proxy it once, to be used by all inference tasks\n model_msg_proxy = ps.store.get_store(self.ps_names['infer']).proxy(model_msg, key=f'model-{mid}-{self.inference_batch}')\n \n # Run inference with all segements available\n for cid, (chunk, chunk_msg) in enumerate(zip(self.inference_chunks, self.inference_proxies)):\n self.queues.send_inputs([model_msg_proxy], chunk_msg,\n topic='infer', method='evaluate_mpnn',\n keep_inputs=False,\n task_info={'chunk_id': cid, 'chunk_size': len(chunk), 'model_id': mid})\n self.logger.info('Finished submitting molecules for inference')", "def do_bulk_inference(\n self,\n model_name: str,\n objects: List[dict],\n top_n: int = TOP_N,\n retry: bool = True,\n worker_count: int = 4,\n ) -> List[Union[dict, None]]:\n\n if worker_count is None:\n raise InvalidWorkerCount(\"worker_count cannot be None!\")\n\n if worker_count > 4:\n msg = \"worker_count too high: %s. 
Up to 4 allowed.\" % worker_count\n raise InvalidWorkerCount(msg)\n\n if worker_count <= 0:\n msg = \"worker_count must be greater than 0!\"\n raise InvalidWorkerCount(msg)\n\n def predict_call(work_package):\n try:\n response = self.create_inference_request(\n model_name, work_package, top_n=top_n, retry=retry\n )\n return response[\"predictions\"]\n except (DARHTTPException, RequestException) as exc:\n self.log.warning(\n \"Caught %s during bulk inference. \"\n \"Setting results to None for this batch!\",\n exc,\n exc_info=True,\n )\n\n prediction_error = [\n {\n \"objectId\": inference_object.get(\"objectId\", None),\n \"labels\": None,\n \"_sdk_error\": \"{}: {}\".format(exc.__class__.__name__, str(exc)),\n }\n for inference_object in work_package\n ]\n return prediction_error\n\n results = []\n\n with ThreadPoolExecutor(max_workers=worker_count) as pool:\n results_iterator = pool.map(\n predict_call, split_list(objects, LIMIT_OBJECTS_PER_CALL)\n )\n\n for predictions in results_iterator:\n results.extend(predictions)\n\n return results", "def _load_inference_interface(self, use_jax):", "def single_worker_inference(infer_model,\n ckpt,\n inference_input_file,\n inference_output_file,\n hparams):\n output_infer = inference_output_file\n\n # Read data\n infer_data = load_data(inference_input_file, hparams)\n\n with tf.Session(config=utils.get_config_proto(), graph=infer_model.graph) as sess:\n loaded_infer_model = model_helper.load_model(infer_model.model, ckpt, sess, \"infer\")\n sess.run(infer_model.iterator.initializer,\n feed_dict={\n infer_model.src_placeholder: infer_data,\n infer_model.batch_size_placeholder: hparams.infer_batch_size\n })\n # Decode\n utils.print_out(\"# Start decoding\")\n _decode_and_evaluate(\"infer\",\n loaded_infer_model,\n sess,\n output_infer,\n ref_file=None,\n subword_option=None,\n beam_width=hparams.beam_width,\n tgt_eos=hparams.eos,\n num_translations_per_input=hparams.num_translations_per_input)", "def prepare_for_inference(model: TModel) -> TPModel:", "def inference_args(parser: argparse.ArgumentParser):\n parser.add_argument(\"--model_path\", default=None, type=str, required=True,\n help=\"Path to pre-trained model\")", "def benchmark_model_on_ep(\n args,\n model_name,\n exec_provider,\n trt_ep_options,\n model_path,\n inputs,\n all_inputs_shape,\n model_to_fail_ep,\n ep_results,\n success_results,\n test_data_dir,\n convert_input_fp16,\n):\n\n # memory tracking variables\n mem_usage = None\n result = None\n\n # get standalone TensorRT perf\n if is_standalone(exec_provider) and args.trtexec:\n try:\n result = run_trt_standalone(\n args.trtexec,\n model_name,\n model_path,\n test_data_dir,\n all_inputs_shape,\n exec_provider == standalone_trt_fp16,\n args.track_memory,\n )\n except Exception as excpt:\n logger.error(excpt)\n update_fail_model_map(model_to_fail_ep, model_name, exec_provider, \"runtime error\", excpt)\n return\n\n # inference with onnxruntime ep\n else:\n # resolve providers to create session\n providers = ep_to_provider_list[exec_provider]\n provider_options = get_provider_options(providers, trt_ep_options, args.cuda_ep_options)\n\n options = onnxruntime.SessionOptions()\n options.graph_optimization_level = get_graph_opt_level(args.graph_enablement)\n\n # create onnxruntime inference session\n try:\n sess, second_creation_time = create_session(model_path, providers, provider_options, options)\n\n except Exception as excpt:\n logger.error(excpt)\n update_fail_model_map(model_to_fail_ep, model_name, exec_provider, \"runtime error\", 
excpt)\n return\n\n if second_creation_time:\n ep_results[\"session\"][exec_provider + second] = second_creation_time\n\n logger.info(\"Start to inference %s with %s ...\", model_name, exec_provider)\n logger.info(sess.get_providers())\n logger.info(sess.get_provider_options())\n\n if sess:\n logger.info(\"Model inputs nodes:\")\n for input_meta in sess.get_inputs():\n logger.info(input_meta)\n logger.info(\"Model outputs nodes:\")\n for output_meta in sess.get_outputs():\n logger.info(output_meta)\n\n batch_size = 1\n result_template = {\n \"engine\": \"onnxruntime\",\n \"version\": onnxruntime.__version__,\n \"device\": exec_provider,\n \"fp16\": convert_input_fp16,\n \"io_binding\": args.io_binding,\n \"graph_optimizations\": args.graph_enablement,\n \"enable_cache\": args.trt_ep_options.get(\"trt_engine_cache_enable\", \"False\"),\n \"model_name\": model_name,\n \"inputs\": len(sess.get_inputs()),\n \"batch_size\": batch_size,\n \"sequence_length\": 1,\n \"datetime\": str(datetime.now()),\n }\n\n # run cpu fewer times\n repeat_times = 100 if exec_provider == cpu else args.test_times\n track_memory = False if exec_provider == cpu else args.track_memory\n\n # inference with ort\n try:\n result, mem_usage = inference_ort(\n args,\n model_name,\n sess,\n exec_provider,\n inputs,\n result_template,\n repeat_times,\n batch_size,\n track_memory,\n )\n except Exception as excpt:\n logger.error(excpt)\n update_fail_model_map(model_to_fail_ep, model_name, exec_provider, \"runtime error\", excpt)\n return\n\n if result:\n ep_results[\"latency\"][exec_provider] = {}\n ep_results[\"latency\"][exec_provider][\"average_latency_ms\"] = result[\"average_latency_ms\"]\n ep_results[\"latency\"][exec_provider][\"latency_90_percentile\"] = result[\"latency_90_percentile\"]\n if \"memory\" in result:\n mem_usage = result[\"memory\"]\n if mem_usage:\n ep_results[\"latency\"][exec_provider][\"memory\"] = mem_usage\n if not args.trtexec: # skip standalone\n success_results.append(result)", "def infer(self):", "async def infer(self, *args):\n g = await self.make_graph(args)\n nargs = len(g.parameters)\n\n if len(args) != nargs:\n raise type_error_nargs(self.identifier, nargs, len(args))\n\n argkey, context = await self._make_argkey_and_context(args)\n\n # We associate each parameter of the Graph with its value for each\n # property, in the context we built.\n for p, arg in zip(g.parameters, argkey):\n for track, v in arg:\n ref = self.engine.ref(p, context)\n self.engine.cache.set_value((track, ref), v)\n\n out = self.engine.ref(g.return_, context)\n return await self.engine.get_inferred(self.track.name, out)", "def inference(self):\n result = run_swift_benchmark(name=self.benchmark_name, variety='inference', backend='eager')\n self.report_benchmark(**result)", "def _get_inference_request(self, inputs, outputs, model_name, model_version,\n request_id, sequence_id):\n\n self._request = grpc_service_v2_pb2.ModelInferRequest()\n self._request.model_name = model_name\n self._request.model_version = model_version\n if request_id != None:\n self._request.id = request_id\n if sequence_id != None:\n self._request.sequence_id = sequence_id\n for infer_input in inputs:\n self._request.inputs.extend([infer_input._get_tensor()])\n for infer_output in outputs:\n self._request.outputs.extend([infer_output._get_tensor()])", "def load_inference_model(dirname, executor, load_file_name=None):\n if not os.path.isdir(dirname):\n raise ValueError(\"There is no directory named '%s'\", dirname)\n\n model_file_name = dirname + 
\"/__model__\"\n with open(model_file_name, \"rb\") as f:\n program_desc_str = f.read()\n\n program = Program.parse_from_string(program_desc_str)\n load_persistables(executor, dirname, program, load_file_name)\n\n feed_target_names = get_feed_targets_names(program)\n fetch_target_names = get_fetch_targets_names(program)\n fetch_targets = [\n program.global_block().var(name) for name in fetch_target_names\n ]\n\n return [program, feed_target_names, fetch_targets]", "def infinite_infer_run(): \n try:\n model_directory = \"/opt/awscam/artifacts/\"\n # model_name = \"mnist-8\" # onnx-model\n model_name = \"fingerModel.onnx\" # onnx-model\n\n # Create a local display instance that will dump the image bytes to a FIFO\n # file that the image can be rendered locally.\n local_display = LocalDisplay('480p')\n local_display.start()\n\n # When the ONNX model is imported via DeepLens console, the model is copied\n # to the AWS DeepLens device, which is located in the \"/opt/awscam/artifacts/\".\n model_file_path = os.path.join(model_directory, model_name)\n sess = rt.InferenceSession(model_file_path)\n \n while True:\n # Get a frame from the video stream\n ret, frame = awscam.getLastFrame()\n if not ret:\n raise Exception('Failed to get frame from the stream')\n \n # Preprocess the frame to crop it into a square and\n # resize it to make it the same size as the model's input size.\n input_img = preprocess(frame)\n\n # Inference.\n inferences = makeInferences(sess, input_img)\n inference = np.argmax(inferences)\n\n # TODO: Add the label of predicted digit to the frame used by local display.\n # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html\n # for more information about the cv2.putText method.\n # Method signature: image, text, origin, font face, font scale, color, and thickness \n # cv2.putText()\n cv2.putText(frame, str(inference), (20,120), cv2.FONT_HERSHEY_COMPLEX, 5, (243, 252, 61), 4)\n \n # 255, 0, 0\n # 61, 252, 243\n\n # Set the next frame in the local display stream.\n local_display.set_frame_data(frame)\n \n # Outputting the result logs as \"MQTT messages\" to AWS IoT.\n cloud_output = {}\n cloud_output[\"scores\"] = inferences.tolist()\n print(inference, cloud_output)\n print(input_img.shape, inferences.shape)\n\n except Exception as ex:\n # Outputting error logs as \"MQTT messages\" to AWS IoT.\n print('Error in lambda {}'.format(ex))\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n print(\"error details:\" + str(exc_type) + str(fname) + str(exc_tb.tb_lineno))", "def inference(imu_data, model_path=\"model_1\"):\n fs_imu = 100\n labels=np.zeros(len(imu_data))\n clean_x,clean_y=clean_datset([imu_data], [labels], fs_imu)\n dataset_feats=featurize_samples(clean_x, fs_imu)\n dataset_feats=np.array(dataset_feats[0]).reshape(1,-1)\n clean_y = np.ravel(clean_y)\n reg_model = load_model(model_path)\n samples_pred = reg_model.predict(dataset_feats)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check whether a value is equal to an element in a list
def check_list(self, list1, val): for x in list1: if val == x: return True return False
[ "def elem(value, lst: List) -> bool:\n return not null(lst) and (head(lst) == value or elem(value, tail(lst)))", "def exists(\n self,\n value\n ):\n size_of_list = self.size()\n index = 0\n\n while index < size_of_list:\n if self.get_value(index) == value:\n return True\n index += 1\n return False", "def is_in_tuplist(tuplist, value):\n return len([tup for tup in tuplist if value in tup]) > 0", "def all_Equals(lst):\n for i in lst:\n if lst[0] != i:\n return None\n return lst[0]", "def __contains__(self, value):\n # start at the beginning of the list\n # check every item\n # if you find a match, return True\n # if we get to the end, return False\n\n current = self.front\n while current is not None:\n if current.value == value:\n return True\n current = current.next_\n return False", "def list_search(lst, value):\n for i in lst:\n if i==value:\n return value\n return None", "def _compare_list(self, my_list):\n\n if self.check_with_zeros:\n for val in self._range_max_value:\n if my_list.count(val) > 1:\n return False\n else:\n for val in self._range_max_value:\n if val not in my_list:\n return False\n return True", "def isIn(value, arg):\n return arg in value", "def check_value(val, reference):\n if isinstance(reference, list):\n for ref in reference:\n if check_value(val, ref):\n return True\n return False\n if isinstance(reference, tuple):\n return reference[0] <= val <= reference[1]\n return val == reference", "def allEqual(self, list):\n return not list or list == [list[0]] * len(list)", "def find(self, value):\n return value in self.sums", "def exist(lst, pred):\r\n for el in lst:\r\n if pred(el):\r\n return True\r\n return False", "def find_key_value(arg_list, key, value):\n for i in range(len(arg_list)):\n if arg_list[i] == key and arg_list[i + 1] == value:\n return True\n return False", "def find_with_type(value, list_):\n for index, comparison_value in enumerate(list_):\n if (type(value) == type(comparison_value) and\n value == comparison_value):\n return index\n raise ValueError", "def is_in_list(string_one, list_one):\n if string_one in list_one:\n return True\n return False", "def __eq__(self, l: 'SbPList') -> \"int\":\n return _coin.SbPList___eq__(self, l)", "def num_in_list(arr: list, num: int) -> bool:\n found = False\n while not found:\n for i in arr:\n if i == num:\n found = True\n return found", "def is_ins_in_list(instance, ins_list, end):\n for i in range(len(ins_list)):\n if i == end:\n break\n if instance.is_equal(ins_list[i]):\n return True\n return False", "def contains_condition(instance, value, list_property, **_):\n value = resolve_permission_variable(value)\n list_value = get_deep_attr(instance, list_property)\n return value in list_value" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that when the user type is an artist, the app routes to the artist info collection page and the users and artist tables are updated
def test_register_artist(self): self.helper_user_type_all('artist', 'Artist Info', 'website', 'mywebsite')
[ "def artistInfo(aid):\n\n # checks if user is logged in, if not redirects to welcome page\n if notLoggedIn(): \n return redirect( url_for('index'))\n\n # gets artist info and displays it\n conn = dbi.connect() \n artist = music.getArtistById(conn, aid)\n artistsWork = music.getMusicByArtistId(conn, aid)\n return render_template('artist-info.html',title='Artist Info', \n artist = artist, works = artistsWork)", "def renderArtistResults(conn, query):\n results = music.getArtist(conn, query)\n if len(results) == 0: # no results found, display\n return render_template('search-error.html',type='artists', \n title='Search error')\n elif (len(results) == 1): # redirects to that music/user/artist page\n return redirect(url_for('artistInfo', aid= results[0]['aid']))\n else:\n return render_template('list.html', type = 'artists', \n all_things = results, query= query, title=\"Artist results\")", "def user_is_artist(username):\n if models.ArtistProfile.objects.filter(basic_profile__user__username=username).count() > 0:\n return True\n return False", "def artist_detail(request, artist_id):\n artist = get_object_or_404(Artist, pk=artist_id)\n all_products = Product.objects.all()\n all_events = Event.objects.all()\n products = all_products.filter(artist__name=artist)\n events = all_events.filter(artist__name=artist)\n context = {\n 'artist': artist,\n 'products': products,\n 'events': events,\n }\n return render(request, 'artists/artist_detail.html', context)", "def artist_like_toggle(request, artist_pk):\n\n artist = Artist.objects.get(pk=artist_pk)\n if request.method == 'POST':\n artist.toggle_like_user(user=request.user)\n return redirect('artist:artist-list')", "def show_artist_news(request, uri_artist, mbid):\n artist = get_basic_artist(mbid)\n news_stream, news_sources = get_populated_artist_news(artist)\n artist.save_any_changes()\n\n # basic SEO check\n artist_seo_name = slugify2(artist.name)\n if uri_artist == artist_seo_name:\n return render_to_response('news/show_artist_news.html', locals())\n else:\n return HttpResponsePermanentRedirect(reverse('show-artist-news', args=(artist_seo_name, mbid)))", "def _artisturl(self):\n if self.metadata[\"albumartist\"] <> \"Various Artists\":\n self.album._requests += 1\n #sys.stderr.write(self.cfg['abetterpath_http_echonest_host'] + \":\" + self.cfg['abetterpath_http_echonest_port'] + self.urls['echonest_artist_url'] + \"\\n\")\n self.album.tagger.xmlws.get(self.cfg['abetterpath_http_echonest_host'], self.cfg['abetterpath_http_echonest_port'], self.urls['echonest_artist_url'], partial(self._processurls))", "def get_artist(self, request):\n user = request.user\n result = ArtistProfile.call(user=user)\n\n if result.failed:\n return Response(\n errors=dict(errors=result.error.value),\n status=status.HTTP_400_BAD_REQUEST\n )\n return Response(data=result.value, status=status.HTTP_200_OK)", "def __isArtistType(self):\n for obj in self.artistGraph.objects(self.artistURI, self.typePredicate):\n if obj in self.MUSIC_ONTOLOGIES:\n return True\n return False", "def home(request):\n\n user=request.user\n has_artistProfile=user.has_artistProfile() if user.is_authenticated else False\n upc_evs=dbSearch.get_upcoming_events()\n random_bands= dbSearch.get_randomBands(amount=1)\n\n context= {\n 'user': user,\n 'has_artistProfile': has_artistProfile,\n 'upcoming_events': upc_evs,\n 'random_bands': random_bands,\n 'http_protocol': getHTTP_Protocol()\n }\n return render(request, \"gig/home.html\", context=context)", "def artist(artist, page, directory, allow_duplicates, 
track):\n client = Client()\n\n def get_next_response(offset):\n return client.fetch_user_illustrations(artist, offset=offset)\n\n download_pages(\n get_next_response,\n starting_offset=(page - 1) * 30,\n directory=(Path(directory or Config()[\"pixi\"][\"download_directory\"])),\n allow_duplicates=allow_duplicates,\n track_download=resolve_track_download(track, directory),\n start_page=page,\n )\n\n click.echo(f\"Finished downloading artist {artist}.\")", "def on_spotlogin(data):\n user=get_user(data['token'])\n artists=get_artists(data['token'])\n \n # add to users if not already, update top artists\n usersquery = query_user(user['username'])\n if (usersquery == [] or usersquery == None):\n db_user=models.Users(\n username=user['username'],\n profile_picture=user['profile-picture'],\n user_type=user['user-type'],\n top_artists=artists,\n following=[],\n my_likes=[]\n )\n DB.session.add(db_user)\n else:\n usersquery.top_artists = artists\n\n # emit success to user, so they can access timeline\n socketio.emit('login success', True, room=flask.request.sid)\n\n \n # add to active users table\n DB.session.add(models.ActiveUsers(user['username'], flask.request.sid, data['token']))\n \n # commit all db changes\n DB.session.commit()", "def artists_action(menuitem, gui):\n \n result = xbmc.call.AudioLibrary.GetArtists()\n\n def convert(artist):\n text = artist[\"label\"]\n if artist.has_key('thumbnail'):\n image = self.cache.open_http(\n artist[\"thumbnail\"], self.config[\"default artist\"],\n image_convert)\n else:\n image = self.cache.open(self.config[\"default artist\"])\n\n def action(menuitem2, gui2):\n albums_action(menuitem2, gui2, artist[\"artistid\"])\n\n return MenuItem(image, text, action)\n\n self.artists_menu.fill(*map(convert, result[\"artists\"]))\n Menu.action_helper(self.artists_menu)(menuitem, gui)", "def add_artist(self, artist):\n self.artists[artist.name] = artist", "def my_music():\n\n # checks if user is logged in, if not redirects to welcome page\n if notLoggedIn(): \n return redirect( url_for('index'))\n # gets the music by a particular user from the database\n conn = dbi.connect()\n uid = session['user_id']\n myMusic = music.getMyMusic(conn, uid)\n # if user hasn't added any music display add music message\n if len(myMusic) == 0:\n return render_template('my-music.html',all_music =myMusic, \n noMusic=True, title='My Music')\n return render_template('my-music.html', all_music =myMusic, \n title='My Music')", "def artist_uri(self, artist_uri):\r\n self.data['artist_uri'] = artist_uri", "def render_artists(self, artists, *args, **kwargs):\n must_be_drawn = artists is None\n if must_be_drawn:\n self.first_draw(*args, **kwargs)\n else:\n self.update(artists, *args, **kwargs)", "def create_artist(request):\r\n auto_approve = getattr(settings, 'ADMIN_AUTO_APPROVE_ARTIST', 0)\r\n \r\n if request.method == 'POST':\r\n # Check to see if moderation settings allow for the check\r\n if request.user.is_staff and auto_approve == 1:\r\n # Automatically approved due to Moderator status\r\n status = 'A'\r\n else:\r\n status = 'U'\r\n \r\n a = Artist(created_by = request.user, status = status)\r\n form = CreateArtistForm(request.POST, request.FILES, instance = a)\r\n if form.is_valid():\r\n new_artist = form.save(commit=False)\r\n new_artist.save()\r\n form.save_m2m()\r\n return HttpResponseRedirect(new_artist.get_absolute_url())\r\n else:\r\n form = CreateArtistForm()\r\n return render_to_response('webview/create_artist.html', \\\r\n {'form' : form }, \\\r\n 
context_instance=RequestContext(request))", "def home():\n if 'auth_header' in session:\n # get form responses\n type = request.args.get('type')\n time_range = request.args.get('time_range')\n if type and time_range:\n return redirect('/tracks/'+time_range) if type=='tracks' else redirect('/artists/'+time_range)\n elif type:\n return redirect('/tracks') if type=='tracks' else redirect('/artists')\n\n rec_playlist = request.args.get('create_playlist')\n if rec_playlist == 'rec_playlist':\n return redirect('/recommended4376')\n\n # place user data in dynamoDB for (hopefully) later use\n data_cookie = request.cookies.get('data_retrieved')\n resp = make_response(render_template('home.html'))\n if not data_cookie or data_cookie != 'yes':\n resp = set_data_cookies(resp)\n try:\n return resp\n except:\n return redirect('/auth') # send to auth if homepage doesn't load\n\n return redirect('/auth')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that when the user type is a patron, the app routes to the patron info collection page and the users and patron tables are updated
def test_register_patron(self): self.helper_user_type_all('patron', 'Patron Info', 'patron_info', 'I am a patron')
[ "def lookup_patron():\n return app.manager.admin_patron_controller.lookup_patron()", "def manage_single_journal_member(request, user_id):\n\n # Only registry members and registrar memebers can edit vesting members\n if request.user.groups.all()[0].name not in ['registrar_member', 'registry_member', 'vesting_manager']:\n return HttpResponseRedirect(reverse('user_management_created_links'))\n\n is_registry = False;\n \n if request.user.groups.all()[0].name == 'registry_member':\n is_registry = True;\n \n target_member = get_object_or_404(LinkUser, id=user_id)\n\n # Registrar members can only edit their own vesting members\n if request.user.groups.all()[0].name not in ['registry_member']:\n if request.user.registrar != target_member.registrar:\n return HttpResponseRedirect(reverse('user_management_created_links'))\n \n # Vesting managers can only edit their own vesting members\n if request.user.groups.all()[0].name not in ['registry_member', 'registrar_member']:\n if request.user != target_member.authorized_by:\n return HttpResponseRedirect(reverse('user_management_created_links'))\n\n\n context = {'user': request.user, 'target_member': target_member,\n 'this_page': 'users_vesting_users'}\n\n if request.method == 'POST':\n if is_registry:\n form = journal_member_w_group_form_edit(request.POST, prefix = \"a\", instance=target_member)\n else:\n form = journal_member_form_edit(request.POST, prefix = \"a\", instance=target_member)\n\n if form.is_valid():\n form.save()\n\n return HttpResponseRedirect(reverse('user_management_created_links'))\n\n else:\n context.update({'form': form,})\n else:\n if is_registry:\n form = journal_member_w_group_form_edit(prefix = \"a\", instance=target_member)\n else: \n form = journal_member_form_edit(prefix = \"a\", instance=target_member)\n context.update({'form': form,})\n\n context = RequestContext(request, context)\n \n return render_to_response('user_management/manage_single_journal_member.html', context)", "def transfer_patient_list(request):\n user_type = get_user_type(request.user)\n\n if user_type == \"doctor\":\n doctor = Doctor.objects.filter(user=request.user)[0]\n hospitals = Hospital.objects.filter(doctor=doctor)\n patients = Patient.objects.filter(transfer_to__in=hospitals) | Patient.objects.filter(admitted_to__in=hospitals)\n patients = patients.order_by('user__last_name')\n elif user_type == \"admin\":\n h_a = Hospital_Admin.objects.filter(user=request.user)[0]\n hospital = Hospital.objects.filter(hospital_admin=h_a)\n patients = Patient.objects.filter(transfer_to=hospital) | Patient.objects.filter(admitted_to=hospital)\n patients = patients.order_by('user__last_name')\n\n return render(request, 'HealthApps/transfer_patient_list.html', dict(patients=patients, user_type=user_type))", "def buscarusuario(): \n if not current_user.is_authenticated():\n flash('Debe loguearse primeramente!!!!', 'loggin')\n return render_template('index.html')\n \n permission = UserRol('ADMINISTRADOR')\n if permission.can():\n valor = request.args['patron']\n parametro = request.args['parametro']\n if valor == \"\" : \n administrarusuario()\n if parametro == 'fecha_nac':\n p = db_session.query(Usuario).from_statement(\"SELECT * FROM usuario where to_char(\"+parametro+\", 'YYYY-mm-dd') ilike '%\"+valor+\"%'\").all()\n else:\n p = db_session.query(Usuario).from_statement(\"SELECT * FROM usuario where \"+parametro+\" ilike '%\"+valor+\"%'\").all()\n return render_template('usuario/administrarusuario.html', usuarios = p) \n valor = request.args['patron']\n r = 
db_session.query(Usuario).filter_by(usuario=valor)\n return render_template('usuario/administrarusuario.html', usuarios = r)\n else:\n flash('Sin permisos para buscar usuarios', 'permiso')\n return render_template('index.html')", "def _request_handler(self, patron):\n if not patron:\n return INVALID_CREDENTIALS.detailed(_(\"No authenticated patron\"))\n\n credential = AdobeVendorIDModel.get_or_create_patron_identifier_credential(\n patron\n )\n return DeviceManagementRequestHandler(credential)", "def fossil():\n fossil_id = request.args(0) or redirect(URL('index'))\n fossil_record = db.fossil(fossil_id) or redirect(URL('index'))\n #fossil_record = db(db.fossil.id==fossil_id).select().first()\n is_active = False\n i_own = ( auth.user_id == fossil_record['owner'] )\n if auth.user_id == fossil_record['owner']:\n is_active = os.path.exists(fossil_record.link_path)\n db.fossil.file.writable=False\n db.fossil.file.readable=True\n db.fossil.clone_url.writable=False\n db.fossil.clone_url.readable=False\n form=crud.update(db.fossil, fossil_id, )\n #crud.settings.update_ondelete = delete_proc(fossil_record.link_path)\n crud.settings.delete_onaccept = delete_proc\n if request.args(1) == \"link\":\n link_manage(fossil_record.file,\n fossil_record.link_path,\n fossil_record.user_index)\n redirect(URL(a='fossilizer',c='default',f='fossil',\n args=[fossil_record.id]))\n if request.args(1) == \"unlink\" and i_own:\n unlink_manage(fossil_record.link_path)\n redirect(URL(a='fossilizer',c='default',f='fossil',\n args=[fossil_record.id]))\n else:\n db.fossil.file.readable=False\n if fossil_record.visibility:\n form=crud.read(db.fossil, fossil_id, )\n else:\n form=(\"nothing\", \"Here\", \"For\", \"you\")\n return dict(message=T(fossil_record.project_name), form=form,\n fossil=fossil_record, is_active=is_active)", "def url_rewritable(self, task, entry):\n for urlrewriter in get_plugins_by_group('urlrewriter'):\n log.trace('checking urlrewriter %s' % urlrewriter.name)\n if urlrewriter.instance.url_rewritable(self, entry):\n return True\n return False", "def manage_single_journal_member_reactivate(request, user_id):\n\n # Only registry members and registrar memebers can edit vesting members\n if request.user.groups.all()[0].name not in ['registrar_member', 'registry_member', 'vesting_manager']:\n return HttpResponseRedirect(reverse('user_management_created_links'))\n\n target_member = get_object_or_404(LinkUser, id=user_id)\n\n # Registrar members can only edit their own vesting members\n if request.user.groups.all()[0].name not in ['registry_member']:\n if request.user.registrar != target_member.registrar:\n return HttpResponseRedirect(reverse('user_management_created_links'))\n \n # Vesting managers can only edit their own vesting members\n if request.user.groups.all()[0].name not in ['registry_member', 'registrar_member']:\n if request.user != target_member.authorized_by:\n return HttpResponseRedirect(reverse('user_management_created_links'))\n\n context = {'user': request.user, 'target_member': target_member,\n 'this_page': 'users_vesting_users'}\n\n if request.method == 'POST':\n target_member.is_active = True\n target_member.save()\n\n return HttpResponseRedirect(reverse('user_management_manage_journal_member'))\n else:\n form = journal_member_form_edit(prefix = \"a\", instance=target_member)\n context.update({'form': form,})\n\n context = RequestContext(request, context)\n \n return render_to_response('user_management/manage_single_journal_member_reactivate_confirm.html', context)", "def 
_process_registration(populaire, rider, request):\n try:\n # Check for duplicate registration\n check_rider = Rider.objects.get(\n first_name=rider.first_name, last_name=rider.last_name,\n email=rider.email, populaire=populaire)\n url = reverse(\n 'populaires:prereg-duplicate',\n args=(populaire.short_name, populaire.date.strftime('%d%b%Y'),\n check_rider.id))\n except Rider.DoesNotExist:\n # Save new rider pre-registration and send emails to\n # rider and brevet organizer\n rider.save()\n update_google_spreadsheet.delay(populaire.pk)\n email_to_rider.delay(populaire.pk, rider.pk, request.get_host())\n email_to_organizer.delay(populaire.pk, rider.pk, request.get_host())\n url = reverse(\n 'populaires:prereg-confirm',\n args=(populaire.short_name, populaire.date.strftime('%d%b%Y'),\n rider.id))\n return url", "def change_user_type(self):\n user_origin = CustomUser.objects.get(id=self.id)\n if user_origin.user_type != self.user_type:\n ChildProfile.objects.filter(user=self).delete()\n ParentProfile.objects.filter(user=self).delete()\n InstructorProfile.objects.filter(user=self).delete()\n if user_origin.user_type == 1:\n if self.user_type == 2:\n ChildProfile.objects.get_or_create(user=self)\n elif self.user_type == 3:\n ParentProfile.objects.get_or_create(user=self)\n elif self.user_type == 4:\n InstructorProfile.objects.get_or_create(user=self)\n elif user_origin.user_type == 2:\n if self.user_type == 3:\n ParentProfile.objects.get_or_create(user=self)\n elif self.user_type == 4:\n InstructorProfile.objects.get_or_create(user=self)\n elif user_origin.user_type == 3:\n if self.user_type == 2:\n ChildProfile.objects.get_or_create(user=self)\n elif self.user_type == 4:\n InstructorProfile.objects.get_or_create(user=self)\n elif user_origin.user_type == 4:\n if self.user_type == 2:\n ChildProfile.objects.get_or_create(user=self)\n elif self.user_type == 3:\n ParentProfile.objects.get_or_create(user=self)", "def add_as_website_user(self):\n add_patient_as_a_website_user = frappe.db.get_single_value (\n 'Healthcare Settings', 'add_patient_as_a_website_user')\n if add_patient_as_a_website_user:\n original_add_as_website_user(self)", "def buscar(): \n if not current_user.is_authenticated():\n flash('Debe loguearse primeramente!!!!', 'loggin')\n return render_template('index.html')\n \n permission = UserRol('ADMINISTRADOR')\n if permission.can():\n valor = request.args['patron']\n parametro = request.args['parametro']\n if valor == \"\" : \n administrarrol()\n p = db_session.query(Rol).from_statement(\"SELECT * FROM rol where \" + parametro + \" ilike '%\" + valor + \"%'\").all()\n return render_template('rol/administrarrol.html', roles=p)\n valor = request.args['patron']\n r = db_session.query(Rol).filter_by(codigo=valor)\n if r == None:\n return 'no existe concordancia'\n return render_template('rol/administrarrol.html', roles=r)\n else:\n flash('Sin permisos para buscar roles', 'permiso')\n return render_template('index.html')", "def set_registrations(self, on):\n self._can_add_modify_user = on\n if self._can_add_modify_user:\n reply = \"E' ora possibile aggiungere e modificare i dati relativi agli utenti registrati al Secret Santa.\\n\"\n else:\n reply = \"Non è più possibile aggiungere e modificare i dati relativi agli utenti registrati al Secret Santa.\\n\"\n self.update_settings()\n return reply", "def url_rewritable(self, task, entry):\n for urlrewriter in plugin.get_plugins(interface='urlrewriter'):\n if urlrewriter.name in self.disabled_rewriters:\n logger.trace(\"Skipping rewriter {} 
since it's disabled\", urlrewriter.name)\n continue\n logger.trace('checking urlrewriter {}', urlrewriter.name)\n if urlrewriter.instance.url_rewritable(task, entry):\n return True\n return False", "def privilegesForPrincipal(principal, request):", "def manage_single_registrar_member(request, user_id):\n\n if request.user.groups.all()[0].name not in ['registry_member']:\n return HttpResponseRedirect(reverse('user_management_created_links'))\n\n target_registrar_member = get_object_or_404(LinkUser, id=user_id)\n\n context = {'user': request.user, 'target_registrar_member': target_registrar_member,\n 'this_page': 'users_registrar_members'}\n\n if request.method == 'POST':\n\n form = regisrtar_member_form_edit(request.POST, prefix = \"a\", instance=target_registrar_member)\n\n if form.is_valid():\n new_user = form.save()\n\n return HttpResponseRedirect(reverse('user_management_manage_registrar_member'))\n\n else:\n context.update({'form': form,})\n else:\n form = regisrtar_member_form_edit(prefix = \"a\", instance=target_registrar_member)\n context.update({'form': form,})\n\n context = RequestContext(request, context)\n \n return render_to_response('user_management/manage_single_registrar_member.html', context)", "def test_patrol(self):\n mysite = self.get_site()\n\n rc = list(mysite.recentchanges(total=1))\n if not rc:\n self.skipTest('no recent changes to patrol')\n\n rc = rc[0]\n\n # site.patrol() needs params\n with self.assertRaises(Error):\n list(mysite.patrol())\n try:\n result = list(mysite.patrol(rcid=rc['rcid']))\n except APIError as error:\n if error.code == 'permissiondenied':\n self.skipTest(error)\n raise\n\n if hasattr(mysite, '_patroldisabled') and mysite._patroldisabled:\n self.skipTest(f'Patrolling is disabled on {mysite} wiki.')\n\n result = result[0]\n self.assertIsInstance(result, dict)\n\n params = {'rcid': 0, 'revid': [0, 1]}\n\n raised = False\n try:\n # no such rcid, revid or too old revid\n list(mysite.patrol(**params))\n except APIError as error:\n if error.code == 'badtoken':\n self.skipTest(error)\n except Error:\n # expected result\n raised = True\n self.assertTrue(raised, msg='pywikibot.exceptions.Error not raised')", "async def checkperm(self, ctx, *, perm: str):\n perm = perm.replace(\" \", \"_\")\n perm = perm.lower()\n pager = paginator(self.bot)\n lines = []\n x = 1\n members = ctx.guild.members\n members.reverse()\n for m in members:\n for item in m.guild_permissions:\n if item[0] == perm and item[1] is True:\n lines.append(f\"{x}. {m.name}\")\n x += 1\n if len(lines) == 20:\n dex = \"\\n\".join(lines)\n e = discord.Embed(name=f\"**Users with the {perm} perm**\", description=dex)\n pager.add_page(data=e)\n lines = []\n if len(lines) != 0:\n dex = \"\\n\".join(lines)\n e = discord.Embed(name=f\"**Users with the {perm} perm**\", description=dex)\n pager.add_page(data=e)\n if len(pager.pages) != 0:\n await pager.do_paginator(ctx)", "def test_change_program_type(browser, web_server, dbsession):\n\n b = browser\n with transaction.manager:\n u = create_user(admin=True)\n\n login(web_server, browser)\n\n assert b.is_element_visible_by_css(\"#nav-admin\")\n\n b.visit(web_server + \"/admin/referrals/add\")\n\n b.fill(\"name\", \"foobar\")\n b.find_by_name(\"add\").click()\n\n assert b.is_text_present(\"Slug\")\n\n # See the listing does not broken out\n b.visit(web_server + \"/admin/referrals/1/edit\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that when the user type is a fan, the app routes to the fan info collection page and the users and fan tables are updated
def test_register_fan(self): self.helper_user_type_all('fan', 'Fan Info', 'fan_info', 'I am a fan')
[ "def is_fan(obj, user):\n if not is_authenticated(user):\n return False\n obj_type = ContentType.objects.get_for_model(obj)\n likes = Like.objects.filter(\n content_type=obj_type, object_id=obj.id, user=user)\n return likes.exists()", "def user_feeds(request):\n subscription, created = Subscription.objects.get_or_create(user=request.user)\n if created:\n return redirect(reverse('feed_index'))\n feeds = request.user.subscription.feeds.filter(published=True)\n return render_to_response('fast_userpage.html',\n {\n 'feeds':feeds,\n 'title':'Your Feeds',\n },RequestContext(request))", "def stream_user_redirect(request):\n\n user = request.matchdict[\"user\"]\n\n # The client generates /u/ links which include the full account ID\n if user.startswith(\"acct:\"):\n try:\n user = split_user(user)[\"username\"]\n except InvalidUserId:\n # If it's not a valid userid, catch the exception and just treat\n # the parameter as a literal username.\n pass\n\n location = request.route_url(\"activity.user_search\", username=user)\n\n raise httpexceptions.HTTPFound(location=location)", "def set_fan_mode(self, fan):\n self._api.fanmode = self.MAP_FANMODE[fan]\n self.schedule_update_ha_state()", "def fossil():\n fossil_id = request.args(0) or redirect(URL('index'))\n fossil_record = db.fossil(fossil_id) or redirect(URL('index'))\n #fossil_record = db(db.fossil.id==fossil_id).select().first()\n is_active = False\n i_own = ( auth.user_id == fossil_record['owner'] )\n if auth.user_id == fossil_record['owner']:\n is_active = os.path.exists(fossil_record.link_path)\n db.fossil.file.writable=False\n db.fossil.file.readable=True\n db.fossil.clone_url.writable=False\n db.fossil.clone_url.readable=False\n form=crud.update(db.fossil, fossil_id, )\n #crud.settings.update_ondelete = delete_proc(fossil_record.link_path)\n crud.settings.delete_onaccept = delete_proc\n if request.args(1) == \"link\":\n link_manage(fossil_record.file,\n fossil_record.link_path,\n fossil_record.user_index)\n redirect(URL(a='fossilizer',c='default',f='fossil',\n args=[fossil_record.id]))\n if request.args(1) == \"unlink\" and i_own:\n unlink_manage(fossil_record.link_path)\n redirect(URL(a='fossilizer',c='default',f='fossil',\n args=[fossil_record.id]))\n else:\n db.fossil.file.readable=False\n if fossil_record.visibility:\n form=crud.read(db.fossil, fossil_id, )\n else:\n form=(\"nothing\", \"Here\", \"For\", \"you\")\n return dict(message=T(fossil_record.project_name), form=form,\n fossil=fossil_record, is_active=is_active)", "def manager_show_normal_users(request):\n\n if(is_authorized_admin(request)):\n users = User.objects.filter(is_staff__exact=False)\n paginator = Paginator(users, 5)\n page_number = request.GET.get('page')\n page_users = paginator.get_page(page_number)\n return render(request, \"manager/users.html\", {\"users\": page_users})\n else:\n return HttpResponseRedirect(\"/\")", "def is_fan_on(self):\n return self._device['fan']", "def home(request):\n if request.user.is_staff:\n return HttpResponseRedirect(reverse('chat:chat'))\n if request.user.is_authenticated:\n user = models.Profile.objects.get(username=request.user.username)\n friends = user.friends.split(\",\")\n if len(friends) <= 1:\n return HttpResponseRedirect(reverse('chat:find_friends'))\n return HttpResponseRedirect(reverse('chat:chat'))\n else:\n return HttpResponseRedirect(reverse('login'))", "def home():\n if not utils.is_logged_in(session):\n return redirect(url_for(\"index\"))\n\n user_info = db.getUserByUsername(session.get(\"username\", \"\"))\n\n 
feeder_data = {\n str(user_feeder.get(\"feederId\", \"\")) : {\"name\" : user_feeder.get(\"name\", \"\")}\n for user_feeder in user_info.get(\"feeders\", [])}\n\n feeders = db.getFeedersById(list(feeder_data.keys()))\n\n # Parses the data about the feeder such as the STATUS of the feeder (can it\n # still drop food) and the consumed amount of food.\n for feeder in feeders:\n feeder_id = str(feeder.get(\"_id\", \"\"))\n feeder_data[feeder_id][\"status\"] = feeder.get(\"status\", \"FAIL\")\n feeder_eating_data = db.getOngoingConsumptionLogs(feeder_id)\n feeder_data[feeder_id][\"data\"] = [\n {\n \"x\" : data.get(\"hour\", dt.datetime.now()).strftime(\"%Y-%m-%d %H:00\"),\n \"y\" : data.get(\"foodEaten\", 0)\n }\n for data in feeder_eating_data\n ]\n\n if feeder_data[feeder_id][\"status\"] == \"OK\":\n feeder_data[feeder_id][\"status\"] = \"{name} is functioning normally!\".format(name = feeder_data[feeder_id][\"name\"])\n else:\n feeder_data[feeder_id][\"status\"] = \"{name} was unable to drop food!\".format(name = feeder_data[feeder_id][\"name\"])\n\n # Renders the home page for the user.\n return render_template(\"home.html\",\n username = utils.get_username(session),\n feeders = feeder_data)", "def permit(web, args=None):\n web.require_login()\n me = web.user\n u = SuiBirthdayUser.get_by_key_name(me.key().name())\n if not u:\n u = SuiBirthdayUser(key_name=me.key().name(),name=me.name,access_token=me.access_token) #update access_token when user change password\n u.creator = me.isAuthor()\n #get user's friends\n fetch_friends(u)\n web.redirect('/gift')", "def change_user_type(self):\n user_origin = CustomUser.objects.get(id=self.id)\n if user_origin.user_type != self.user_type:\n ChildProfile.objects.filter(user=self).delete()\n ParentProfile.objects.filter(user=self).delete()\n InstructorProfile.objects.filter(user=self).delete()\n if user_origin.user_type == 1:\n if self.user_type == 2:\n ChildProfile.objects.get_or_create(user=self)\n elif self.user_type == 3:\n ParentProfile.objects.get_or_create(user=self)\n elif self.user_type == 4:\n InstructorProfile.objects.get_or_create(user=self)\n elif user_origin.user_type == 2:\n if self.user_type == 3:\n ParentProfile.objects.get_or_create(user=self)\n elif self.user_type == 4:\n InstructorProfile.objects.get_or_create(user=self)\n elif user_origin.user_type == 3:\n if self.user_type == 2:\n ChildProfile.objects.get_or_create(user=self)\n elif self.user_type == 4:\n InstructorProfile.objects.get_or_create(user=self)\n elif user_origin.user_type == 4:\n if self.user_type == 2:\n ChildProfile.objects.get_or_create(user=self)\n elif self.user_type == 3:\n ParentProfile.objects.get_or_create(user=self)", "def handle_user_request(self, request):\n raise NotImplementedError", "def change_feed_status(request):\n if request.method == 'GET':\n feedid = request.GET.get('feedid')\n if feedid:\n try:\n channel = Channel.objects.get(id=feedid)\n if channel.active:\n channel.active = False\n else:\n channel.active = True\n channel.save()\n except:\n return redirect('/summer/list/')\n return HttpResponse(channel.active)\n\n return redirect('/summer/list/')", "def view(self, user, newsletter, *args):\n if user.is_anonymous or user.is_client:\n return False\n\n if user.is_administrator:\n return False\n\n if user.is_manager:\n return False\n\n if user.is_advisor:\n return (\n Newsletter.objects.filter(pk=newsletter.pk).accessible_by(user).exists()\n )\n\n return self.admin_permission(user, newsletter, *args)", "def refresh(self, user):\n return False", "async 
def async_set_fan_mode(self, fan_mode):\n fan = self._fan_to_pizone[fan_mode]\n await self._wrap_and_catch(self._controller.set_fan(fan))", "def show_pddf_fanutil(dut, mode):\n if mode not in [\"direction\", \"getspeed\", \"numfans\", \"status\", \"version\"]:\n st.log(\"Unsupported modes provided \")\n return False\n skip_tmpl = False\n if mode == \"numfans\":\n skip_tmpl = True\n command = \"sudo pddf_fanutil {}\".format(mode)\n output = st.show(dut, command, skip_tmpl=skip_tmpl)\n if mode == \"numfans\":\n return {\"numfans\": utils.remove_last_line_from_string(output)}\n else:\n return output", "def get_case_fan(case_fan_id):\n case_fan_query = case_fan.get_case_fan(case_fan_id)\n if not case_fan_query:\n return custom_response({'error': 'case_fan not found'}, 404)\n \n data = case_fan_schema.dump(case_fan_query)\n return custom_response(data, 200)", "def get(self, request, username=None, format=None):\r\n # get profile by its username.\r\n profile_obj = get_object_or_404(UserProfile, user__username=username)\r\n # check if the profile is private or not.\r\n private_account = False\r\n if profile_obj.private_account and request.user.userprofile not in profile_obj.user.followers.all() and request.user != profile_obj.user:\r\n private_account = True \r\n user = request.user.userprofile\r\n updated = False\r\n followed_by = False\r\n sent_follow_request = False \r\n # check if requested user is authenticated or not.\r\n if request.user.is_authenticated:\r\n # if the user is in profile's followers.\r\n if user in profile_obj.user.followers.all():\r\n followed_by = False\r\n # remove user from profile's followers.\r\n profile_obj.user.followers.remove(user)\r\n # get the follow notification with user as a sender, and profile as a receiver.\r\n notify = get_object_or_404(Notification, sender=user, receiver=profile_obj, notification_type='follow')\r\n # delete follow notification.\r\n notify.delete()\r\n # if profile is private account \r\n elif private_account:\r\n sent_follow_request = True\r\n # check if the user has already sent a follow request to this profile or not. \r\n is_requested = UserProfile.objects.is_request_sent(user, profile_obj) \r\n if not is_requested:\r\n # create a follow request notification of sent as a status type with user as a sender, and profile as a receiver. \r\n Notification.objects.create(sender=user, receiver=profile_obj, status='sent', notification_type='follow_request')\r\n # if the user is not in profile's followers and profile is not private account.\r\n else:\r\n followed_by = True\r\n # add user to profile's followers.\r\n profile_obj.user.followers.add(user) \r\n # create a follow notification with user as a sender, and profile as a receiver. 
\r\n Notification.objects.create(sender=user, receiver=profile_obj, notification_type='follow')\r\n updated = True\r\n data = {\r\n 'updated':updated,\r\n 'followed_by':followed_by,\r\n 'sent_follow_request':sent_follow_request,\r\n }\r\n # get profile followers\r\n followers_qs = profile_obj.get_follower.all()\r\n # get profile following\r\n following_qs = profile_obj.following.all()\r\n # suggested profiles\r\n suggested_profiles = UserProfile.objects.suggested_profiles(user)[:6]\r\n context = {'profile':profile_obj, 'followers':followers_qs, 'following':following_qs, 'suggested_profiles':suggested_profiles}\r\n data['profile_navbar'] = render_to_string('profiles/includes/partial_profile_navbar.html', context, request=request)\r\n data['suggested_profiles'] = render_to_string('home/includes/suggested_profiles.html', context, request=request)\r\n return Response(data)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A view to return the special offers page
def special_offers (request): return render(request, 'special_offers.html')
[ "def offer_detail(self, slug):\n if not self.token:\n return Exception('No token found!')\n response = self.api_request(method='GET', path='offer/%s/' % slug)\n return response", "def emp_profile_offres(request):\n\n usname = request.user.username\n uslname = request.user.last_name\n userinfo = get_real_profile(request)\n offers = userinfo.get_offers().order_by('-created')\n return render_to_response('./profile/profile_emp_offers.html', locals(), context_instance=RequestContext(request))", "def test_offers_retrieve(self):\n pass", "def test_offers_list(self):\n pass", "def product_details(request, product_id):\n query = get_object_or_404(Products, id=product_id)\n all_offers = Offers.objects.all().filter(products__id=product_id)\n context = {\n 'product': query,\n 'offers': all_offers,\n }\n return render(request, 'home/product_details.html', context)", "def browse_view(request):\r\n\r\n return render(request, 'markets/browse.html', {\r\n # The list of propositions to display.\r\n 'propositions': map(lambda prop : {\r\n 'code': prop.code,\r\n 'description': prop.description,\r\n 'trade_volume': prop.trade_volume(\r\n start=datetime.now()-timedelta(days=1)),\r\n 'bid_volume': prop.bid_volume(),\r\n 'resolves': prop.resolves,\r\n 'active': prop.active,\r\n 'leader': {\r\n 'outcome': prop.outcomes_by_price()[0]\r\n if prop.active else prop.outcome,\r\n 'price': prop.outcomes_by_price()[0].latest_price()\r\n },\r\n }, Proposition.objects.all() if 'show-completed' in request.GET else\r\n Proposition.objects.filter(active=True)),\r\n 'show_completed': 'show-completed' in request.GET\r\n })", "def get_offer(self, data):\n payload = {}\n payload.update(self.generic_service)\n payload.update(self.product_service)\n\n r = requests.get(\"http://catalog.bizrate.com/services/catalog/v1/us/{0}\".format(\"offer\"), params=payload)\n print(\"URL: \")\n print(r.url)\n\n print(\"RESPONSE: \")\n print(r.json())\n\n return", "def advisors(request):\n return render(request, \"advisors.html\", {})", "def page_view(request):\n (phone_categories, social_media) = various_caches()\n hot_deals = cache.get('hot_deals') or set_cache(\n HotDeal.objects.filter(item__is_in_stock=True,\n item__quantity__gte=1).distinct(), 'hot_deals')\n context = {'categories': phone_categories, 'social_media': social_media,\n 'hot_deals': list(set(hot_deals))}\n return render(request, 'front/landing_page.html', context)", "def show_remaining_offers(self):\n self.driver.find_element(*self.BUTTON_SHOW_REMAINING_LOC).click()", "def test_show_all_adopter_webpage(self):\n response = self.app.get('/show_all_adopters.html')\n assert 'Show all adopters' in response.data\n self.assertEqual(200, response.status_code, 'Status code is not 200')", "def view_set_listings(request):\n return", "def solvationIndex(request):\n return render(request, 'solvationTools.html')", "def hotels():\n return render_template('besthotels.html')", "def restaurants():\n return render_template('bestrestaurants.html')", "def home():\n\n items = Item.objects(sold = False)\n\n return render_template('base.html', items = items)", "def products_supplements(request):\n products = Product.objects.filter(category=\"3\")\n paginator = Paginator(products, 24)\n\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n return render(request, 'products_supplements.html', {'products': products,'page_obj': page_obj})", "def view_shopping_bag(request):\n return render(request, 'shopping_bag/shopping_bag.html')", "def get_show_url(self, name):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
All dummies must contain unique values, or they will influence the analysis
def test_dummies (self, dummies=()): return len(set(sum(dummies, ()))) == len(sum(dummies, ()))
[ "def _set_dummies(self):\n data_reduced = self.data[self.antecedent]\n self.data_dummies = pd.get_dummies(data_reduced, columns=self.antecedent)", "def dummization(self):\n #TODO: use sklearn ColumnTransformer instead\n\n return pd.get_dummies(\n self.simple_imputer(),\n prefix_sep='_',\n prefix=self.categorical_cols,\n columns=self.categorical_cols,\n drop_first=False\n )", "def create_dummy(data, target, use_dummies=None):\n\n dummies = []\n\n numerical = list(data.select_dtypes(include=[np.number]))\n categorical_f = [\n col for col in data if col not in numerical and col not in target\n ]\n\n for f in categorical_f:\n if f not in target:\n dummy = pd.get_dummies(data[f], prefix=f, drop_first=False)\n data = pd.concat([data, dummy], axis=1)\n data.drop(f, axis=1, inplace=True)\n\n dummies.extend(dummy)\n\n if use_dummies:\n missing = set(use_dummies) - set(dummies)\n for m in missing:\n data[m] = 0\n\n # set new columns to category\n for dummy in dummies:\n data[dummy] = data[dummy].astype('category')\n\n return data, dummies", "def create_dummy(self, drop_first=True):\n for feature_name, feature_values in self.CATEGORICAL_FEATURES:\n nb_possible_values = len(feature_values)\n # append every possible values of the feature to real feature column\n enhanced_feature_series = self.df[feature_name].append(feature_values)\n # get dummy features\n dummy_features_df = pd.get_dummies(enhanced_feature_series, prefix=feature_name, drop_first=drop_first)[:-nb_possible_values]\n # drop old feature column and add dummy features\n self.df.drop(feature_name, axis=1, inplace=True)\n self.df[dummy_features_df.columns] = dummy_features_df.astype(int)", "def design_matrices(df):\n \n X_list = []\n for ii in range(df.shape[1]):\n #TODO: category gives error\n if (df.iloc[:, ii].dtype=='object'):\n X_list.append(pd.get_dummies(df.iloc[:, ii], \n drop_first=True).values)\n else:\n X_list.append(df.iloc[:, ii].values[:, np.newaxis])\n \n return X_list", "def get_sdummies(sdf,\n dummy_columns,\n keep_top,\n threshold = 0.03,\n replace_with='000_OTHERS',\n dummy_info=[],\n dropLast=True):\n total = sdf.count()\n column_i = 0\n\n factor_set = {} # The full dummy sets\n factor_selected = {} # Used dummy sets\n factor_dropped = {} # Dropped dummy sets\n factor_selected_names = {} # Final revised factors\n factor_pro = {} # The proporation of factors\n for string_col in dummy_columns:\n\n if len(dummy_info) == 0:\n # Descending sorting with counts\n sdf_column_count = sdf.groupBy(string_col).count().orderBy(\n 'count', ascending=False)\n sdf_column_count = sdf_column_count.withColumn(\n \"cumsum\",\n F.sum(\"count\").over(Window.partitionBy(\"count\")))\n sdf_column_count = sdf_column_count.withColumn(\n \"cumperc\",col('cumsum')/total)\n sdf_column_count = sdf_column_count.withColumn(\n \"perc\",col('count')/total)\n # Obtain top dummy factors\n sdf_column_top_dummies = sdf_column_count.filter((col('cumperc') <= keep_top[column_i])&(col('perc')>=threshold))\n keep_list = sdf_column_top_dummies.select(string_col,'perc').rdd.map(\n lambda x: tuple(x)).collect()\n keep_list = dict(keep_list)\n perc_list = list(keep_list.values())\n keep_list = list(keep_list.keys())\n\n # Save factor sets\n factor_set[string_col] = dict(sdf_column_count.select(string_col,'perc').rdd.map(lambda x:tuple(x)).collect())\n factor_selected[string_col] = keep_list\n factor_dropped[string_col] = list(set(factor_set[string_col]) - set(keep_list))\n factor_pro[string_col] = perc_list\n # factor_selected_names[string_col] = [string_col + '_' + 
str(x) for x in factor_new ]\n\n # Replace dropped dummies with indicators like `others`\n if len(factor_dropped[string_col]) == 0:\n factor_new = []\n else:\n factor_new = [replace_with]\n factor_new.extend(factor_selected[string_col])\n\n factor_selected_names[string_col] = [\n string_col + '_' + str(x) for x in factor_new\n ]\n\n\n\n else:\n keep_list = dummy_info[\"factor_selected\"][string_col]\n\n # Replace dropped dummy factors with grouped factors.\n sdf = sdf.withColumn(\n string_col,\n when((col(string_col).isin(keep_list)),\n col(string_col)).otherwise(replace_with))\n column_i += 1\n\n # The index of string vlaues multiple columns\n indexers = [\n StringIndexer(inputCol=c, outputCol=\"{0}_IDX\".format(c))\n for c in dummy_columns\n ]\n\n # The encode of indexed vlaues multiple columns\n encoders = [\n OneHotEncoder(dropLast=dropLast,\n inputCol=indexer.getOutputCol(),\n outputCol=\"{0}_ONEHOT\".format(indexer.getOutputCol()))\n for indexer in indexers\n ]\n\n # Vectorizing encoded values\n assembler = VectorAssembler(\n inputCols=[encoder.getOutputCol() for encoder in encoders],\n outputCol=\"features_ONEHOT\")\n\n pipeline = Pipeline(stages=indexers + encoders + [assembler])\n # pipeline = Pipeline(stages=[assembler])\n onehot_model = pipeline.fit(sdf)\n sdf = onehot_model.transform(sdf)\n\n # Drop intermediate columns\n drop_columns = [x + \"_IDX\" for x in dummy_columns]\n drop_columns = [x + \"_ONEHOT\" for x in drop_columns] + drop_columns\n\n sdf = sdf.drop(*drop_columns)\n\n if len(dummy_info) == 0:\n dummy_info = {\n 'factor_set': factor_set,\n 'factor_selected': factor_selected,\n 'factor_proportion': factor_pro,\n 'factor_dropped': factor_dropped,\n 'factor_selected_names': factor_selected_names\n }\n\n return sdf, dummy_info", "def one_hot_encoder(dataframe, nan_as_category = True):\r\n df = dataframe\r\n original_columns = list(df.columns)\r\n df = pd.get_dummies(df, dummy_na= True,drop_first=True)\r\n debug(df.info(memory_usage='deep'))\r\n df = df.loc[:,~df.columns.duplicated()]\r\n debug(df.info(memory_usage='deep'))\r\n new_columns = [c for c in df.columns if c not in original_columns]\r\n const_columns = [c for c in new_columns if df[c].dtype!='object' \\\r\n and np.sum(df[c]) == 0 and np.std(df[c]) == 0]\r\n df.drop(const_columns, axis = 1, inplace = True)\r\n new_columns = list(set(new_columns).difference(set(const_columns)))\r\n return df, new_columns", "def clean_iris(df):\n \n dropcols = ['species_id', 'measurement_id']\n df.drop(columns= dropcols, inplace=True)\n df.rename(columns={'species_name': 'species'}, inplace=True)\n dummy_sp = pd.get_dummies(df[['species']], drop_first=True)\n return pd.concat([df, dummy_sp], axis =1)", "def convertColumnsToDummy(df):\n\n\t#Users categorical information to dummy!\t\n\tres = pd.get_dummies(df['gender'])\n\tdf = df.join(res)\n\tres = pd.get_dummies(df['income'])\n\tdf = df.join(res)\n\tres = pd.get_dummies(df['marital'])\n\tdf = df.join(res)\n\tres = pd.get_dummies(df['education'])\n\tdf = df.join(res)\n\n\t#Images categorical information to dummy!\n\tres = pd.get_dummies(df['bairro1'], prefix=\"bairro1\")\n\tdf = df.join(res)\n\tres = pd.get_dummies(df['graffiti1'], prefix=\"graffiti1\")\n\tdf = df.join(res)\n\tres = pd.get_dummies(df['bairro2'], prefix=\"bairro2\")\n\tdf = df.join(res)\n\tres = pd.get_dummies(df['graffiti2'], prefix=\"graffiti2\")\n\tdf = df.join(res)\n\t\n\treturn df", "def process_dummies_columns(df: pd.DataFrame, columns: list[str]):\n processed_cols = pd.get_dummies(df[columns], 
drop_first=True)\n for col in processed_cols.columns:\n df[col] = processed_cols[col]\n df = df.drop(columns, axis=1)\n return df", "def _check_for_variability(model_design):\n l = min(1000, len(model_design))\n sample = random_rows(model_design, l)\n # convert to float so describe works uniformly on bools\n sample = sample.astype('float')\n sample = sample.describe().transpose()\n\n error = sample[sample[\"std\"] == 0]\n if len(error):\n logger.warn(\"%s columns have no variability\" % len(error))\n for v in error.index.values:\n logger.info(\"no variability in: %s\" % v)\n error = sample[sample[\"count\"] < l]\n if len(error):\n logger.warn(\"%s columns have missing values\" % len(error))\n for v in error.index.values:\n logger.info(\"missing values in: %s\" % v)", "def replace_with_dummies(df, cols):\n for c in cols:\n df = rep_with_dummies_(df, c)\n return df", "def one_hot(df, cols):\n for each in cols:\n dummies = pd.get_dummies(df[each], prefix=each, drop_first=False)\n df = pd.concat([df, dummies], axis=1)\n df.drop(columns=cols, axis=1, inplace=True)\n return df", "def clean_titanic(df):\n \n \n df[\"is_female\"] = df.sex == \"Female\"\n embarked_dummies = pd.get_dummies(df.embarked, prefix='Embarked', drop_first=True)\n class_dummies = pd.get_dummies(df.pclass, prefix='class', drop_first=True)\n\n dropcols = ['deck', 'age', 'embark_town', 'passenger_id', 'embarked', 'sex', 'pclass', 'class']\n df.drop(columns= dropcols, inplace=True)\n\n return pd.concat([df, embarked_dummies, class_dummies], axis =1)", "def check_labels(dataframe):\n possible_models = {}\n if len(dataframe.unidentified.unique()) == 2:\n possible_models['unidentified'] = True\n else:\n possible_models['unidentified'] = False\n # we will use only samples with identifiable \n # scattering popoulations for the other models\n dataframe = dataframe[dataframe['unidentified']==False]\n for l in ['spherical_normal', 'guinier_porod', 'diffraction_peaks']:\n if len(dataframe[l].unique()) == 2:\n possible_models[l] = True\n else:\n possible_models[l] = False\n return possible_models", "def get_category_dummies(df, categories=categories):\n cat_list_col = df['cats'].str.split(',')\n dummied = pd.DataFrame(index=df['cats'].index)\n for cat in categories:\n dummied['category_' + cat] = cat_list_col.apply(lambda cats: cat in cats).astype(int)\n \n dummied_df = pd.concat([df, dummied], axis=1)\n\n return dummied_df", "def _dumify_categorical_features(df):\n prepped = pd.DataFrame(index=df.index)\n for feature in df.columns:\n # print feature, df.dtypes[feature]\n if df.dtypes[feature] == 'object':\n dummied = _dummy_text_feature(df, feature)\n prepped = prepped.join(dummied)\n else:\n prepped = prepped.join(df[feature])\n return prepped", "def train_eval_dummy(dummy_strategy, train_df, eval_df, output_dirp):\n print(train_df.head())\n\n # Write train\n Path(output_dirp).mkdir(parents=True, exist_ok=True)\n train_fp = Path(output_dirp) / \"trainset.tsv\"\n train_df.to_csv(train_fp, sep=\"\\t\", index=False)\n\n # reload train for testing\n train_df = pd.read_csv(train_fp, sep=\"\\t\", converters={\"labels\": literal_eval})\n # write and reload eval set for testing\n eval_fp = Path(output_dirp) / \"testset.tsv\"\n eval_df.to_csv(eval_fp, sep=\"\\t\", index=False)\n eval_df = pd.read_csv(eval_fp, sep=\"\\t\", converters={\"labels\": literal_eval})\n\n # Dataframe to sklearn matrix\n X_train = np.random.rand(train_df[\"text\"].shape[0],) # random dummy data with same shape as train\n X_eval = 
np.random.rand(eval_df[\"text\"].shape[0],) # random dummy data with same shape as train\n y_train = np.array(train_df[\"labels\"].to_list()) # train labels in multihot np.array\n y_eval = np.array(eval_df[\"labels\"].to_list()) # eval labels in multihot np.array\n\n # Define model\n if dummy_strategy == \"constant\":\n c = Counter(np.apply_along_axis(lambda x: str(x.tolist()), 1, y_train).tolist())\n most_freq_nn = next(x[0] for x in c.most_common() if \"1\" in x[0])\n most_freq_nn = np.array(literal_eval(most_freq_nn))\n model = DummyClassifier(strategy=dummy_strategy, constant=most_freq_nn, random_state=settings.RANDOM_STATE)\n else:\n model = DummyClassifier(strategy=dummy_strategy, random_state=settings.RANDOM_STATE)\n\n # Train the model\n print(f\"Training dummy model with strategy: {dummy_strategy}\")\n model.fit(X_train, y_train)\n\n # Evaluate the model on eval set\n y_pred = model.predict(X_eval)\n print(y_pred)\n result = {\n \"LRAP\": label_ranking_average_precision_score(y_eval, y_pred),\n \"eval_loss\": label_ranking_loss(y_eval, y_pred),\n }\n\n # Write model result and outputs\n eval_df[\"y_pred\"] = y_pred.tolist()\n predictions_fp = Path(output_dirp) / \"testset_with_predictions.tsv\"\n eval_df.to_csv(predictions_fp, sep=\"\\t\", index=False)\n\n with open(Path(output_dirp) / \"result.json\", \"wt\") as result_out:\n json.dump(result, result_out)\n\n return result", "def get_dummies(self, sep: str = \"|\"):\n # we need to cast to Series of strings as only that has all\n # methods available for making the dummies...\n result, name = self._data.array._str_get_dummies(sep)\n return self._wrap_result(\n result,\n name=name,\n expand=True,\n returns_string=False,\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
only allows answers in the sets AFERMATIVE and NEGATIVE.
def answer_binary(prompt): answer = input(prompt).lower() while (answer not in AFERMATIVE) and (answer not in NEGATIVE): print(f"your answer must be any of '{AFERMATIVE}'" f" if yes, or '{NEGATIVE}' if no.") answer = input(prompt).lower() return answer
[ "def conflateAnswers(answers):\n\n if 'objective' in answers or 'neutral' in answers:\n answers = ['neutral']\n return answers", "def test_is_antichain_2(set_of_sets):\n assert not is_antichain(set_of_sets)", "def any_answered(self) -> Set[str]:\n return reduce(set.union, self.answers, initial=set())", "def test_is_antichain_1(set_of_sets):\n assert is_antichain(set_of_sets)", "def isinvertible(self):\n roots = self.ma_roots()\n if len(roots) != 0:\n return all(abs(root) > 1 for root in roots)\n else:\n return 'There are no MA terms.'", "def test_student_set_answer_not_valid() -> None:\n student = Student(1, 'John')\n q1 = MultipleChoiceQuestion(1, \"a b c or d?\", ['a', 'b', 'c', 'd'])\n a1 = Answer('z')\n q2 = CheckboxQuestion(5, \"do you like dogs?\", ['yes', 'no', 'sometimes'])\n a2 = Answer('yes')\n q3 = NumericQuestion(2, \"Pick num\", 1, 5)\n a3 = Answer(7)\n q4 = YesNoQuestion(4, \"T or F\")\n a4 = Answer(\"True\")\n student.set_answer(q1, a1)\n student.set_answer(q2, a2)\n student.set_answer(q3, a3)\n student.set_answer(q4, a4)\n assert len(student._answers) == 0\n assert student._answers == {}", "def NO_test(s, A, W):\n\n\n for w in W.extreme_points:\n scalar_utility_s = Utils.dot(w, s)\n for t in A:\n scalar_utility_t = Utils.dot(w, t)\n if scalar_utility_s - scalar_utility_t < 0:\n return False\n return True", "def incorrect(self):\n return [ans for ans in self.practices if not ans.is_correct]", "def test_ask_false(self):\n res = self.graph.query('ASK { <http://goonmill.org/2007/skill.n3#baz> a <http://goonmill.org/2007/skill.n3#Foo> } ')\n self.assertEquals(res.askAnswer, False, \"The answer should have been that the triple was not found\")", "def all_answered(self) -> Set[str]:\n return reduce(set.intersection, self.answers, initial=self.any_answered)", "def test_Student_has_nonexistant() -> None:\n student = Student(1, 'John')\n\n q1 = NumericQuestion(1, \"Pick num\", 1, 5)\n q2 = MultipleChoiceQuestion(2, \"Pick text\", [\"opt 1\", \"opt 2\"])\n q3 = CheckboxQuestion(3, \"Pick multiple\", [\"a\", \"b\", \"c\"])\n q4 = YesNoQuestion(4, \"T or F\")\n\n a1 = Answer(2)\n a2 = Answer(\"opt 1\")\n a3 = Answer([\"a\", \"b\"])\n\n student.set_answer(q1, a1)\n student.set_answer(q2, a2)\n student.set_answer(q3, a3)\n\n assert not student.has_answer(q4)", "def test_negtive_inputs_against_positive(self):\n print(\"Testing that signal_variance returns the same result for input and -input\")\n\n positive = signal_variance([30, 20, 10, 40, 50])\n negative = signal_variance([-30, -20, -10, -40, -50])\n\n self.assertEqual(positive, negative,\n \"signal_variance is not returning the same result for input and -input\")", "def _positive(self, cr, uid, ids, context=None):\n for fact in self.browse(cr, uid, ids, context=context):\n if fact.week_factor<0 or fact.holiday_factor<0 or fact.maximum<0 :\n return False\n return True", "def _positive_factor(self, cr, uid, ids, context=None):\n for fact in self.browse(cr, uid, ids, context=context):\n if fact.factor<0 or fact.penalty_amount<0:\n return False\n return True", "def test_set_validator():\n v = SetValidator(existing=['a', 'b', 'c'])\n assert not v.validate('a') and not v.valid\n assert v.validate('bc') and v.valid", "def test_mix_of_non_primitive_operators(self):\n self.assert_to_cnf_transformation(\n 'A xor (B -> C -> D) nand (E iff F)',\n '(not A or ~B or ~C or D or not E or not F) and '\n '(A or B or not E or not F) and '\n '(A or C or not E or not F) and '\n '(A or not D or not E or not F) and '\n '(not A or ~B or ~C or D or E or F) 
and '\n '(A or B or E or F) and '\n '(A or C or E or F) and '\n '(A or not D or E or F)')\n self.assert_to_cnf_transformation(\n '(A nand B) -> (C nor D) -> (E iff F)',\n r'(A \\/ C \\/ D \\/ F or not E) /\\ (A \\/ C \\/ D \\/ E or not F) /\\ '\n r'(B \\/ C \\/ D \\/ F or not E) /\\ (B \\/ C \\/ D \\/ E or not F)')", "def test_neutral_only(self):\n my_sentiment = sentiment_analyzer.get_sentiment(\"The sky is blue.\")\n self.assertTrue((my_sentiment[\"Overall_Sentiment\"] == 'Neutral') &\n (my_sentiment[\"Positive_Sentences\"] == 0) &\n (my_sentiment[\"Negative_Sentences\"] == 0) &\n (my_sentiment[\"Neutral_Sentences\"] == 1) &\n (my_sentiment[\"Total_Sentences\"] == 1))", "def deny(v):\n return insist(not v)", "def test_check_unmask_answerpool(self):\n xml = textwrap.dedent(\"\"\"\n <problem>\n <multiplechoiceresponse>\n <choicegroup type=\"MultipleChoice\" answer-pool=\"4\">\n <choice correct=\"false\">Apple</choice>\n <choice correct=\"false\">Banana</choice>\n <choice correct=\"false\">Chocolate</choice>\n <choice correct =\"true\">Donut</choice>\n </choicegroup>\n </multiplechoiceresponse>\n </problem>\n \"\"\")\n module = CapaFactory.create(xml=xml)\n with patch.object(module.runtime, 'publish') as mock_track_function:\n get_request_dict = {CapaFactory.input_key(): 'choice_2'} # mask_X form when masking enabled\n module.submit_problem(get_request_dict)\n mock_call = mock_track_function.mock_calls[1]\n event_info = mock_call[1][2]\n assert event_info['answers'][CapaFactory.answer_key()] == 'choice_2'\n # 'permutation' key added to record how problem was shown\n assert event_info['permutation'][CapaFactory.answer_key()] ==\\\n ('answerpool', ['choice_1', 'choice_3', 'choice_2', 'choice_0'])\n assert event_info['success'] == 'incorrect'" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
testing if the create_default_project will create a default maya project structure with a proper workspace.mel
def test_create_default_project_will_create_a_workspace_mel_file( create_test_data, trash_bin ): arch = Archiver() tempdir = tempfile.gettempdir() project_path = arch.create_default_project(tempdir) trash_bin.append(project_path) workspace_mel_path = os.path.join(project_path, "workspace.mel") assert os.path.exists(workspace_mel_path)
[ "def test_no_project_newproj(self):\n self._test_non_admin_operation(ccdlib.OP_NEWPROJ)", "def prepare_project(project_manager, project_name=\"working_project\"):\n project = project_manager.LoadProject(project_name)\n if not project:\n print(\"Unable to loat a project '\" + project_name + \"'\")\n print(\"Then creating a project '\" + project_name + \"'\")\n project = project_manager.CreateProject(project_name)\n print(f'\"{project_name}\" is created')\n else:\n print(f'\"{project_name}\" is loaded')\n\n return project", "def test_create_default_project_workspace_mel_already_exists(\n create_test_data, trash_bin\n):\n data = create_test_data\n arch = Archiver()\n tempdir = tempfile.gettempdir()\n\n # there should be no error to call it multiple times\n project_path = arch.create_default_project(tempdir)\n trash_bin.append(project_path)\n\n project_path = arch.create_default_project(tempdir)\n project_path = arch.create_default_project(tempdir)", "def test_create_default_project_workspace_mel_content_is_correct(\n create_test_data, trash_bin\n):\n arch = Archiver()\n tempdir = tempfile.gettempdir()\n\n project_path = arch.create_default_project(tempdir)\n trash_bin.append(project_path)\n\n workspace_mel_path = os.path.join(project_path, \"workspace.mel\")\n\n with open(workspace_mel_path) as f:\n content = f.read()\n\n expected_result = \"\"\"// Anima Archiver Default Project Definition\n\nworkspace -fr \"translatorData\" \"data\";\nworkspace -fr \"offlineEdit\" \"scenes/edits\";\nworkspace -fr \"renderData\" \"renderData\";\nworkspace -fr \"scene\" \"scenes\";\nworkspace -fr \"3dPaintTextures\" \"sourceimages/3dPaintTextures\";\nworkspace -fr \"eps\" \"data\";\nworkspace -fr \"OBJexport\" \"data\";\nworkspace -fr \"mel\" \"scripts\";\nworkspace -fr \"furShadowMap\" \"renderData/fur/furShadowMap\";\nworkspace -fr \"particles\" \"cache/particles\";\nworkspace -fr \"audio\" \"sound\";\nworkspace -fr \"scripts\" \"scripts\";\nworkspace -fr \"sound\" \"sound\";\nworkspace -fr \"DXF_FBX export\" \"data\";\nworkspace -fr \"furFiles\" \"renderData/fur/furFiles\";\nworkspace -fr \"depth\" \"renderData/depth\";\nworkspace -fr \"autoSave\" \"autosave\";\nworkspace -fr \"furAttrMap\" \"renderData/fur/furAttrMap\";\nworkspace -fr \"diskCache\" \"data\";\nworkspace -fr \"fileCache\" \"cache/nCache\";\nworkspace -fr \"ASS Export\" \"data\";\nworkspace -fr \"FBX export\" \"data\";\nworkspace -fr \"sourceImages\" \"sourceimages\";\nworkspace -fr \"FBX\" \"data\";\nworkspace -fr \"DAE_FBX export\" \"data\";\nworkspace -fr \"movie\" \"movies\";\nworkspace -fr \"Alembic\" \"data\";\nworkspace -fr \"DAE_FBX\" \"data\";\nworkspace -fr \"iprImages\" \"renderData/iprImages\";\nworkspace -fr \"mayaAscii\" \"scenes\";\nworkspace -fr \"furImages\" \"renderData/fur/furImages\";\nworkspace -fr \"furEqualMap\" \"renderData/fur/furEqualMap\";\nworkspace -fr \"illustrator\" \"data\";\nworkspace -fr \"DXF_FBX\" \"data\";\nworkspace -fr \"mayaBinary\" \"scenes\";\nworkspace -fr \"move\" \"data\";\nworkspace -fr \"images\" \"images\";\nworkspace -fr \"fluidCache\" \"cache/nCache/fluid\";\nworkspace -fr \"clips\" \"clips\";\nworkspace -fr \"ASS\" \"data\";\nworkspace -fr \"OBJ\" \"data\";\nworkspace -fr \"templates\" \"assets\";\nworkspace -fr \"shaders\" \"renderData/shaders\";\n\"\"\"\n assert content == expected_result", "def test_empty_project(self):\n self.pid2 = create_project(name=\"test project2\")\n self.assertEqual(len(get_folders_in_project(self.pid2)), 0)", "def test_default_working_dir(main_window, tmpdir):\n 
projects = main_window.get_plugin(Plugins.Projects)\n unittest_plugin = main_window.get_plugin(UnitTestPlugin.NAME)\n project_dir = str(tmpdir)\n\n assert unittest_plugin.get_widget().default_wdir == os.getcwd()\n\n if SPYDER6:\n projects.create_project(project_dir)\n else:\n projects._create_project(project_dir)\n assert unittest_plugin.get_widget().default_wdir == project_dir\n\n projects.close_project()\n assert unittest_plugin.get_widget().default_wdir == os.getcwd()", "def test_delete_default_project(self, logger, rw_conman_proxy):\n logger.debug('Deleting the default project')\n rift.auto.mano.delete_project(rw_conman_proxy, 'default')", "def test_create_project(self):\n self.assertIsNotNone(self.pid)", "def test_no_project_setproj(self):\n self._test_non_admin_operation(ccdlib.OP_SETPROJ)", "def test_project_creation(self):\n self.assertEqual(self.proj1.name, 'Project 1')\n self.assertNotEqual(self.proj1.name, self.proj2.name)\n self.assertEqual(self.proj1.creator.username, 'User2')", "def test_no_project_showproj(self):\n self._test_non_admin_operation(ccdlib.OP_SHOWPROJ)", "def PLATFORM_CREATE_PROJECT(self):\n\t\treturn \"\"\"This is how to create project\"\"\"", "def create_new_project():\n project_name = self.project_name.get()\n if project_name == '':\n mb.showwarning('Error', 'The name of the project is blank', parent=self)\n else:\n new_project = pp.Pydef_Project(project_name) # create the new project with the given name\n parent.load_project(new_project) # load the new project\n self.destroy() # close the window", "def test_create_projects(self, logger, rw_conman_proxy, rbac_test_data):\n projects_test_data = rbac_test_data['projects']\n\n # Create all projects mentioned in projects_test_data and verify if they are created\n for project in projects_test_data:\n logger.debug('Creating project {}'.format(project))\n rift.auto.mano.create_project(rw_conman_proxy, project)", "def test_no_project_updateproj(self):\n self._test_non_admin_operation(ccdlib.OP_UPDATEPROJ)", "def test_no_project_delproj(self):\n self._test_non_admin_operation(ccdlib.OP_DELPROJ)", "def test_empty_project(self):\n prj = self._read_string(\"\"\"\n-- This is a comment and it will be ignored\nProject 3322 \"Empty project test\"\n\"\"\")\n self.failUnlessEqual(prj.id, '3322')\n self.failUnlessEqual(prj.title, 'Empty project test')", "def test_create_same_name(self):\n self.assertRaises(ValueError, lambda: create_project(\"test project\"))", "def test_storage_project_iso_create(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
testing if the content of the workspace.mel file is correct when the create_default_project method is used.
def test_create_default_project_workspace_mel_content_is_correct( create_test_data, trash_bin ): arch = Archiver() tempdir = tempfile.gettempdir() project_path = arch.create_default_project(tempdir) trash_bin.append(project_path) workspace_mel_path = os.path.join(project_path, "workspace.mel") with open(workspace_mel_path) as f: content = f.read() expected_result = """// Anima Archiver Default Project Definition workspace -fr "translatorData" "data"; workspace -fr "offlineEdit" "scenes/edits"; workspace -fr "renderData" "renderData"; workspace -fr "scene" "scenes"; workspace -fr "3dPaintTextures" "sourceimages/3dPaintTextures"; workspace -fr "eps" "data"; workspace -fr "OBJexport" "data"; workspace -fr "mel" "scripts"; workspace -fr "furShadowMap" "renderData/fur/furShadowMap"; workspace -fr "particles" "cache/particles"; workspace -fr "audio" "sound"; workspace -fr "scripts" "scripts"; workspace -fr "sound" "sound"; workspace -fr "DXF_FBX export" "data"; workspace -fr "furFiles" "renderData/fur/furFiles"; workspace -fr "depth" "renderData/depth"; workspace -fr "autoSave" "autosave"; workspace -fr "furAttrMap" "renderData/fur/furAttrMap"; workspace -fr "diskCache" "data"; workspace -fr "fileCache" "cache/nCache"; workspace -fr "ASS Export" "data"; workspace -fr "FBX export" "data"; workspace -fr "sourceImages" "sourceimages"; workspace -fr "FBX" "data"; workspace -fr "DAE_FBX export" "data"; workspace -fr "movie" "movies"; workspace -fr "Alembic" "data"; workspace -fr "DAE_FBX" "data"; workspace -fr "iprImages" "renderData/iprImages"; workspace -fr "mayaAscii" "scenes"; workspace -fr "furImages" "renderData/fur/furImages"; workspace -fr "furEqualMap" "renderData/fur/furEqualMap"; workspace -fr "illustrator" "data"; workspace -fr "DXF_FBX" "data"; workspace -fr "mayaBinary" "scenes"; workspace -fr "move" "data"; workspace -fr "images" "images"; workspace -fr "fluidCache" "cache/nCache/fluid"; workspace -fr "clips" "clips"; workspace -fr "ASS" "data"; workspace -fr "OBJ" "data"; workspace -fr "templates" "assets"; workspace -fr "shaders" "renderData/shaders"; """ assert content == expected_result
[ "def test_create_default_project_will_create_a_workspace_mel_file(\n create_test_data, trash_bin\n):\n arch = Archiver()\n tempdir = tempfile.gettempdir()\n\n project_path = arch.create_default_project(tempdir)\n trash_bin.append(project_path)\n\n workspace_mel_path = os.path.join(project_path, \"workspace.mel\")\n\n assert os.path.exists(workspace_mel_path)", "def test_empty_project(self):\n prj = self._read_string(\"\"\"\n-- This is a comment and it will be ignored\nProject 3322 \"Empty project test\"\n\"\"\")\n self.failUnlessEqual(prj.id, '3322')\n self.failUnlessEqual(prj.title, 'Empty project test')", "def _has_workspace(self, job):\r\n\t\tif job.workspace is None or not os.path.exists(job.workspace):\r\n\t\t\t# No workspace has been defined (admin auto-add) or the initial\r\n\t\t\t# workspace was created by another user and does not exist on \r\n\t\t\t# the active computer.\r\n\t\t\tworkspace = ProjectWorkspace.make(job.job_num, self._app_data.users.log)\r\n\t\t\tif workspace is not None:\r\n\t\t\t\tjob.workspace = workspace\r\n\t\t\t\treturn True\r\n\t\telse:\r\n\t\t\treturn True", "def test_create_default_project_workspace_mel_already_exists(\n create_test_data, trash_bin\n):\n data = create_test_data\n arch = Archiver()\n tempdir = tempfile.gettempdir()\n\n # there should be no error to call it multiple times\n project_path = arch.create_default_project(tempdir)\n trash_bin.append(project_path)\n\n project_path = arch.create_default_project(tempdir)\n project_path = arch.create_default_project(tempdir)", "def _load_project(self, thefile):\n if not thefile: #no file specified so fail!\n return False\n \n linesin = list()\n filein = codecs.open(thefile, mode='r', encoding='utf-8')\n for aline in filein.readlines():\n if aline.strip():\n linesin.extend([aline.strip()])\n filein.close()\n lines = ''.join(linesin)\n self.root = etree.fromstring(lines)\n self.settings = self.root.find(\"settings\")\n etree.strip_attributes(self.settings, ['template',])\n self.smode = self.settings.find(\"mode\")\n #can't save project until after template already applied\n #so template setting is not needed\n self.sf1 = self.settings.find(\"f1\")\n self.sf2 = self.settings.find(\"f2\")\n self.sf4 = self.settings.find(\"f4\")\n self.trout = self.root.find(\"tree\")\n self._fix_old_proj_iid(self.trout)\n \n self.old_mode = dict(self.smode.attrib)\n if 'version' not in self.smode.attrib:\n self.qr.put(('MESSAGEBOXASKOKCANCEL', (\\\n 'Project created in old format!', \\\n \"This will attempt to update the project file \" + \\\n \"format to the current standard, every field \" + \\\n \"must be verified. It may be faster to \" + \\\n \"recreate the project from scratch. 
\" +\n \"Do you wish to continue?\")))\n if not self._wait_for_responce():\n return False\n self.smode.attrib['version'] = THIS_VERSION\n \n if 'idiot' in self.old_mode and self.old_mode['idiot'] == 'True':\n self._upgrade_child_of(self.trout)\n else:\n #data doesn't need upgrading\n pass\n if 'preferred' in self.smode.attrib:\n if self.smode.attrib['preferred'] == 'True':\n self.smode.attrib['preferred'] = '1'\n# self.preferred = 1\n elif self.smode.attrib['preferred'] == 'False':\n self.smode.attrib['preferred'] = '0'\n# self.preferred = 0\n self.preferred = int(self.smode.attrib['preferred'])\n else:\n self.preferred = 0\n #now pass self.preferred back to gui!\n self.qr.put(('PREFERRED', self.preferred))\n \n #now check the mode radio buttons\n idiot_case = self._get_idiot_case_mode_for_load_project()\n if idiot_case == 1: # downgrade\n self.mode = 0\n self.qr.put(('MESSAGEBOXASKOKCANCEL', ('Confirm Downgrade?', \\\n \"This will downgrade this project from 'Advanced' \" \\\n + \"to 'Simple'. Some data may be lost.\" )))\n #if not OK give up\n if not self._wait_for_responce():\n return False\n #do downgrade!\n #remove all non idiot tags\n difference = set(SET_TAGS['en-US'].keys()).\\\n difference(set(IDIOT_TAGS.keys))\n etree.strip_attributes(self.trout, difference)\n etree.strip_attributes(self.sf1, difference)\n pass\n elif idiot_case == 2: # upgrade:\n self.qr.put(('MESSAGEBOXASKOKCANCEL', ('Confirm Upgrade?', \\\n \"This will upgrade this project from 'Simple' to \" \\\n + \"'Advanced'.\" )))\n #if not OK give up\n if not self._wait_for_responce():\n return False\n self.mode = 1\n else:\n pass\n self.template = dict(self.sf1.attrib)\n\n if self.mode == 0:\n self.smode.attrib['Idiot'] = 'True'\n self.list_of_tags =list(set(IDIOT_TAGS.keys()))\n #so list_of_tags is a set of all idiot tags\n all_tags = self.recommendedTags + list(set(self.recommendedTags)\\\n .difference(set(IDIOT_TAGS.keys())))\n #so all_tags now holds a LIST of recommended tags\n # followed by any idiot tags left out\n else:\n self.smode.attrib['Idiot'] = 'False'\n #so list_of_tags holds all advanced tags\n self.list_of_tags = list(set(SET_TAGS['en-US'].keys()))\n all_tags = self.recommendedTags + \\\n list(set(self.recommendedTags).\\\n difference(set(SET_TAGS['en-US'].keys())))\n #all_tags now holds a LIST of recommended tags\n # followed by any advanced tags left out\n self.preferred = int(self.smode.attrib['preferred'] == 'True')\n self.qr.put(('TXTPREFCHARDEL', (0.0, 9999.9999)))\n if self.sf2.text != None:\n self.qr.put(('TXTPREFCHARINSERT', (9999.9999, self.sf2.text)))\n\n #clear tagtree\n self.qr.put(('CLEARTAGTREE', None))\n self.qr.put(('INSERTTAGTREETAGS', all_tags))\n self.qr.put(('SETTAGTREE', 'TIT2'))\n #now select tags\n for item in self.sf1.attrib.keys():\n self.qr.put(('SELECTIONTAGTREE', item))\n #f4 feature phone folders\n self.qr.put(('ENTERLIST',self.sf4.get('folderList')))\n if 'is_copy_playlists_to_top' in self.sf4.attrib:\n self.qr.put(('IS_COPY_PLAYLISTS_TO_TOP', \\\n 0 if self.sf4.attrib['is_copy_playlists_to_top'] == 'False' \\\n else 1))\n if 'M3UorM3U8' in self.sf4.attrib:\n self.qr.put(('M3UorM3U8', int(self.sf4.attrib['M3UorM3U8'])))\n\n # unpickle hashed graphic\n if thefile[:-4]: \n picklein = thefile[:-4] + '.pkl'\n self.hashed_graphics = pickle.load(open(picklein, 'rb')) \\\n if os.path.isfile(picklein) \\\n else dict()\n return True", "def test_no_project_newproj(self):\n self._test_non_admin_operation(ccdlib.OP_NEWPROJ)", "def test_empty_project_no_title(self):\n wb = \"\"\"Project 
3322\"\"\"\n prj = self._read_string(wb)\n self.failUnlessEqual(prj.id, '3322')\n self.failUnlessEqual(prj.title, '3322')", "def check_readme(self):\n readme_path = join(ProjectChecker.project_dir, 'README.md')\n if not isfile(readme_path):\n raise ProjectAnomaly(\"Every project must include a README.md\")", "def is_project_directory():\n global command\n result = False\n\n logging.print_info(\n 'Checking current working directory (' + cwd + ') for project...')\n\n metadata_found = os.path.exists(metadata_path)\n outline_found = os.path.exists(outline_path)\n content_found = os.path.exists(content_path)\n output_found = os.path.exists(output_path)\n\n if ((metadata_found is False) and (outline_found is False) and\n (metadata_found is False) and (outline_found is False)):\n logging.print_warning(\n 'Tanagra project not found in current directory: ' + cwd)\n elif ((metadata_found is False) or (outline_found is False) or\n (metadata_found is False) or (outline_found is False)):\n # project not properly initialized\n logging.print_error('Tanagra project not properly initialized.')\n\n if metadata_found is False:\n logging.print_error(' - metadata.md file not found.')\n\n if outline_found is False:\n logging.print_error(' - outline.md file not found.')\n\n if content_found is False:\n logging.print_error(' - content/ directory not found.')\n\n if output_found is False:\n logging.print_error(' - output/ directory not found.')\n\n logging.print_warning(\n 'Please run Tanagra to build new book template or attempt to resolve issues manually.')\n print('')\n else:\n result = True\n\n return result", "def is_compiled(self):\n return (super(self.__class__, self).is_compiled and\n os.access(self.languagemodel_file, os.R_OK) and\n os.access(self.dictionary_file, os.R_OK))", "def test_no_meta_yaml(datafiles):\n current_folder = Path.cwd()\n os.chdir(datafiles)\n with pytest.raises(SystemExit):\n project = Project(rules=RULES)\n os.chdir(str(current_folder))", "def test_empty_project(self):\n self.pid2 = create_project(name=\"test project2\")\n self.assertEqual(len(get_folders_in_project(self.pid2)), 0)", "def test_compare_very_different_projects(self):\n app = App(None, False)\n app.load_reference_xml_file(\"Reference_project.xml\")\n app.load_compared_xml_file(\"Different_project.xml\")\n app.output_file = \"different.xlsx\" # Output\n self.assertEqual(app.launch_compare(), False)", "def test_generate_workfolder_local(self):\n assert bs.generate_workfolder(None) == os.getcwd()", "def test_add_project(self):\n # nonexistent folder\n os.environ['KEPLER_HOME'] = self.config_dir\n cmd = 'kepler add project -n foo -p /tmp/foo'.split()\n out = check_output(cmd, stderr=STDOUT)\n self.assertTrue(out.rstrip().decode('utf8').endswith(\n 'No such directory: /tmp/foo'))\n\n cmd = 'kepler add project -n foo -p .'.split()\n check_call(cmd)\n engine = get_engine()\n df = pd.read_sql_table('projects', engine)\n p = df[df['name'] == 'foo'].iloc[0]\n self.assertEqual(p['location'], os.getcwd())", "def _checkModelConfig(self):\n if (self.modelConfig.__eq__('')):\n print('Debe cargar primero el archivo de configuración')\n self.statusBar().showMessage('Debe cargar primero el archivo de configuración')\n return False\n else:\n return True #true porque no esta vacio", "def _has_pyproject_file() -> bool:\n return any(\"pyproject.toml\" in path for path in glob.glob(\"*\"))", "def test_project_creation(self):\n self.assertEqual(self.proj1.name, 'Project 1')\n self.assertNotEqual(self.proj1.name, self.proj2.name)\n 
self.assertEqual(self.proj1.creator.username, 'User2')", "def testEmptyBlueprintFile(self):\n blueprint = self.CreateBlueprint()\n file_contents = workspace_lib.ReadConfigFile(blueprint.path)\n\n self.assertIn(blueprint_lib.BRICKS_FIELD, file_contents)\n self.assertIn(blueprint_lib.BSP_FIELD, file_contents)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
testing if no error will be raised when the workspace.mel file is already there
def test_create_default_project_workspace_mel_already_exists( create_test_data, trash_bin ): data = create_test_data arch = Archiver() tempdir = tempfile.gettempdir() # there should be no error to call it multiple times project_path = arch.create_default_project(tempdir) trash_bin.append(project_path) project_path = arch.create_default_project(tempdir) project_path = arch.create_default_project(tempdir)
[ "def dirty(self):\n if os.path.exists(self.file_path):\n return False\n else:\n raise RuntimeError(\"Source file missing: %s\" % self.file_path)", "def _has_workspace(self, job):\r\n\t\tif job.workspace is None or not os.path.exists(job.workspace):\r\n\t\t\t# No workspace has been defined (admin auto-add) or the initial\r\n\t\t\t# workspace was created by another user and does not exist on \r\n\t\t\t# the active computer.\r\n\t\t\tworkspace = ProjectWorkspace.make(job.job_num, self._app_data.users.log)\r\n\t\t\tif workspace is not None:\r\n\t\t\t\tjob.workspace = workspace\r\n\t\t\t\treturn True\r\n\t\telse:\r\n\t\t\treturn True", "def test_create_default_project_will_create_a_workspace_mel_file(\n create_test_data, trash_bin\n):\n arch = Archiver()\n tempdir = tempfile.gettempdir()\n\n project_path = arch.create_default_project(tempdir)\n trash_bin.append(project_path)\n\n workspace_mel_path = os.path.join(project_path, \"workspace.mel\")\n\n assert os.path.exists(workspace_mel_path)", "def test_knows_if_desnt_exists(self):\r\n existing_nfile = NFile(self._non_existing_file.name)\r\n self._non_existing_file.close()\r\n self.assertFalse(existing_nfile._exists())", "def test_recreate_failure():\n\t\n\twith workspace(virtualenvs = ['venv']) as ws:\n\t\tws.create_file('venv/dummy')\n\t\t\n\t\tws.run('venv --python no-such-python', expect_error = True)\n\t\t\n\t\tws.check_dir(['venv'])\n\t\tws.check_file('venv/dummy')", "def testFileExistence(self):\n check_file_exists(\"null\")\n check_file_exists(\"sample/null.tif\")\n check_file_exists(\"test_merger.py\")", "def test_file_context(self):\n with ipyenv.RWFreeNamedTempFile(source='', target_dir='./') as filename:\n self.assertTrue(os.path.exists(filename))\n self.assertFalse(os.path.exists(filename))", "def test_already_created(self):\n create_auto_init_templates(self.user_home_directory)\n\n with self.assertRaises(GitHomeTemplatesExists):\n create_auto_init_templates(self.user_home_directory)", "def test_create_project_twice_raises_error(tmp_cwd: Path) -> None:\n app_name = \"app_name\"\n app_description = \"A longer, human friendly, app description.\"\n gen.create_project(app_name, app_description, TESTS_AUTHOR_NAME, TESTS_AUTHOR_EMAIL)\n\n with pytest.raises(FileExistsError):\n gen.create_project(\n app_name, app_description, TESTS_AUTHOR_NAME, TESTS_AUTHOR_EMAIL\n )", "def test_knows_if_exists(self):\r\n existing_nfile = NFile(self._existing_file.name)\r\n self.assertTrue(existing_nfile._exists())", "def VerifyLocalFileExists(path):\n if not os.path.exists(path):\n raise IOError(errno.ENOENT, os.strerror(errno.ENOENT), path)", "def test_exit_failure_on_spelling_errors(self):\n with open(self._temporary_file, \"w\") as f:\n f.write(\"splelling error\\n\")\n\n result = self._run_with_cache(self._temporary_file)\n self.assertEqual(result, 1)", "def test_write_missing(self):\n self.remove_file(self.FILENAME)\n fileio.writeline(self.FILENAME, 1, \"data\")\n\n self.assertTrue(self.file_exists(self.FILENAME))\n self.assertEquals(\"data\\n\", self.get_file_contents(self.FILENAME))", "def test_parse_stl_file_does_not_exist(self):\n try:\n solid = parser.parse_stl_file('not/a/thing')\n self.fail('Failed to raise file does not exist')\n except STLAnalysisException as e:\n self.assertEqual('STL file not found: not/a/thing', str(e))", "def test_no_file_error(self):\n root_dir = os.getcwd()\n filename = 'this-file-should-not-exist'\n with self.assertRaises(RuntimeError):\n read_externals_description_file(root_dir, filename)", "def 
test_create_default_project_workspace_mel_content_is_correct(\n create_test_data, trash_bin\n):\n arch = Archiver()\n tempdir = tempfile.gettempdir()\n\n project_path = arch.create_default_project(tempdir)\n trash_bin.append(project_path)\n\n workspace_mel_path = os.path.join(project_path, \"workspace.mel\")\n\n with open(workspace_mel_path) as f:\n content = f.read()\n\n expected_result = \"\"\"// Anima Archiver Default Project Definition\n\nworkspace -fr \"translatorData\" \"data\";\nworkspace -fr \"offlineEdit\" \"scenes/edits\";\nworkspace -fr \"renderData\" \"renderData\";\nworkspace -fr \"scene\" \"scenes\";\nworkspace -fr \"3dPaintTextures\" \"sourceimages/3dPaintTextures\";\nworkspace -fr \"eps\" \"data\";\nworkspace -fr \"OBJexport\" \"data\";\nworkspace -fr \"mel\" \"scripts\";\nworkspace -fr \"furShadowMap\" \"renderData/fur/furShadowMap\";\nworkspace -fr \"particles\" \"cache/particles\";\nworkspace -fr \"audio\" \"sound\";\nworkspace -fr \"scripts\" \"scripts\";\nworkspace -fr \"sound\" \"sound\";\nworkspace -fr \"DXF_FBX export\" \"data\";\nworkspace -fr \"furFiles\" \"renderData/fur/furFiles\";\nworkspace -fr \"depth\" \"renderData/depth\";\nworkspace -fr \"autoSave\" \"autosave\";\nworkspace -fr \"furAttrMap\" \"renderData/fur/furAttrMap\";\nworkspace -fr \"diskCache\" \"data\";\nworkspace -fr \"fileCache\" \"cache/nCache\";\nworkspace -fr \"ASS Export\" \"data\";\nworkspace -fr \"FBX export\" \"data\";\nworkspace -fr \"sourceImages\" \"sourceimages\";\nworkspace -fr \"FBX\" \"data\";\nworkspace -fr \"DAE_FBX export\" \"data\";\nworkspace -fr \"movie\" \"movies\";\nworkspace -fr \"Alembic\" \"data\";\nworkspace -fr \"DAE_FBX\" \"data\";\nworkspace -fr \"iprImages\" \"renderData/iprImages\";\nworkspace -fr \"mayaAscii\" \"scenes\";\nworkspace -fr \"furImages\" \"renderData/fur/furImages\";\nworkspace -fr \"furEqualMap\" \"renderData/fur/furEqualMap\";\nworkspace -fr \"illustrator\" \"data\";\nworkspace -fr \"DXF_FBX\" \"data\";\nworkspace -fr \"mayaBinary\" \"scenes\";\nworkspace -fr \"move\" \"data\";\nworkspace -fr \"images\" \"images\";\nworkspace -fr \"fluidCache\" \"cache/nCache/fluid\";\nworkspace -fr \"clips\" \"clips\";\nworkspace -fr \"ASS\" \"data\";\nworkspace -fr \"OBJ\" \"data\";\nworkspace -fr \"templates\" \"assets\";\nworkspace -fr \"shaders\" \"renderData/shaders\";\n\"\"\"\n assert content == expected_result", "def test_path_doesnt_exist(self, temp_builder):\n path = \"TestReadWrite/test_path_doesnt_exist\"\n file = f\"{path}/file\"\n assert not builder.exists(path)\n self.assert_read_write_file(file, \"test_path_doesnt_exist\")", "def test_no_file_Deletion(self):\r\n analyze_text(self.filename)\r\n self.assertTrue(os.path.exists(self.filename))", "def check(self, workspace: Workspace, u_file: UserFile) \\\n -> UserFile:\n if u_file.name == 'missfont.log':\n workspace.add_error(u_file, DISALLOWED_FILE, self.MISSFONT_WARNING,\n severity=Severity.INFO, is_persistant=False)\n workspace.remove(u_file, self.MISSFONT_WARNING)\n return u_file" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
testing if the Archiver.flatten() is working properly for a scene with no references.
def test_flatten_is_working_properly_with_no_references(create_test_data, trash_bin): data = create_test_data arch = Archiver() project_path = arch.flatten([data["asset2_model_main_v001"].absolute_full_path]) trash_bin.append(project_path) # the returned path should be a maya project directory assert os.path.exists(project_path) # there should be a workspace.mel file assert os.path.exists(os.path.join(project_path, "workspace.mel")) # there should be a maya scene file under path/scenes with the same # name of the source file assert os.path.exists( os.path.join(project_path, "scenes", data["asset2_model_main_v001"].filename) )
[ "def test_flatten_is_working_properly_with_only_one_level_of_references(\n create_test_data, trash_bin, create_pymel, create_maya_env\n):\n data = create_test_data\n maya_env = create_maya_env\n pm = create_pymel\n # open data[\"asset2_model_main_v001\"]\n maya_env.open(data[\"asset2_model_main_v001\"], force=True)\n\n # and reference data[\"asset2_model_take1_v001\"] to it\n maya_env.reference(data[\"asset2_model_take1_v001\"])\n\n # and save it\n pm.saveFile()\n\n # renew the scene\n pm.newFile(force=1)\n\n # create an archiver\n arch = Archiver()\n\n project_path = arch.flatten([data[\"asset2_model_main_v001\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # now check if we have two files under the path/scenes directory\n archived_version1_path = os.path.join(\n project_path, \"scenes\", data[\"asset2_model_main_v001\"].filename\n )\n\n archived_version4_unresolved_path = os.path.join(\n \"scenes/refs\", data[\"asset2_model_take1_v001\"].filename\n )\n\n archived_version4_path = os.path.join(\n project_path, archived_version4_unresolved_path\n )\n\n assert os.path.exists(archived_version1_path)\n assert os.path.exists(archived_version4_path)\n\n # open the archived version1\n pm.workspace.open(project_path)\n pm.openFile(archived_version1_path)\n\n # expect it to have one reference\n all_refs = pm.listReferences()\n assert len(all_refs) == 1\n\n # and the path is matching to archived version4 path\n ref = all_refs[0]\n assert ref.path == archived_version4_path\n assert ref.unresolvedPath() == archived_version4_unresolved_path", "def test_flatten_is_working_properly_with_multiple_level_of_references(\n create_test_data, trash_bin, create_pymel, create_maya_env\n):\n data = create_test_data\n maya_env = create_maya_env\n pm = create_pymel\n # open data[\"asset2_model_take1_v001\"]\n maya_env.open(data[\"asset2_model_take1_v001\"], force=True)\n\n # and reference data[\"version7\"] to it\n maya_env.reference(data[\"version7\"])\n\n # and save it\n pm.saveFile()\n\n # open data[\"asset2_model_main_v001\"]\n maya_env.open(data[\"asset2_model_main_v001\"], force=True)\n\n # and reference data[\"asset2_model_take1_v001\"] to it\n maya_env.reference(data[\"asset2_model_take1_v001\"])\n\n # and save it\n pm.saveFile()\n\n # renew the scene\n pm.newFile(force=1)\n\n # create an archiver\n arch = Archiver()\n\n project_path = arch.flatten([data[\"asset2_model_main_v001\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # now check if we have two files under the path/scenes directory\n archived_version1_path = os.path.join(\n project_path, \"scenes\", data[\"asset2_model_main_v001\"].filename\n )\n\n archived_version4_path = os.path.join(\n project_path, \"scenes/refs\", data[\"asset2_model_take1_v001\"].filename\n )\n\n archived_version4_unresolved_path = os.path.join(\n \"scenes/refs\", data[\"asset2_model_take1_v001\"].filename\n )\n\n archived_version7_path = os.path.join(\n project_path, \"scenes/refs\", data[\"version7\"].filename\n )\n\n archived_version7_unresolved_path = os.path.join(\n \"scenes/refs\", data[\"version7\"].filename\n )\n\n assert os.path.exists(archived_version1_path)\n assert os.path.exists(archived_version4_path)\n assert os.path.exists(archived_version7_path)\n\n # open the archived version1\n pm.workspace.open(project_path)\n pm.openFile(archived_version1_path)\n\n # expect it to have one reference\n all_refs = pm.listReferences()\n assert len(all_refs) == 1\n\n # and the path is matching to archived version4 path\n ref = all_refs[0]\n assert 
ref.path == archived_version4_path\n assert ref.unresolvedPath() == archived_version4_unresolved_path\n\n # check the deeper level references\n deeper_ref = pm.listReferences(parentReference=ref)[0]\n assert deeper_ref.path == archived_version7_path\n assert deeper_ref.unresolvedPath() == archived_version7_unresolved_path", "def test_flatten_is_working_properly_with_only_one_level_of_multiple_references_to_the_same_file(\n create_test_data, trash_bin, create_pymel, create_maya_env\n):\n data = create_test_data\n maya_env = create_maya_env\n pm = create_pymel\n # open data[\"asset2_model_main_v001\"]\n maya_env.open(data[\"asset2_model_main_v001\"], force=True)\n\n # and reference data[\"asset2_model_take1_v001\"] more than once to it\n maya_env.reference(data[\"asset2_model_take1_v001\"])\n maya_env.reference(data[\"asset2_model_take1_v001\"])\n maya_env.reference(data[\"asset2_model_take1_v001\"])\n\n # and save it\n pm.saveFile()\n\n # renew the scene\n pm.newFile(force=1)\n\n # create an archiver\n arch = Archiver()\n\n project_path = arch.flatten([data[\"asset2_model_main_v001\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # now check if we have two files under the path/scenes directory\n archived_version1_path = os.path.join(\n project_path, \"scenes\", data[\"asset2_model_main_v001\"].filename\n )\n\n archived_version4_unresolved_path = os.path.join(\n \"scenes/refs\", data[\"asset2_model_take1_v001\"].filename\n )\n\n archived_version4_path = os.path.join(\n project_path, archived_version4_unresolved_path\n )\n\n assert os.path.exists(archived_version1_path)\n assert os.path.exists(archived_version4_path)\n\n # open the archived version1\n pm.workspace.open(project_path)\n pm.openFile(archived_version1_path)\n\n # expect it to have three references\n all_refs = pm.listReferences()\n assert len(all_refs) == 3\n\n # and the path is matching to archived version4 path\n ref = all_refs[0]\n assert ref.path == archived_version4_path\n assert ref.unresolvedPath() == archived_version4_unresolved_path\n\n ref = all_refs[1]\n assert ref.path == archived_version4_path\n assert ref.unresolvedPath() == archived_version4_unresolved_path\n\n ref = all_refs[2]\n assert ref.path == archived_version4_path\n assert ref.unresolvedPath() == archived_version4_unresolved_path", "def test_flatten_is_working_properly_for_external_files(\n create_test_data, trash_bin, create_pymel, create_maya_env\n):\n data = create_test_data\n maya_env = create_maya_env\n pm = create_pymel\n # open data[\"version7\"]\n maya_env.open(data[\"version7\"], force=True)\n\n # create an image file at the project root\n image_filename = \"test.jpg\"\n image_path = os.path.join(data[\"version7\"].absolute_path, \"Textures\")\n image_full_path = os.path.join(image_path, image_filename)\n\n # create the file\n os.makedirs(image_path, exist_ok=True)\n with open(image_full_path, \"w+\") as f:\n f.writelines([\"\"])\n\n audio_filename = \"test.wav\"\n audio_path = os.path.join(data[\"version7\"].absolute_path, \"sound\")\n audio_full_path = os.path.join(audio_path, audio_filename)\n\n # create the file\n os.makedirs(audio_path, exist_ok=True)\n with open(audio_full_path, \"w+\") as f:\n f.writelines([\"\"])\n\n # create one image and one audio node\n pm.createNode(\"file\").attr(\"fileTextureName\").set(image_full_path)\n pm.createNode(\"audio\").attr(\"filename\").set(audio_full_path)\n\n # save it\n # replace external paths\n maya_env.replace_external_paths()\n pm.saveFile()\n\n # renew the scene\n 
pm.newFile(force=1)\n\n # create an archiver\n arch = Archiver()\n\n project_path = arch.flatten([data[\"version7\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # now check if we have the files under the path/scenes directory\n archived_version7_path = os.path.join(\n project_path, \"scenes\", data[\"version7\"].filename\n )\n\n archived_image_path = os.path.join(project_path, \"sourceimages\", image_filename)\n\n assert os.path.exists(archived_version7_path)\n assert os.path.exists(archived_image_path)\n\n # open the archived version1\n pm.workspace.open(project_path)\n pm.openFile(archived_version7_path)\n\n # and image files\n ref_image_path = pm.ls(type=\"file\")[0].attr(\"fileTextureName\").get()\n assert ref_image_path == os.path.join(project_path, \"sourceimages\", image_filename)\n ref_audio_path = pm.ls(type=\"audio\")[0].attr(\"filename\").get()\n assert ref_audio_path, os.path.join(project_path, \"sound\", audio_filename)", "def test_flatten_will_restore_the_current_workspace(\n create_test_data, trash_bin, create_pymel, create_maya_env\n):\n data = create_test_data\n maya_env = create_maya_env\n pm = create_pymel\n # open data[\"asset2_model_main_v001\"]\n maya_env.open(data[\"asset2_model_main_v001\"], force=True)\n\n current_workspace = pm.workspace.path\n\n arch = Archiver()\n project_path = arch.flatten([data[\"asset2_model_main_v001\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # now check if the current workspace is intact\n assert current_workspace == pm.workspace.path", "def test_flatten_is_working_properly_with_exclude_mask(\n create_test_data, trash_bin, create_pymel, create_maya_env\n):\n data = create_test_data\n maya_env = create_maya_env\n pm = create_pymel\n # open data[\"version7\"]\n maya_env.open(data[\"version7\"], force=True)\n\n # create an image file at the project root\n image_filename = \"test.jpg\"\n image_path = os.path.join(data[\"version7\"].absolute_path, \"..\", \"sourceimages\")\n image_full_path = os.path.join(image_path, image_filename)\n\n # create the file\n os.makedirs(image_path, exist_ok=True)\n with open(image_full_path, \"w+\") as f:\n f.writelines([\"\"])\n\n audio_filename = \"test.wav\"\n audio_path = os.path.join(data[\"version7\"].absolute_path, \"sound\")\n audio_full_path = os.path.join(audio_path, audio_filename)\n\n # create the file\n os.makedirs(audio_path, exist_ok=True)\n with open(audio_full_path, \"w+\") as f:\n f.writelines([\"\"])\n\n # create one image and one audio node\n pm.createNode(\"file\").attr(\"fileTextureName\").set(image_full_path)\n pm.createNode(\"audio\").attr(\"filename\").set(audio_full_path)\n\n # save it\n # replace external paths\n maya_env.replace_external_paths()\n pm.saveFile()\n\n # open data[\"asset2_model_take1_v001\"]\n maya_env.open(data[\"asset2_model_take1_v001\"], force=True)\n\n # and reference data[\"version7\"] to it\n maya_env.reference(data[\"version7\"])\n\n # and save it\n pm.saveFile()\n\n # open data[\"asset2_model_main_v001\"]\n maya_env.open(data[\"asset2_model_main_v001\"], force=True)\n\n # and reference data[\"asset2_model_take1_v001\"] to it\n maya_env.reference(data[\"asset2_model_take1_v001\"])\n\n # and save it\n pm.saveFile()\n\n # renew the scene\n pm.newFile(force=1)\n\n # create an archiver\n arch = Archiver(exclude_mask=[\".png\", \".jpg\", \".tga\"])\n\n project_path = arch.flatten([data[\"asset2_model_main_v001\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # now check if we have the files under the path/scenes directory\n 
archived_version1_path = os.path.join(\n project_path, \"scenes\", data[\"asset2_model_main_v001\"].filename\n )\n\n # and references under path/scenes/refs path\n archived_version4_path = os.path.join(\n project_path, \"scenes/refs\", data[\"asset2_model_take1_v001\"].filename\n )\n\n archived_version4_unresolved_path = os.path.join(\n \"scenes/refs\", data[\"asset2_model_take1_v001\"].filename\n )\n\n archived_version7_path = os.path.join(\n project_path, \"scenes/refs\", data[\"version7\"].filename\n )\n\n archived_version7_unresolved_path = os.path.join(\n \"scenes/refs\", data[\"version7\"].filename\n )\n\n archived_image_path = os.path.join(project_path, \"sourceimages\", image_filename)\n archived_audio_path = os.path.join(project_path, \"sound\", audio_filename)\n\n assert os.path.exists(archived_version1_path)\n assert os.path.exists(archived_version4_path)\n assert os.path.exists(archived_version7_path)\n # jpg should not be included\n assert not os.path.exists(archived_image_path)\n assert os.path.exists(archived_audio_path)\n\n # open the archived version1\n pm.workspace.open(project_path)\n pm.openFile(archived_version1_path)\n\n # expect it to have one reference\n all_refs = pm.listReferences()\n assert len(all_refs) == 1\n\n # and the path is matching to archived version4 path\n ref = all_refs[0]\n assert ref.path == archived_version4_path\n assert ref.unresolvedPath() == archived_version4_unresolved_path\n\n # check the deeper level references\n deeper_ref = pm.listReferences(parentReference=ref)[0]\n assert deeper_ref.path == archived_version7_path\n assert deeper_ref.unresolvedPath() == archived_version7_unresolved_path\n\n # and deeper level files\n ref_image_path = pm.ls(type=\"file\")[0].attr(\"fileTextureName\").get()\n # the path of the jpg should be intact\n assert ref_image_path == \"$REPOTPR/TP/Test_Task_1/sourceimages/test.jpg\"\n\n ref_audio_path = pm.ls(type=\"audio\")[0].attr(\"filename\").get()\n assert ref_audio_path == os.path.join(project_path, \"sound\", audio_filename)", "def flatten(self):\n if self.zernike_mirror is not None:\n self.zernike_mirror.flatten()\n if self.ripple_mirror is not None:\n self.ripple_mirror.flatten()\n if self.dm is not None:\n self.dm.flatten()", "def is_flattenable(obj):\n return obj.__class__ in [list, set, frozenset, genclass]", "def test_flatten():\n assert list(nsitertools.flatten([1, 2, [3, 4, 5, [[6]]]])) == \\\n [1, 2, 3, 4, 5, 6]", "def unpack_clear(self):\n if (not os.path.exists(self.unpack_path)):\n VERBOSE(\"no image found: %s\" % self.unpack_path)\n else:\n if (not os.path.isdir(self.unpack_path)):\n FATAL(\"can't flatten: %s exists but is not a directory\"\n % self.unpack_path)\n if (not self.unpacked_p(self.unpack_path)):\n FATAL(\"can't flatten: %s exists but does not appear to be an image\"\n % self.unpack_path)\n VERBOSE(\"removing existing image: %s\" % self.unpack_path)\n rmtree(self.unpack_path)", "def test_unflatten(self, shape):\n\n reshaped = np.reshape(flat_dummy_array, shape)\n unflattened = np.array([x for x in unflatten(flat_dummy_array, reshaped)])\n\n assert unflattened.shape == reshaped.shape\n assert np.array_equal(unflattened, reshaped)", "def _set_flatten_results(self):\n self._flatten_results = True\n for analysis in self._analyses:\n if isinstance(analysis, CompositeAnalysis):\n analysis._set_flatten_results()", "def test_un_flatten(self, un_flatten_test_case: UnFlattenTest) -> None:\n # Arrange done in fixtures.\n # Act.\n compound = unit_analysis.un_flatten(\n 
un_flatten_test_case.numerator,\n un_flatten_test_case.denominator,\n un_flatten_test_case.type_factories,\n )\n\n # Assert.\n assert compound == un_flatten_test_case.expected_compound", "def sceneHasNestedRefs(self):\r\n if self.getNestedRefs():\r\n return 1\r\n else:\r\n return 0", "def test_flatten():\n\n print(\"Testing flatten function with deeply nested lists\")\n test_deep_list = [\"one\", \"two\", [\"three\"], \"four\", [\"five\", \"six\", [\"seven\"]]]\n expected_result = [\"one\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\"]\n assert flatten(test_deep_list) == expected_result\n\n print(\"Testing flatten function with list with no nested lists\")\n test_shallow_list = [\"one\", \"two\", \"three\"]\n assert flatten(test_shallow_list) == test_shallow_list\n\n print(\"Testing flatten function with empty list\")\n test_empty_list = []\n assert flatten(test_empty_list) == test_empty_list", "def test_LocationTag_flatten():", "def test_array(self):\n self.assertEqual(flatten([]), [])\n self.assertEqual(flatten([1, 2, 3, 4, 5]), [1, 2, 3, 4, 5])\n self.assertEqual(flatten([0, [[2, 3], [[4, 5]], 6]]), [0, 2, 3, 4, 5, 6])\n self.assertEqual(flatten([[1, 2, [3]], 4]), [1, 2, 3, 4])\n self.assertEqual(flatten([[[[[[7]]]]]]), [7])\n self.assertEqual(flatten([999999999999999]), [999999999999999])", "def test_filter_files_by_archive_false(populated_store: Store):\n\n # GIVEN as store with files\n\n # WHEN filtering on non-archived files\n non_archived_files_query: Query = filter_files_by_is_archived(\n files=populated_store._get_join_file_tags_archive_query(),\n is_archived=False,\n )\n\n # THEN none of the files returned should have an archive object linked to it\n for file in non_archived_files_query:\n assert file.archive is None", "def test_obs_unflattened(self):\n obs = self.env.observation_space.sample()\n action, _ = self.policy.get_action(\n self.env.observation_space.flatten(obs))\n self.env.step(action)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
testing if the Archiver.flatten() is working properly for a scene with only one level of references.
def test_flatten_is_working_properly_with_only_one_level_of_references(
    create_test_data, trash_bin, create_pymel, create_maya_env
):
    data = create_test_data
    maya_env = create_maya_env
    pm = create_pymel
    # open data["asset2_model_main_v001"]
    maya_env.open(data["asset2_model_main_v001"], force=True)

    # and reference data["asset2_model_take1_v001"] to it
    maya_env.reference(data["asset2_model_take1_v001"])

    # and save it
    pm.saveFile()

    # renew the scene
    pm.newFile(force=1)

    # create an archiver
    arch = Archiver()

    project_path = arch.flatten([data["asset2_model_main_v001"].absolute_full_path])
    trash_bin.append(project_path)

    # now check if we have two files under the path/scenes directory
    archived_version1_path = os.path.join(
        project_path, "scenes", data["asset2_model_main_v001"].filename
    )

    archived_version4_unresolved_path = os.path.join(
        "scenes/refs", data["asset2_model_take1_v001"].filename
    )

    archived_version4_path = os.path.join(
        project_path, archived_version4_unresolved_path
    )

    assert os.path.exists(archived_version1_path)
    assert os.path.exists(archived_version4_path)

    # open the archived version1
    pm.workspace.open(project_path)
    pm.openFile(archived_version1_path)

    # expect it to have one reference
    all_refs = pm.listReferences()
    assert len(all_refs) == 1

    # and the path is matching to archived version4 path
    ref = all_refs[0]
    assert ref.path == archived_version4_path
    assert ref.unresolvedPath() == archived_version4_unresolved_path
[ "def test_flatten_is_working_properly_with_multiple_level_of_references(\n create_test_data, trash_bin, create_pymel, create_maya_env\n):\n data = create_test_data\n maya_env = create_maya_env\n pm = create_pymel\n # open data[\"asset2_model_take1_v001\"]\n maya_env.open(data[\"asset2_model_take1_v001\"], force=True)\n\n # and reference data[\"version7\"] to it\n maya_env.reference(data[\"version7\"])\n\n # and save it\n pm.saveFile()\n\n # open data[\"asset2_model_main_v001\"]\n maya_env.open(data[\"asset2_model_main_v001\"], force=True)\n\n # and reference data[\"asset2_model_take1_v001\"] to it\n maya_env.reference(data[\"asset2_model_take1_v001\"])\n\n # and save it\n pm.saveFile()\n\n # renew the scene\n pm.newFile(force=1)\n\n # create an archiver\n arch = Archiver()\n\n project_path = arch.flatten([data[\"asset2_model_main_v001\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # now check if we have two files under the path/scenes directory\n archived_version1_path = os.path.join(\n project_path, \"scenes\", data[\"asset2_model_main_v001\"].filename\n )\n\n archived_version4_path = os.path.join(\n project_path, \"scenes/refs\", data[\"asset2_model_take1_v001\"].filename\n )\n\n archived_version4_unresolved_path = os.path.join(\n \"scenes/refs\", data[\"asset2_model_take1_v001\"].filename\n )\n\n archived_version7_path = os.path.join(\n project_path, \"scenes/refs\", data[\"version7\"].filename\n )\n\n archived_version7_unresolved_path = os.path.join(\n \"scenes/refs\", data[\"version7\"].filename\n )\n\n assert os.path.exists(archived_version1_path)\n assert os.path.exists(archived_version4_path)\n assert os.path.exists(archived_version7_path)\n\n # open the archived version1\n pm.workspace.open(project_path)\n pm.openFile(archived_version1_path)\n\n # expect it to have one reference\n all_refs = pm.listReferences()\n assert len(all_refs) == 1\n\n # and the path is matching to archived version4 path\n ref = all_refs[0]\n assert ref.path == archived_version4_path\n assert ref.unresolvedPath() == archived_version4_unresolved_path\n\n # check the deeper level references\n deeper_ref = pm.listReferences(parentReference=ref)[0]\n assert deeper_ref.path == archived_version7_path\n assert deeper_ref.unresolvedPath() == archived_version7_unresolved_path", "def test_flatten_is_working_properly_with_only_one_level_of_multiple_references_to_the_same_file(\n create_test_data, trash_bin, create_pymel, create_maya_env\n):\n data = create_test_data\n maya_env = create_maya_env\n pm = create_pymel\n # open data[\"asset2_model_main_v001\"]\n maya_env.open(data[\"asset2_model_main_v001\"], force=True)\n\n # and reference data[\"asset2_model_take1_v001\"] more than once to it\n maya_env.reference(data[\"asset2_model_take1_v001\"])\n maya_env.reference(data[\"asset2_model_take1_v001\"])\n maya_env.reference(data[\"asset2_model_take1_v001\"])\n\n # and save it\n pm.saveFile()\n\n # renew the scene\n pm.newFile(force=1)\n\n # create an archiver\n arch = Archiver()\n\n project_path = arch.flatten([data[\"asset2_model_main_v001\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # now check if we have two files under the path/scenes directory\n archived_version1_path = os.path.join(\n project_path, \"scenes\", data[\"asset2_model_main_v001\"].filename\n )\n\n archived_version4_unresolved_path = os.path.join(\n \"scenes/refs\", data[\"asset2_model_take1_v001\"].filename\n )\n\n archived_version4_path = os.path.join(\n project_path, archived_version4_unresolved_path\n )\n\n assert 
os.path.exists(archived_version1_path)\n assert os.path.exists(archived_version4_path)\n\n # open the archived version1\n pm.workspace.open(project_path)\n pm.openFile(archived_version1_path)\n\n # expect it to have three references\n all_refs = pm.listReferences()\n assert len(all_refs) == 3\n\n # and the path is matching to archived version4 path\n ref = all_refs[0]\n assert ref.path == archived_version4_path\n assert ref.unresolvedPath() == archived_version4_unresolved_path\n\n ref = all_refs[1]\n assert ref.path == archived_version4_path\n assert ref.unresolvedPath() == archived_version4_unresolved_path\n\n ref = all_refs[2]\n assert ref.path == archived_version4_path\n assert ref.unresolvedPath() == archived_version4_unresolved_path", "def test_flatten_is_working_properly_with_no_references(create_test_data, trash_bin):\n data = create_test_data\n arch = Archiver()\n project_path = arch.flatten([data[\"asset2_model_main_v001\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # the returned path should be a maya project directory\n assert os.path.exists(project_path)\n\n # there should be a workspace.mel file\n assert os.path.exists(os.path.join(project_path, \"workspace.mel\"))\n\n # there should be a maya scene file under path/scenes with the same\n # name of the source file\n assert os.path.exists(\n os.path.join(project_path, \"scenes\", data[\"asset2_model_main_v001\"].filename)\n )", "def test_flatten_is_working_properly_for_external_files(\n create_test_data, trash_bin, create_pymel, create_maya_env\n):\n data = create_test_data\n maya_env = create_maya_env\n pm = create_pymel\n # open data[\"version7\"]\n maya_env.open(data[\"version7\"], force=True)\n\n # create an image file at the project root\n image_filename = \"test.jpg\"\n image_path = os.path.join(data[\"version7\"].absolute_path, \"Textures\")\n image_full_path = os.path.join(image_path, image_filename)\n\n # create the file\n os.makedirs(image_path, exist_ok=True)\n with open(image_full_path, \"w+\") as f:\n f.writelines([\"\"])\n\n audio_filename = \"test.wav\"\n audio_path = os.path.join(data[\"version7\"].absolute_path, \"sound\")\n audio_full_path = os.path.join(audio_path, audio_filename)\n\n # create the file\n os.makedirs(audio_path, exist_ok=True)\n with open(audio_full_path, \"w+\") as f:\n f.writelines([\"\"])\n\n # create one image and one audio node\n pm.createNode(\"file\").attr(\"fileTextureName\").set(image_full_path)\n pm.createNode(\"audio\").attr(\"filename\").set(audio_full_path)\n\n # save it\n # replace external paths\n maya_env.replace_external_paths()\n pm.saveFile()\n\n # renew the scene\n pm.newFile(force=1)\n\n # create an archiver\n arch = Archiver()\n\n project_path = arch.flatten([data[\"version7\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # now check if we have the files under the path/scenes directory\n archived_version7_path = os.path.join(\n project_path, \"scenes\", data[\"version7\"].filename\n )\n\n archived_image_path = os.path.join(project_path, \"sourceimages\", image_filename)\n\n assert os.path.exists(archived_version7_path)\n assert os.path.exists(archived_image_path)\n\n # open the archived version1\n pm.workspace.open(project_path)\n pm.openFile(archived_version7_path)\n\n # and image files\n ref_image_path = pm.ls(type=\"file\")[0].attr(\"fileTextureName\").get()\n assert ref_image_path == os.path.join(project_path, \"sourceimages\", image_filename)\n ref_audio_path = pm.ls(type=\"audio\")[0].attr(\"filename\").get()\n assert ref_audio_path, 
os.path.join(project_path, \"sound\", audio_filename)", "def test_flatten_will_restore_the_current_workspace(\n create_test_data, trash_bin, create_pymel, create_maya_env\n):\n data = create_test_data\n maya_env = create_maya_env\n pm = create_pymel\n # open data[\"asset2_model_main_v001\"]\n maya_env.open(data[\"asset2_model_main_v001\"], force=True)\n\n current_workspace = pm.workspace.path\n\n arch = Archiver()\n project_path = arch.flatten([data[\"asset2_model_main_v001\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # now check if the current workspace is intact\n assert current_workspace == pm.workspace.path", "def test_flatten_is_working_properly_with_exclude_mask(\n create_test_data, trash_bin, create_pymel, create_maya_env\n):\n data = create_test_data\n maya_env = create_maya_env\n pm = create_pymel\n # open data[\"version7\"]\n maya_env.open(data[\"version7\"], force=True)\n\n # create an image file at the project root\n image_filename = \"test.jpg\"\n image_path = os.path.join(data[\"version7\"].absolute_path, \"..\", \"sourceimages\")\n image_full_path = os.path.join(image_path, image_filename)\n\n # create the file\n os.makedirs(image_path, exist_ok=True)\n with open(image_full_path, \"w+\") as f:\n f.writelines([\"\"])\n\n audio_filename = \"test.wav\"\n audio_path = os.path.join(data[\"version7\"].absolute_path, \"sound\")\n audio_full_path = os.path.join(audio_path, audio_filename)\n\n # create the file\n os.makedirs(audio_path, exist_ok=True)\n with open(audio_full_path, \"w+\") as f:\n f.writelines([\"\"])\n\n # create one image and one audio node\n pm.createNode(\"file\").attr(\"fileTextureName\").set(image_full_path)\n pm.createNode(\"audio\").attr(\"filename\").set(audio_full_path)\n\n # save it\n # replace external paths\n maya_env.replace_external_paths()\n pm.saveFile()\n\n # open data[\"asset2_model_take1_v001\"]\n maya_env.open(data[\"asset2_model_take1_v001\"], force=True)\n\n # and reference data[\"version7\"] to it\n maya_env.reference(data[\"version7\"])\n\n # and save it\n pm.saveFile()\n\n # open data[\"asset2_model_main_v001\"]\n maya_env.open(data[\"asset2_model_main_v001\"], force=True)\n\n # and reference data[\"asset2_model_take1_v001\"] to it\n maya_env.reference(data[\"asset2_model_take1_v001\"])\n\n # and save it\n pm.saveFile()\n\n # renew the scene\n pm.newFile(force=1)\n\n # create an archiver\n arch = Archiver(exclude_mask=[\".png\", \".jpg\", \".tga\"])\n\n project_path = arch.flatten([data[\"asset2_model_main_v001\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # now check if we have the files under the path/scenes directory\n archived_version1_path = os.path.join(\n project_path, \"scenes\", data[\"asset2_model_main_v001\"].filename\n )\n\n # and references under path/scenes/refs path\n archived_version4_path = os.path.join(\n project_path, \"scenes/refs\", data[\"asset2_model_take1_v001\"].filename\n )\n\n archived_version4_unresolved_path = os.path.join(\n \"scenes/refs\", data[\"asset2_model_take1_v001\"].filename\n )\n\n archived_version7_path = os.path.join(\n project_path, \"scenes/refs\", data[\"version7\"].filename\n )\n\n archived_version7_unresolved_path = os.path.join(\n \"scenes/refs\", data[\"version7\"].filename\n )\n\n archived_image_path = os.path.join(project_path, \"sourceimages\", image_filename)\n archived_audio_path = os.path.join(project_path, \"sound\", audio_filename)\n\n assert os.path.exists(archived_version1_path)\n assert os.path.exists(archived_version4_path)\n assert 
os.path.exists(archived_version7_path)\n # jpg should not be included\n assert not os.path.exists(archived_image_path)\n assert os.path.exists(archived_audio_path)\n\n # open the archived version1\n pm.workspace.open(project_path)\n pm.openFile(archived_version1_path)\n\n # expect it to have one reference\n all_refs = pm.listReferences()\n assert len(all_refs) == 1\n\n # and the path is matching to archived version4 path\n ref = all_refs[0]\n assert ref.path == archived_version4_path\n assert ref.unresolvedPath() == archived_version4_unresolved_path\n\n # check the deeper level references\n deeper_ref = pm.listReferences(parentReference=ref)[0]\n assert deeper_ref.path == archived_version7_path\n assert deeper_ref.unresolvedPath() == archived_version7_unresolved_path\n\n # and deeper level files\n ref_image_path = pm.ls(type=\"file\")[0].attr(\"fileTextureName\").get()\n # the path of the jpg should be intact\n assert ref_image_path == \"$REPOTPR/TP/Test_Task_1/sourceimages/test.jpg\"\n\n ref_audio_path = pm.ls(type=\"audio\")[0].attr(\"filename\").get()\n assert ref_audio_path == os.path.join(project_path, \"sound\", audio_filename)", "def flatten(self):\n if self.zernike_mirror is not None:\n self.zernike_mirror.flatten()\n if self.ripple_mirror is not None:\n self.ripple_mirror.flatten()\n if self.dm is not None:\n self.dm.flatten()", "def test_flatten():\n assert list(nsitertools.flatten([1, 2, [3, 4, 5, [[6]]]])) == \\\n [1, 2, 3, 4, 5, 6]", "def is_flattenable(obj):\n return obj.__class__ in [list, set, frozenset, genclass]", "def sceneHasNestedRefs(self):\r\n if self.getNestedRefs():\r\n return 1\r\n else:\r\n return 0", "def test_array(self):\n self.assertEqual(flatten([]), [])\n self.assertEqual(flatten([1, 2, 3, 4, 5]), [1, 2, 3, 4, 5])\n self.assertEqual(flatten([0, [[2, 3], [[4, 5]], 6]]), [0, 2, 3, 4, 5, 6])\n self.assertEqual(flatten([[1, 2, [3]], 4]), [1, 2, 3, 4])\n self.assertEqual(flatten([[[[[[7]]]]]]), [7])\n self.assertEqual(flatten([999999999999999]), [999999999999999])", "def test_flatten():\n\n print(\"Testing flatten function with deeply nested lists\")\n test_deep_list = [\"one\", \"two\", [\"three\"], \"four\", [\"five\", \"six\", [\"seven\"]]]\n expected_result = [\"one\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\"]\n assert flatten(test_deep_list) == expected_result\n\n print(\"Testing flatten function with list with no nested lists\")\n test_shallow_list = [\"one\", \"two\", \"three\"]\n assert flatten(test_shallow_list) == test_shallow_list\n\n print(\"Testing flatten function with empty list\")\n test_empty_list = []\n assert flatten(test_empty_list) == test_empty_list", "def test_LocationTag_flatten():", "def test_un_flatten(self, un_flatten_test_case: UnFlattenTest) -> None:\n # Arrange done in fixtures.\n # Act.\n compound = unit_analysis.un_flatten(\n un_flatten_test_case.numerator,\n un_flatten_test_case.denominator,\n un_flatten_test_case.type_factories,\n )\n\n # Assert.\n assert compound == un_flatten_test_case.expected_compound", "def _set_flatten_results(self):\n self._flatten_results = True\n for analysis in self._analyses:\n if isinstance(analysis, CompositeAnalysis):\n analysis._set_flatten_results()", "def get_flattened(self):\n return ravel(self.get())", "def test_unflatten(self, shape):\n\n reshaped = np.reshape(flat_dummy_array, shape)\n unflattened = np.array([x for x in unflatten(flat_dummy_array, reshaped)])\n\n assert unflattened.shape == reshaped.shape\n assert np.array_equal(unflattened, reshaped)", "def unflatten(self, 
x):\n pass", "def getFlattenDeps (self, skipOptional = True):\n return self.root.flatten (skipOptional)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
testing if the Archiver.flatten() is working properly for a scene with only one level of multiple references to the same file.
def test_flatten_is_working_properly_with_only_one_level_of_multiple_references_to_the_same_file(
    create_test_data, trash_bin, create_pymel, create_maya_env
):
    data = create_test_data
    maya_env = create_maya_env
    pm = create_pymel
    # open data["asset2_model_main_v001"]
    maya_env.open(data["asset2_model_main_v001"], force=True)

    # and reference data["asset2_model_take1_v001"] more than once to it
    maya_env.reference(data["asset2_model_take1_v001"])
    maya_env.reference(data["asset2_model_take1_v001"])
    maya_env.reference(data["asset2_model_take1_v001"])

    # and save it
    pm.saveFile()

    # renew the scene
    pm.newFile(force=1)

    # create an archiver
    arch = Archiver()

    project_path = arch.flatten([data["asset2_model_main_v001"].absolute_full_path])
    trash_bin.append(project_path)

    # now check if we have two files under the path/scenes directory
    archived_version1_path = os.path.join(
        project_path, "scenes", data["asset2_model_main_v001"].filename
    )

    archived_version4_unresolved_path = os.path.join(
        "scenes/refs", data["asset2_model_take1_v001"].filename
    )

    archived_version4_path = os.path.join(
        project_path, archived_version4_unresolved_path
    )

    assert os.path.exists(archived_version1_path)
    assert os.path.exists(archived_version4_path)

    # open the archived version1
    pm.workspace.open(project_path)
    pm.openFile(archived_version1_path)

    # expect it to have three references
    all_refs = pm.listReferences()
    assert len(all_refs) == 3

    # and the path is matching to archived version4 path
    ref = all_refs[0]
    assert ref.path == archived_version4_path
    assert ref.unresolvedPath() == archived_version4_unresolved_path

    ref = all_refs[1]
    assert ref.path == archived_version4_path
    assert ref.unresolvedPath() == archived_version4_unresolved_path

    ref = all_refs[2]
    assert ref.path == archived_version4_path
    assert ref.unresolvedPath() == archived_version4_unresolved_path
[ "def test_flatten_is_working_properly_with_multiple_level_of_references(\n create_test_data, trash_bin, create_pymel, create_maya_env\n):\n data = create_test_data\n maya_env = create_maya_env\n pm = create_pymel\n # open data[\"asset2_model_take1_v001\"]\n maya_env.open(data[\"asset2_model_take1_v001\"], force=True)\n\n # and reference data[\"version7\"] to it\n maya_env.reference(data[\"version7\"])\n\n # and save it\n pm.saveFile()\n\n # open data[\"asset2_model_main_v001\"]\n maya_env.open(data[\"asset2_model_main_v001\"], force=True)\n\n # and reference data[\"asset2_model_take1_v001\"] to it\n maya_env.reference(data[\"asset2_model_take1_v001\"])\n\n # and save it\n pm.saveFile()\n\n # renew the scene\n pm.newFile(force=1)\n\n # create an archiver\n arch = Archiver()\n\n project_path = arch.flatten([data[\"asset2_model_main_v001\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # now check if we have two files under the path/scenes directory\n archived_version1_path = os.path.join(\n project_path, \"scenes\", data[\"asset2_model_main_v001\"].filename\n )\n\n archived_version4_path = os.path.join(\n project_path, \"scenes/refs\", data[\"asset2_model_take1_v001\"].filename\n )\n\n archived_version4_unresolved_path = os.path.join(\n \"scenes/refs\", data[\"asset2_model_take1_v001\"].filename\n )\n\n archived_version7_path = os.path.join(\n project_path, \"scenes/refs\", data[\"version7\"].filename\n )\n\n archived_version7_unresolved_path = os.path.join(\n \"scenes/refs\", data[\"version7\"].filename\n )\n\n assert os.path.exists(archived_version1_path)\n assert os.path.exists(archived_version4_path)\n assert os.path.exists(archived_version7_path)\n\n # open the archived version1\n pm.workspace.open(project_path)\n pm.openFile(archived_version1_path)\n\n # expect it to have one reference\n all_refs = pm.listReferences()\n assert len(all_refs) == 1\n\n # and the path is matching to archived version4 path\n ref = all_refs[0]\n assert ref.path == archived_version4_path\n assert ref.unresolvedPath() == archived_version4_unresolved_path\n\n # check the deeper level references\n deeper_ref = pm.listReferences(parentReference=ref)[0]\n assert deeper_ref.path == archived_version7_path\n assert deeper_ref.unresolvedPath() == archived_version7_unresolved_path", "def test_flatten_is_working_properly_with_only_one_level_of_references(\n create_test_data, trash_bin, create_pymel, create_maya_env\n):\n data = create_test_data\n maya_env = create_maya_env\n pm = create_pymel\n # open data[\"asset2_model_main_v001\"]\n maya_env.open(data[\"asset2_model_main_v001\"], force=True)\n\n # and reference data[\"asset2_model_take1_v001\"] to it\n maya_env.reference(data[\"asset2_model_take1_v001\"])\n\n # and save it\n pm.saveFile()\n\n # renew the scene\n pm.newFile(force=1)\n\n # create an archiver\n arch = Archiver()\n\n project_path = arch.flatten([data[\"asset2_model_main_v001\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # now check if we have two files under the path/scenes directory\n archived_version1_path = os.path.join(\n project_path, \"scenes\", data[\"asset2_model_main_v001\"].filename\n )\n\n archived_version4_unresolved_path = os.path.join(\n \"scenes/refs\", data[\"asset2_model_take1_v001\"].filename\n )\n\n archived_version4_path = os.path.join(\n project_path, archived_version4_unresolved_path\n )\n\n assert os.path.exists(archived_version1_path)\n assert os.path.exists(archived_version4_path)\n\n # open the archived version1\n pm.workspace.open(project_path)\n 
pm.openFile(archived_version1_path)\n\n # expect it to have one reference\n all_refs = pm.listReferences()\n assert len(all_refs) == 1\n\n # and the path is matching to archived version4 path\n ref = all_refs[0]\n assert ref.path == archived_version4_path\n assert ref.unresolvedPath() == archived_version4_unresolved_path", "def test_flatten_is_working_properly_with_no_references(create_test_data, trash_bin):\n data = create_test_data\n arch = Archiver()\n project_path = arch.flatten([data[\"asset2_model_main_v001\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # the returned path should be a maya project directory\n assert os.path.exists(project_path)\n\n # there should be a workspace.mel file\n assert os.path.exists(os.path.join(project_path, \"workspace.mel\"))\n\n # there should be a maya scene file under path/scenes with the same\n # name of the source file\n assert os.path.exists(\n os.path.join(project_path, \"scenes\", data[\"asset2_model_main_v001\"].filename)\n )", "def test_flatten_is_working_properly_for_external_files(\n create_test_data, trash_bin, create_pymel, create_maya_env\n):\n data = create_test_data\n maya_env = create_maya_env\n pm = create_pymel\n # open data[\"version7\"]\n maya_env.open(data[\"version7\"], force=True)\n\n # create an image file at the project root\n image_filename = \"test.jpg\"\n image_path = os.path.join(data[\"version7\"].absolute_path, \"Textures\")\n image_full_path = os.path.join(image_path, image_filename)\n\n # create the file\n os.makedirs(image_path, exist_ok=True)\n with open(image_full_path, \"w+\") as f:\n f.writelines([\"\"])\n\n audio_filename = \"test.wav\"\n audio_path = os.path.join(data[\"version7\"].absolute_path, \"sound\")\n audio_full_path = os.path.join(audio_path, audio_filename)\n\n # create the file\n os.makedirs(audio_path, exist_ok=True)\n with open(audio_full_path, \"w+\") as f:\n f.writelines([\"\"])\n\n # create one image and one audio node\n pm.createNode(\"file\").attr(\"fileTextureName\").set(image_full_path)\n pm.createNode(\"audio\").attr(\"filename\").set(audio_full_path)\n\n # save it\n # replace external paths\n maya_env.replace_external_paths()\n pm.saveFile()\n\n # renew the scene\n pm.newFile(force=1)\n\n # create an archiver\n arch = Archiver()\n\n project_path = arch.flatten([data[\"version7\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # now check if we have the files under the path/scenes directory\n archived_version7_path = os.path.join(\n project_path, \"scenes\", data[\"version7\"].filename\n )\n\n archived_image_path = os.path.join(project_path, \"sourceimages\", image_filename)\n\n assert os.path.exists(archived_version7_path)\n assert os.path.exists(archived_image_path)\n\n # open the archived version1\n pm.workspace.open(project_path)\n pm.openFile(archived_version7_path)\n\n # and image files\n ref_image_path = pm.ls(type=\"file\")[0].attr(\"fileTextureName\").get()\n assert ref_image_path == os.path.join(project_path, \"sourceimages\", image_filename)\n ref_audio_path = pm.ls(type=\"audio\")[0].attr(\"filename\").get()\n assert ref_audio_path, os.path.join(project_path, \"sound\", audio_filename)", "def test_flatten_is_working_properly_with_exclude_mask(\n create_test_data, trash_bin, create_pymel, create_maya_env\n):\n data = create_test_data\n maya_env = create_maya_env\n pm = create_pymel\n # open data[\"version7\"]\n maya_env.open(data[\"version7\"], force=True)\n\n # create an image file at the project root\n image_filename = \"test.jpg\"\n image_path = 
os.path.join(data[\"version7\"].absolute_path, \"..\", \"sourceimages\")\n image_full_path = os.path.join(image_path, image_filename)\n\n # create the file\n os.makedirs(image_path, exist_ok=True)\n with open(image_full_path, \"w+\") as f:\n f.writelines([\"\"])\n\n audio_filename = \"test.wav\"\n audio_path = os.path.join(data[\"version7\"].absolute_path, \"sound\")\n audio_full_path = os.path.join(audio_path, audio_filename)\n\n # create the file\n os.makedirs(audio_path, exist_ok=True)\n with open(audio_full_path, \"w+\") as f:\n f.writelines([\"\"])\n\n # create one image and one audio node\n pm.createNode(\"file\").attr(\"fileTextureName\").set(image_full_path)\n pm.createNode(\"audio\").attr(\"filename\").set(audio_full_path)\n\n # save it\n # replace external paths\n maya_env.replace_external_paths()\n pm.saveFile()\n\n # open data[\"asset2_model_take1_v001\"]\n maya_env.open(data[\"asset2_model_take1_v001\"], force=True)\n\n # and reference data[\"version7\"] to it\n maya_env.reference(data[\"version7\"])\n\n # and save it\n pm.saveFile()\n\n # open data[\"asset2_model_main_v001\"]\n maya_env.open(data[\"asset2_model_main_v001\"], force=True)\n\n # and reference data[\"asset2_model_take1_v001\"] to it\n maya_env.reference(data[\"asset2_model_take1_v001\"])\n\n # and save it\n pm.saveFile()\n\n # renew the scene\n pm.newFile(force=1)\n\n # create an archiver\n arch = Archiver(exclude_mask=[\".png\", \".jpg\", \".tga\"])\n\n project_path = arch.flatten([data[\"asset2_model_main_v001\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # now check if we have the files under the path/scenes directory\n archived_version1_path = os.path.join(\n project_path, \"scenes\", data[\"asset2_model_main_v001\"].filename\n )\n\n # and references under path/scenes/refs path\n archived_version4_path = os.path.join(\n project_path, \"scenes/refs\", data[\"asset2_model_take1_v001\"].filename\n )\n\n archived_version4_unresolved_path = os.path.join(\n \"scenes/refs\", data[\"asset2_model_take1_v001\"].filename\n )\n\n archived_version7_path = os.path.join(\n project_path, \"scenes/refs\", data[\"version7\"].filename\n )\n\n archived_version7_unresolved_path = os.path.join(\n \"scenes/refs\", data[\"version7\"].filename\n )\n\n archived_image_path = os.path.join(project_path, \"sourceimages\", image_filename)\n archived_audio_path = os.path.join(project_path, \"sound\", audio_filename)\n\n assert os.path.exists(archived_version1_path)\n assert os.path.exists(archived_version4_path)\n assert os.path.exists(archived_version7_path)\n # jpg should not be included\n assert not os.path.exists(archived_image_path)\n assert os.path.exists(archived_audio_path)\n\n # open the archived version1\n pm.workspace.open(project_path)\n pm.openFile(archived_version1_path)\n\n # expect it to have one reference\n all_refs = pm.listReferences()\n assert len(all_refs) == 1\n\n # and the path is matching to archived version4 path\n ref = all_refs[0]\n assert ref.path == archived_version4_path\n assert ref.unresolvedPath() == archived_version4_unresolved_path\n\n # check the deeper level references\n deeper_ref = pm.listReferences(parentReference=ref)[0]\n assert deeper_ref.path == archived_version7_path\n assert deeper_ref.unresolvedPath() == archived_version7_unresolved_path\n\n # and deeper level files\n ref_image_path = pm.ls(type=\"file\")[0].attr(\"fileTextureName\").get()\n # the path of the jpg should be intact\n assert ref_image_path == \"$REPOTPR/TP/Test_Task_1/sourceimages/test.jpg\"\n\n ref_audio_path = 
pm.ls(type=\"audio\")[0].attr(\"filename\").get()\n assert ref_audio_path == os.path.join(project_path, \"sound\", audio_filename)", "def test_flatten_will_restore_the_current_workspace(\n create_test_data, trash_bin, create_pymel, create_maya_env\n):\n data = create_test_data\n maya_env = create_maya_env\n pm = create_pymel\n # open data[\"asset2_model_main_v001\"]\n maya_env.open(data[\"asset2_model_main_v001\"], force=True)\n\n current_workspace = pm.workspace.path\n\n arch = Archiver()\n project_path = arch.flatten([data[\"asset2_model_main_v001\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # now check if the current workspace is intact\n assert current_workspace == pm.workspace.path", "def flatten(self):\n if self.zernike_mirror is not None:\n self.zernike_mirror.flatten()\n if self.ripple_mirror is not None:\n self.ripple_mirror.flatten()\n if self.dm is not None:\n self.dm.flatten()", "def test_LocationTag_flatten():", "def test_flatten():\n assert list(nsitertools.flatten([1, 2, [3, 4, 5, [[6]]]])) == \\\n [1, 2, 3, 4, 5, 6]", "def test_un_flatten(self, un_flatten_test_case: UnFlattenTest) -> None:\n # Arrange done in fixtures.\n # Act.\n compound = unit_analysis.un_flatten(\n un_flatten_test_case.numerator,\n un_flatten_test_case.denominator,\n un_flatten_test_case.type_factories,\n )\n\n # Assert.\n assert compound == un_flatten_test_case.expected_compound", "def test_array(self):\n self.assertEqual(flatten([]), [])\n self.assertEqual(flatten([1, 2, 3, 4, 5]), [1, 2, 3, 4, 5])\n self.assertEqual(flatten([0, [[2, 3], [[4, 5]], 6]]), [0, 2, 3, 4, 5, 6])\n self.assertEqual(flatten([[1, 2, [3]], 4]), [1, 2, 3, 4])\n self.assertEqual(flatten([[[[[[7]]]]]]), [7])\n self.assertEqual(flatten([999999999999999]), [999999999999999])", "def _set_flatten_results(self):\n self._flatten_results = True\n for analysis in self._analyses:\n if isinstance(analysis, CompositeAnalysis):\n analysis._set_flatten_results()", "def is_flattenable(obj):\n return obj.__class__ in [list, set, frozenset, genclass]", "def unpack_clear(self):\n if (not os.path.exists(self.unpack_path)):\n VERBOSE(\"no image found: %s\" % self.unpack_path)\n else:\n if (not os.path.isdir(self.unpack_path)):\n FATAL(\"can't flatten: %s exists but is not a directory\"\n % self.unpack_path)\n if (not self.unpacked_p(self.unpack_path)):\n FATAL(\"can't flatten: %s exists but does not appear to be an image\"\n % self.unpack_path)\n VERBOSE(\"removing existing image: %s\" % self.unpack_path)\n rmtree(self.unpack_path)", "def test_get_run_artifacts_tree(self):\n pass", "def test_flatten():\n\n print(\"Testing flatten function with deeply nested lists\")\n test_deep_list = [\"one\", \"two\", [\"three\"], \"four\", [\"five\", \"six\", [\"seven\"]]]\n expected_result = [\"one\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\"]\n assert flatten(test_deep_list) == expected_result\n\n print(\"Testing flatten function with list with no nested lists\")\n test_shallow_list = [\"one\", \"two\", \"three\"]\n assert flatten(test_shallow_list) == test_shallow_list\n\n print(\"Testing flatten function with empty list\")\n test_empty_list = []\n assert flatten(test_empty_list) == test_empty_list", "def _on_merge_files(self, focus):\n #get parent of focus\n self.qr.put(('LOCKGUI', None))\n e_child = self.trout.find(\".//\" + focus)\n #if e_child is not collection/project give up\n if e_child.attrib['Type'] not in ['project', 'collection']:\n self.qr.put(('MESSAGEBOXSHOWWARNING2', \\\n (\"Not a collection\", \"Please select a 
collection not a file.\")))\n else:\n #list mp3 files which are immediate children of focus\n children = [c for c in e_child if c.attrib['Type'] is 'file']\n if len(children) > 1:\n second_of_silence = AudioSegment.silent(duration=1000) # in milliseconds second_of_silence = \n sound = AudioSegement.from_mp3(children[0].attrib['Location'])\n for c in children[1:]:\n sound += second_of_silence + AudioSegement.from_mp3(c.attrib['Location'])\n # now save new file in temp workspace?\n #create temp workspace\n #walk up tree creating list of ancestors, stop at project\n ancestors = list()\n this_child = e_child\n while this_child.attrib['Type'] is not 'project':\n e_parent = this_child.getparent()\n ancestors.insert(0, e_parent.tag)\n this_child = e_parent\n workspace = os.path.normpath('{}/Temp'.format(self.Pub2SD))\n for ancestor in ancestors:\n workspace = os.path.normpath('{}/{}'.format(workspace, ancestor.tag))\n os.makedirs(workspace, mode=0o777, exist_ok=True)\n filename = '{}/{}.mp3'.format(workspace,e_child.tag)\n sound.export(filename, 'mp3')\n e_parent = e_child.getparent()\n somevalues = self._read_mp3_tags(echild.attrib['Location'])\n self._add_a_file(afile, e_parent, somevalues)\n else:\n self.qr.put(('MESSAGEBOXSHOWWARNING2', \\\n (e_child.text, \"There are no immediate descendants which are mp3 files.\")))\n# (\"No mp3 files\", \"There are no immediate descendants which are mp3 files.\")))\n \n if etree.iselement(e_child):\n e_parent = e_child.getparent()\n# self.qr.put(('PRINT', [[kid.tag, e_parent.index(kid)] for kid in e_parent.getchildren()]))\n child_index = e_parent.index(e_child)\n if child_index > 0:\n child_index -= 1\n e_parent.remove(e_child)\n e_parent.insert(child_index, e_child)\n# self.qr.put(('PRINT', [[kid.tag, e_parent.index(kid)] for kid in e_parent.getchildren()]))\n self._on_reload_tree()\n self.qr.put(('SEEFOCUS', focus))\n self.qr.put(('UNLOCKGUI', None))\n #list children of focus which are mp3 files\n pass", "def sceneHasNestedRefs(self):\r\n if self.getNestedRefs():\r\n return 1\r\n else:\r\n return 0", "def _flattenAllLevels(self, source):\n\n for root, directories, files in os.walk(source):\n for file in files:\n filePath = os.path.join(root, file)\n destination = os.path.join(source, file)\n if filePath != destination:\n shutil.move(filePath, destination)\n\n for directory in os.listdir(source):\n if os.path.isdir(os.path.join(source, directory)):\n shutil.rmtree(os.path.join(source,directory))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
testing if the Archiver.flatten() is working properly for a scene with multiple levels of references.
def test_flatten_is_working_properly_with_multiple_level_of_references(
    create_test_data, trash_bin, create_pymel, create_maya_env
):
    data = create_test_data
    maya_env = create_maya_env
    pm = create_pymel
    # open data["asset2_model_take1_v001"]
    maya_env.open(data["asset2_model_take1_v001"], force=True)

    # and reference data["version7"] to it
    maya_env.reference(data["version7"])

    # and save it
    pm.saveFile()

    # open data["asset2_model_main_v001"]
    maya_env.open(data["asset2_model_main_v001"], force=True)

    # and reference data["asset2_model_take1_v001"] to it
    maya_env.reference(data["asset2_model_take1_v001"])

    # and save it
    pm.saveFile()

    # renew the scene
    pm.newFile(force=1)

    # create an archiver
    arch = Archiver()

    project_path = arch.flatten([data["asset2_model_main_v001"].absolute_full_path])
    trash_bin.append(project_path)

    # now check if we have two files under the path/scenes directory
    archived_version1_path = os.path.join(
        project_path, "scenes", data["asset2_model_main_v001"].filename
    )

    archived_version4_path = os.path.join(
        project_path, "scenes/refs", data["asset2_model_take1_v001"].filename
    )

    archived_version4_unresolved_path = os.path.join(
        "scenes/refs", data["asset2_model_take1_v001"].filename
    )

    archived_version7_path = os.path.join(
        project_path, "scenes/refs", data["version7"].filename
    )

    archived_version7_unresolved_path = os.path.join(
        "scenes/refs", data["version7"].filename
    )

    assert os.path.exists(archived_version1_path)
    assert os.path.exists(archived_version4_path)
    assert os.path.exists(archived_version7_path)

    # open the archived version1
    pm.workspace.open(project_path)
    pm.openFile(archived_version1_path)

    # expect it to have one reference
    all_refs = pm.listReferences()
    assert len(all_refs) == 1

    # and the path is matching to archived version4 path
    ref = all_refs[0]
    assert ref.path == archived_version4_path
    assert ref.unresolvedPath() == archived_version4_unresolved_path

    # check the deeper level references
    deeper_ref = pm.listReferences(parentReference=ref)[0]
    assert deeper_ref.path == archived_version7_path
    assert deeper_ref.unresolvedPath() == archived_version7_unresolved_path
[ "def test_flatten_is_working_properly_with_only_one_level_of_references(\n create_test_data, trash_bin, create_pymel, create_maya_env\n):\n data = create_test_data\n maya_env = create_maya_env\n pm = create_pymel\n # open data[\"asset2_model_main_v001\"]\n maya_env.open(data[\"asset2_model_main_v001\"], force=True)\n\n # and reference data[\"asset2_model_take1_v001\"] to it\n maya_env.reference(data[\"asset2_model_take1_v001\"])\n\n # and save it\n pm.saveFile()\n\n # renew the scene\n pm.newFile(force=1)\n\n # create an archiver\n arch = Archiver()\n\n project_path = arch.flatten([data[\"asset2_model_main_v001\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # now check if we have two files under the path/scenes directory\n archived_version1_path = os.path.join(\n project_path, \"scenes\", data[\"asset2_model_main_v001\"].filename\n )\n\n archived_version4_unresolved_path = os.path.join(\n \"scenes/refs\", data[\"asset2_model_take1_v001\"].filename\n )\n\n archived_version4_path = os.path.join(\n project_path, archived_version4_unresolved_path\n )\n\n assert os.path.exists(archived_version1_path)\n assert os.path.exists(archived_version4_path)\n\n # open the archived version1\n pm.workspace.open(project_path)\n pm.openFile(archived_version1_path)\n\n # expect it to have one reference\n all_refs = pm.listReferences()\n assert len(all_refs) == 1\n\n # and the path is matching to archived version4 path\n ref = all_refs[0]\n assert ref.path == archived_version4_path\n assert ref.unresolvedPath() == archived_version4_unresolved_path", "def test_flatten_is_working_properly_with_only_one_level_of_multiple_references_to_the_same_file(\n create_test_data, trash_bin, create_pymel, create_maya_env\n):\n data = create_test_data\n maya_env = create_maya_env\n pm = create_pymel\n # open data[\"asset2_model_main_v001\"]\n maya_env.open(data[\"asset2_model_main_v001\"], force=True)\n\n # and reference data[\"asset2_model_take1_v001\"] more than once to it\n maya_env.reference(data[\"asset2_model_take1_v001\"])\n maya_env.reference(data[\"asset2_model_take1_v001\"])\n maya_env.reference(data[\"asset2_model_take1_v001\"])\n\n # and save it\n pm.saveFile()\n\n # renew the scene\n pm.newFile(force=1)\n\n # create an archiver\n arch = Archiver()\n\n project_path = arch.flatten([data[\"asset2_model_main_v001\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # now check if we have two files under the path/scenes directory\n archived_version1_path = os.path.join(\n project_path, \"scenes\", data[\"asset2_model_main_v001\"].filename\n )\n\n archived_version4_unresolved_path = os.path.join(\n \"scenes/refs\", data[\"asset2_model_take1_v001\"].filename\n )\n\n archived_version4_path = os.path.join(\n project_path, archived_version4_unresolved_path\n )\n\n assert os.path.exists(archived_version1_path)\n assert os.path.exists(archived_version4_path)\n\n # open the archived version1\n pm.workspace.open(project_path)\n pm.openFile(archived_version1_path)\n\n # expect it to have three references\n all_refs = pm.listReferences()\n assert len(all_refs) == 3\n\n # and the path is matching to archived version4 path\n ref = all_refs[0]\n assert ref.path == archived_version4_path\n assert ref.unresolvedPath() == archived_version4_unresolved_path\n\n ref = all_refs[1]\n assert ref.path == archived_version4_path\n assert ref.unresolvedPath() == archived_version4_unresolved_path\n\n ref = all_refs[2]\n assert ref.path == archived_version4_path\n assert ref.unresolvedPath() == 
archived_version4_unresolved_path", "def test_flatten_is_working_properly_with_no_references(create_test_data, trash_bin):\n data = create_test_data\n arch = Archiver()\n project_path = arch.flatten([data[\"asset2_model_main_v001\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # the returned path should be a maya project directory\n assert os.path.exists(project_path)\n\n # there should be a workspace.mel file\n assert os.path.exists(os.path.join(project_path, \"workspace.mel\"))\n\n # there should be a maya scene file under path/scenes with the same\n # name of the source file\n assert os.path.exists(\n os.path.join(project_path, \"scenes\", data[\"asset2_model_main_v001\"].filename)\n )", "def test_flatten_is_working_properly_for_external_files(\n create_test_data, trash_bin, create_pymel, create_maya_env\n):\n data = create_test_data\n maya_env = create_maya_env\n pm = create_pymel\n # open data[\"version7\"]\n maya_env.open(data[\"version7\"], force=True)\n\n # create an image file at the project root\n image_filename = \"test.jpg\"\n image_path = os.path.join(data[\"version7\"].absolute_path, \"Textures\")\n image_full_path = os.path.join(image_path, image_filename)\n\n # create the file\n os.makedirs(image_path, exist_ok=True)\n with open(image_full_path, \"w+\") as f:\n f.writelines([\"\"])\n\n audio_filename = \"test.wav\"\n audio_path = os.path.join(data[\"version7\"].absolute_path, \"sound\")\n audio_full_path = os.path.join(audio_path, audio_filename)\n\n # create the file\n os.makedirs(audio_path, exist_ok=True)\n with open(audio_full_path, \"w+\") as f:\n f.writelines([\"\"])\n\n # create one image and one audio node\n pm.createNode(\"file\").attr(\"fileTextureName\").set(image_full_path)\n pm.createNode(\"audio\").attr(\"filename\").set(audio_full_path)\n\n # save it\n # replace external paths\n maya_env.replace_external_paths()\n pm.saveFile()\n\n # renew the scene\n pm.newFile(force=1)\n\n # create an archiver\n arch = Archiver()\n\n project_path = arch.flatten([data[\"version7\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # now check if we have the files under the path/scenes directory\n archived_version7_path = os.path.join(\n project_path, \"scenes\", data[\"version7\"].filename\n )\n\n archived_image_path = os.path.join(project_path, \"sourceimages\", image_filename)\n\n assert os.path.exists(archived_version7_path)\n assert os.path.exists(archived_image_path)\n\n # open the archived version1\n pm.workspace.open(project_path)\n pm.openFile(archived_version7_path)\n\n # and image files\n ref_image_path = pm.ls(type=\"file\")[0].attr(\"fileTextureName\").get()\n assert ref_image_path == os.path.join(project_path, \"sourceimages\", image_filename)\n ref_audio_path = pm.ls(type=\"audio\")[0].attr(\"filename\").get()\n assert ref_audio_path, os.path.join(project_path, \"sound\", audio_filename)", "def test_flatten_will_restore_the_current_workspace(\n create_test_data, trash_bin, create_pymel, create_maya_env\n):\n data = create_test_data\n maya_env = create_maya_env\n pm = create_pymel\n # open data[\"asset2_model_main_v001\"]\n maya_env.open(data[\"asset2_model_main_v001\"], force=True)\n\n current_workspace = pm.workspace.path\n\n arch = Archiver()\n project_path = arch.flatten([data[\"asset2_model_main_v001\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # now check if the current workspace is intact\n assert current_workspace == pm.workspace.path", "def test_flatten_is_working_properly_with_exclude_mask(\n create_test_data, 
trash_bin, create_pymel, create_maya_env\n):\n data = create_test_data\n maya_env = create_maya_env\n pm = create_pymel\n # open data[\"version7\"]\n maya_env.open(data[\"version7\"], force=True)\n\n # create an image file at the project root\n image_filename = \"test.jpg\"\n image_path = os.path.join(data[\"version7\"].absolute_path, \"..\", \"sourceimages\")\n image_full_path = os.path.join(image_path, image_filename)\n\n # create the file\n os.makedirs(image_path, exist_ok=True)\n with open(image_full_path, \"w+\") as f:\n f.writelines([\"\"])\n\n audio_filename = \"test.wav\"\n audio_path = os.path.join(data[\"version7\"].absolute_path, \"sound\")\n audio_full_path = os.path.join(audio_path, audio_filename)\n\n # create the file\n os.makedirs(audio_path, exist_ok=True)\n with open(audio_full_path, \"w+\") as f:\n f.writelines([\"\"])\n\n # create one image and one audio node\n pm.createNode(\"file\").attr(\"fileTextureName\").set(image_full_path)\n pm.createNode(\"audio\").attr(\"filename\").set(audio_full_path)\n\n # save it\n # replace external paths\n maya_env.replace_external_paths()\n pm.saveFile()\n\n # open data[\"asset2_model_take1_v001\"]\n maya_env.open(data[\"asset2_model_take1_v001\"], force=True)\n\n # and reference data[\"version7\"] to it\n maya_env.reference(data[\"version7\"])\n\n # and save it\n pm.saveFile()\n\n # open data[\"asset2_model_main_v001\"]\n maya_env.open(data[\"asset2_model_main_v001\"], force=True)\n\n # and reference data[\"asset2_model_take1_v001\"] to it\n maya_env.reference(data[\"asset2_model_take1_v001\"])\n\n # and save it\n pm.saveFile()\n\n # renew the scene\n pm.newFile(force=1)\n\n # create an archiver\n arch = Archiver(exclude_mask=[\".png\", \".jpg\", \".tga\"])\n\n project_path = arch.flatten([data[\"asset2_model_main_v001\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # now check if we have the files under the path/scenes directory\n archived_version1_path = os.path.join(\n project_path, \"scenes\", data[\"asset2_model_main_v001\"].filename\n )\n\n # and references under path/scenes/refs path\n archived_version4_path = os.path.join(\n project_path, \"scenes/refs\", data[\"asset2_model_take1_v001\"].filename\n )\n\n archived_version4_unresolved_path = os.path.join(\n \"scenes/refs\", data[\"asset2_model_take1_v001\"].filename\n )\n\n archived_version7_path = os.path.join(\n project_path, \"scenes/refs\", data[\"version7\"].filename\n )\n\n archived_version7_unresolved_path = os.path.join(\n \"scenes/refs\", data[\"version7\"].filename\n )\n\n archived_image_path = os.path.join(project_path, \"sourceimages\", image_filename)\n archived_audio_path = os.path.join(project_path, \"sound\", audio_filename)\n\n assert os.path.exists(archived_version1_path)\n assert os.path.exists(archived_version4_path)\n assert os.path.exists(archived_version7_path)\n # jpg should not be included\n assert not os.path.exists(archived_image_path)\n assert os.path.exists(archived_audio_path)\n\n # open the archived version1\n pm.workspace.open(project_path)\n pm.openFile(archived_version1_path)\n\n # expect it to have one reference\n all_refs = pm.listReferences()\n assert len(all_refs) == 1\n\n # and the path is matching to archived version4 path\n ref = all_refs[0]\n assert ref.path == archived_version4_path\n assert ref.unresolvedPath() == archived_version4_unresolved_path\n\n # check the deeper level references\n deeper_ref = pm.listReferences(parentReference=ref)[0]\n assert deeper_ref.path == archived_version7_path\n assert 
deeper_ref.unresolvedPath() == archived_version7_unresolved_path\n\n # and deeper level files\n ref_image_path = pm.ls(type=\"file\")[0].attr(\"fileTextureName\").get()\n # the path of the jpg should be intact\n assert ref_image_path == \"$REPOTPR/TP/Test_Task_1/sourceimages/test.jpg\"\n\n ref_audio_path = pm.ls(type=\"audio\")[0].attr(\"filename\").get()\n assert ref_audio_path == os.path.join(project_path, \"sound\", audio_filename)", "def flatten(self):\n if self.zernike_mirror is not None:\n self.zernike_mirror.flatten()\n if self.ripple_mirror is not None:\n self.ripple_mirror.flatten()\n if self.dm is not None:\n self.dm.flatten()", "def test_flatten():\n assert list(nsitertools.flatten([1, 2, [3, 4, 5, [[6]]]])) == \\\n [1, 2, 3, 4, 5, 6]", "def test_LocationTag_flatten():", "def is_flattenable(obj):\n return obj.__class__ in [list, set, frozenset, genclass]", "def _set_flatten_results(self):\n self._flatten_results = True\n for analysis in self._analyses:\n if isinstance(analysis, CompositeAnalysis):\n analysis._set_flatten_results()", "def test_array(self):\n self.assertEqual(flatten([]), [])\n self.assertEqual(flatten([1, 2, 3, 4, 5]), [1, 2, 3, 4, 5])\n self.assertEqual(flatten([0, [[2, 3], [[4, 5]], 6]]), [0, 2, 3, 4, 5, 6])\n self.assertEqual(flatten([[1, 2, [3]], 4]), [1, 2, 3, 4])\n self.assertEqual(flatten([[[[[[7]]]]]]), [7])\n self.assertEqual(flatten([999999999999999]), [999999999999999])", "def test_flatten():\n\n print(\"Testing flatten function with deeply nested lists\")\n test_deep_list = [\"one\", \"two\", [\"three\"], \"four\", [\"five\", \"six\", [\"seven\"]]]\n expected_result = [\"one\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\"]\n assert flatten(test_deep_list) == expected_result\n\n print(\"Testing flatten function with list with no nested lists\")\n test_shallow_list = [\"one\", \"two\", \"three\"]\n assert flatten(test_shallow_list) == test_shallow_list\n\n print(\"Testing flatten function with empty list\")\n test_empty_list = []\n assert flatten(test_empty_list) == test_empty_list", "def sceneHasNestedRefs(self):\r\n if self.getNestedRefs():\r\n return 1\r\n else:\r\n return 0", "def test_un_flatten(self, un_flatten_test_case: UnFlattenTest) -> None:\n # Arrange done in fixtures.\n # Act.\n compound = unit_analysis.un_flatten(\n un_flatten_test_case.numerator,\n un_flatten_test_case.denominator,\n un_flatten_test_case.type_factories,\n )\n\n # Assert.\n assert compound == un_flatten_test_case.expected_compound", "def test_unflatten(self, shape):\n\n reshaped = np.reshape(flat_dummy_array, shape)\n unflattened = np.array([x for x in unflatten(flat_dummy_array, reshaped)])\n\n assert unflattened.shape == reshaped.shape\n assert np.array_equal(unflattened, reshaped)", "def getFlattenDeps (self, skipOptional = True):\n return self.root.flatten (skipOptional)", "def unflatten(self, x):\n pass", "def get_flattened(self):\n return ravel(self.get())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
testing if the Archiver.flatten() is working properly for a scene with references that has external files like textures, sound etc. and there is also an exclude_mask
def test_flatten_is_working_properly_with_exclude_mask(
    create_test_data, trash_bin, create_pymel, create_maya_env
):
    data = create_test_data
    maya_env = create_maya_env
    pm = create_pymel
    # open data["version7"]
    maya_env.open(data["version7"], force=True)

    # create an image file at the project root
    image_filename = "test.jpg"
    image_path = os.path.join(data["version7"].absolute_path, "..", "sourceimages")
    image_full_path = os.path.join(image_path, image_filename)

    # create the file
    os.makedirs(image_path, exist_ok=True)
    with open(image_full_path, "w+") as f:
        f.writelines([""])

    audio_filename = "test.wav"
    audio_path = os.path.join(data["version7"].absolute_path, "sound")
    audio_full_path = os.path.join(audio_path, audio_filename)

    # create the file
    os.makedirs(audio_path, exist_ok=True)
    with open(audio_full_path, "w+") as f:
        f.writelines([""])

    # create one image and one audio node
    pm.createNode("file").attr("fileTextureName").set(image_full_path)
    pm.createNode("audio").attr("filename").set(audio_full_path)

    # save it
    # replace external paths
    maya_env.replace_external_paths()
    pm.saveFile()

    # open data["asset2_model_take1_v001"]
    maya_env.open(data["asset2_model_take1_v001"], force=True)

    # and reference data["version7"] to it
    maya_env.reference(data["version7"])

    # and save it
    pm.saveFile()

    # open data["asset2_model_main_v001"]
    maya_env.open(data["asset2_model_main_v001"], force=True)

    # and reference data["asset2_model_take1_v001"] to it
    maya_env.reference(data["asset2_model_take1_v001"])

    # and save it
    pm.saveFile()

    # renew the scene
    pm.newFile(force=1)

    # create an archiver
    arch = Archiver(exclude_mask=[".png", ".jpg", ".tga"])

    project_path = arch.flatten([data["asset2_model_main_v001"].absolute_full_path])
    trash_bin.append(project_path)

    # now check if we have the files under the path/scenes directory
    archived_version1_path = os.path.join(
        project_path, "scenes", data["asset2_model_main_v001"].filename
    )

    # and references under path/scenes/refs path
    archived_version4_path = os.path.join(
        project_path, "scenes/refs", data["asset2_model_take1_v001"].filename
    )

    archived_version4_unresolved_path = os.path.join(
        "scenes/refs", data["asset2_model_take1_v001"].filename
    )

    archived_version7_path = os.path.join(
        project_path, "scenes/refs", data["version7"].filename
    )

    archived_version7_unresolved_path = os.path.join(
        "scenes/refs", data["version7"].filename
    )

    archived_image_path = os.path.join(project_path, "sourceimages", image_filename)
    archived_audio_path = os.path.join(project_path, "sound", audio_filename)

    assert os.path.exists(archived_version1_path)
    assert os.path.exists(archived_version4_path)
    assert os.path.exists(archived_version7_path)
    # jpg should not be included
    assert not os.path.exists(archived_image_path)
    assert os.path.exists(archived_audio_path)

    # open the archived version1
    pm.workspace.open(project_path)
    pm.openFile(archived_version1_path)

    # expect it to have one reference
    all_refs = pm.listReferences()
    assert len(all_refs) == 1

    # and the path is matching to archived version4 path
    ref = all_refs[0]
    assert ref.path == archived_version4_path
    assert ref.unresolvedPath() == archived_version4_unresolved_path

    # check the deeper level references
    deeper_ref = pm.listReferences(parentReference=ref)[0]
    assert deeper_ref.path == archived_version7_path
    assert deeper_ref.unresolvedPath() == archived_version7_unresolved_path

    # and deeper level files
    ref_image_path = pm.ls(type="file")[0].attr("fileTextureName").get()
    # the path of the jpg should be intact
    assert ref_image_path == "$REPOTPR/TP/Test_Task_1/sourceimages/test.jpg"

    ref_audio_path = pm.ls(type="audio")[0].attr("filename").get()
    assert ref_audio_path == os.path.join(project_path, "sound", audio_filename)
[ "def test_flatten_is_working_properly_for_external_files(\n create_test_data, trash_bin, create_pymel, create_maya_env\n):\n data = create_test_data\n maya_env = create_maya_env\n pm = create_pymel\n # open data[\"version7\"]\n maya_env.open(data[\"version7\"], force=True)\n\n # create an image file at the project root\n image_filename = \"test.jpg\"\n image_path = os.path.join(data[\"version7\"].absolute_path, \"Textures\")\n image_full_path = os.path.join(image_path, image_filename)\n\n # create the file\n os.makedirs(image_path, exist_ok=True)\n with open(image_full_path, \"w+\") as f:\n f.writelines([\"\"])\n\n audio_filename = \"test.wav\"\n audio_path = os.path.join(data[\"version7\"].absolute_path, \"sound\")\n audio_full_path = os.path.join(audio_path, audio_filename)\n\n # create the file\n os.makedirs(audio_path, exist_ok=True)\n with open(audio_full_path, \"w+\") as f:\n f.writelines([\"\"])\n\n # create one image and one audio node\n pm.createNode(\"file\").attr(\"fileTextureName\").set(image_full_path)\n pm.createNode(\"audio\").attr(\"filename\").set(audio_full_path)\n\n # save it\n # replace external paths\n maya_env.replace_external_paths()\n pm.saveFile()\n\n # renew the scene\n pm.newFile(force=1)\n\n # create an archiver\n arch = Archiver()\n\n project_path = arch.flatten([data[\"version7\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # now check if we have the files under the path/scenes directory\n archived_version7_path = os.path.join(\n project_path, \"scenes\", data[\"version7\"].filename\n )\n\n archived_image_path = os.path.join(project_path, \"sourceimages\", image_filename)\n\n assert os.path.exists(archived_version7_path)\n assert os.path.exists(archived_image_path)\n\n # open the archived version1\n pm.workspace.open(project_path)\n pm.openFile(archived_version7_path)\n\n # and image files\n ref_image_path = pm.ls(type=\"file\")[0].attr(\"fileTextureName\").get()\n assert ref_image_path == os.path.join(project_path, \"sourceimages\", image_filename)\n ref_audio_path = pm.ls(type=\"audio\")[0].attr(\"filename\").get()\n assert ref_audio_path, os.path.join(project_path, \"sound\", audio_filename)", "def test_flatten_is_working_properly_with_only_one_level_of_multiple_references_to_the_same_file(\n create_test_data, trash_bin, create_pymel, create_maya_env\n):\n data = create_test_data\n maya_env = create_maya_env\n pm = create_pymel\n # open data[\"asset2_model_main_v001\"]\n maya_env.open(data[\"asset2_model_main_v001\"], force=True)\n\n # and reference data[\"asset2_model_take1_v001\"] more than once to it\n maya_env.reference(data[\"asset2_model_take1_v001\"])\n maya_env.reference(data[\"asset2_model_take1_v001\"])\n maya_env.reference(data[\"asset2_model_take1_v001\"])\n\n # and save it\n pm.saveFile()\n\n # renew the scene\n pm.newFile(force=1)\n\n # create an archiver\n arch = Archiver()\n\n project_path = arch.flatten([data[\"asset2_model_main_v001\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # now check if we have two files under the path/scenes directory\n archived_version1_path = os.path.join(\n project_path, \"scenes\", data[\"asset2_model_main_v001\"].filename\n )\n\n archived_version4_unresolved_path = os.path.join(\n \"scenes/refs\", data[\"asset2_model_take1_v001\"].filename\n )\n\n archived_version4_path = os.path.join(\n project_path, archived_version4_unresolved_path\n )\n\n assert os.path.exists(archived_version1_path)\n assert os.path.exists(archived_version4_path)\n\n # open the archived version1\n 
pm.workspace.open(project_path)\n pm.openFile(archived_version1_path)\n\n # expect it to have three references\n all_refs = pm.listReferences()\n assert len(all_refs) == 3\n\n # and the path is matching to archived version4 path\n ref = all_refs[0]\n assert ref.path == archived_version4_path\n assert ref.unresolvedPath() == archived_version4_unresolved_path\n\n ref = all_refs[1]\n assert ref.path == archived_version4_path\n assert ref.unresolvedPath() == archived_version4_unresolved_path\n\n ref = all_refs[2]\n assert ref.path == archived_version4_path\n assert ref.unresolvedPath() == archived_version4_unresolved_path", "def test_flatten_is_working_properly_with_multiple_level_of_references(\n create_test_data, trash_bin, create_pymel, create_maya_env\n):\n data = create_test_data\n maya_env = create_maya_env\n pm = create_pymel\n # open data[\"asset2_model_take1_v001\"]\n maya_env.open(data[\"asset2_model_take1_v001\"], force=True)\n\n # and reference data[\"version7\"] to it\n maya_env.reference(data[\"version7\"])\n\n # and save it\n pm.saveFile()\n\n # open data[\"asset2_model_main_v001\"]\n maya_env.open(data[\"asset2_model_main_v001\"], force=True)\n\n # and reference data[\"asset2_model_take1_v001\"] to it\n maya_env.reference(data[\"asset2_model_take1_v001\"])\n\n # and save it\n pm.saveFile()\n\n # renew the scene\n pm.newFile(force=1)\n\n # create an archiver\n arch = Archiver()\n\n project_path = arch.flatten([data[\"asset2_model_main_v001\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # now check if we have two files under the path/scenes directory\n archived_version1_path = os.path.join(\n project_path, \"scenes\", data[\"asset2_model_main_v001\"].filename\n )\n\n archived_version4_path = os.path.join(\n project_path, \"scenes/refs\", data[\"asset2_model_take1_v001\"].filename\n )\n\n archived_version4_unresolved_path = os.path.join(\n \"scenes/refs\", data[\"asset2_model_take1_v001\"].filename\n )\n\n archived_version7_path = os.path.join(\n project_path, \"scenes/refs\", data[\"version7\"].filename\n )\n\n archived_version7_unresolved_path = os.path.join(\n \"scenes/refs\", data[\"version7\"].filename\n )\n\n assert os.path.exists(archived_version1_path)\n assert os.path.exists(archived_version4_path)\n assert os.path.exists(archived_version7_path)\n\n # open the archived version1\n pm.workspace.open(project_path)\n pm.openFile(archived_version1_path)\n\n # expect it to have one reference\n all_refs = pm.listReferences()\n assert len(all_refs) == 1\n\n # and the path is matching to archived version4 path\n ref = all_refs[0]\n assert ref.path == archived_version4_path\n assert ref.unresolvedPath() == archived_version4_unresolved_path\n\n # check the deeper level references\n deeper_ref = pm.listReferences(parentReference=ref)[0]\n assert deeper_ref.path == archived_version7_path\n assert deeper_ref.unresolvedPath() == archived_version7_unresolved_path", "def test_flatten_is_working_properly_with_no_references(create_test_data, trash_bin):\n data = create_test_data\n arch = Archiver()\n project_path = arch.flatten([data[\"asset2_model_main_v001\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # the returned path should be a maya project directory\n assert os.path.exists(project_path)\n\n # there should be a workspace.mel file\n assert os.path.exists(os.path.join(project_path, \"workspace.mel\"))\n\n # there should be a maya scene file under path/scenes with the same\n # name of the source file\n assert os.path.exists(\n os.path.join(project_path, 
\"scenes\", data[\"asset2_model_main_v001\"].filename)\n )", "def test_flatten_is_working_properly_with_only_one_level_of_references(\n create_test_data, trash_bin, create_pymel, create_maya_env\n):\n data = create_test_data\n maya_env = create_maya_env\n pm = create_pymel\n # open data[\"asset2_model_main_v001\"]\n maya_env.open(data[\"asset2_model_main_v001\"], force=True)\n\n # and reference data[\"asset2_model_take1_v001\"] to it\n maya_env.reference(data[\"asset2_model_take1_v001\"])\n\n # and save it\n pm.saveFile()\n\n # renew the scene\n pm.newFile(force=1)\n\n # create an archiver\n arch = Archiver()\n\n project_path = arch.flatten([data[\"asset2_model_main_v001\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # now check if we have two files under the path/scenes directory\n archived_version1_path = os.path.join(\n project_path, \"scenes\", data[\"asset2_model_main_v001\"].filename\n )\n\n archived_version4_unresolved_path = os.path.join(\n \"scenes/refs\", data[\"asset2_model_take1_v001\"].filename\n )\n\n archived_version4_path = os.path.join(\n project_path, archived_version4_unresolved_path\n )\n\n assert os.path.exists(archived_version1_path)\n assert os.path.exists(archived_version4_path)\n\n # open the archived version1\n pm.workspace.open(project_path)\n pm.openFile(archived_version1_path)\n\n # expect it to have one reference\n all_refs = pm.listReferences()\n assert len(all_refs) == 1\n\n # and the path is matching to archived version4 path\n ref = all_refs[0]\n assert ref.path == archived_version4_path\n assert ref.unresolvedPath() == archived_version4_unresolved_path", "def test_flatten_will_restore_the_current_workspace(\n create_test_data, trash_bin, create_pymel, create_maya_env\n):\n data = create_test_data\n maya_env = create_maya_env\n pm = create_pymel\n # open data[\"asset2_model_main_v001\"]\n maya_env.open(data[\"asset2_model_main_v001\"], force=True)\n\n current_workspace = pm.workspace.path\n\n arch = Archiver()\n project_path = arch.flatten([data[\"asset2_model_main_v001\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # now check if the current workspace is intact\n assert current_workspace == pm.workspace.path", "def blend(activity, band_map, build_clear_observation=False, block_size=None):\n # Assume that it contains a band and quality band\n numscenes = len(activity['scenes'])\n\n band = activity['band']\n\n version = activity['version']\n\n nodata = activity.get('nodata', -9999)\n if band == 'quality':\n nodata = 255\n\n # Get basic information (profile) of input files\n keys = list(activity['scenes'].keys())\n\n filename = activity['scenes'][keys[0]]['ARDfiles'][band]\n\n with rasterio.open(filename) as src:\n profile = src.profile\n tilelist = list(src.block_windows())\n\n # Order scenes based in efficacy/resolution\n mask_tuples = []\n\n for key in activity['scenes']:\n scene = activity['scenes'][key]\n resolution = scene.get('resx') or scene.get('resy') or scene.get('resolution')\n\n efficacy = int(scene['efficacy'])\n resolution = int(resolution)\n mask_tuples.append((100. 
* efficacy / resolution, key))\n\n # Open all input files and save the datasets in two lists, one for masks and other for the current band.\n # The list will be ordered by efficacy/resolution\n masklist = []\n\n bandlist = []\n\n provenance_merge_map = dict()\n\n for m in sorted(mask_tuples, reverse=True):\n key = m[1]\n efficacy = m[0]\n scene = activity['scenes'][key]\n\n filename = scene['ARDfiles'][band_map['quality']]\n\n try:\n masklist.append(rasterio.open(filename))\n except BaseException as e:\n raise IOError('FileError while opening {} - {}'.format(filename, e))\n\n filename = scene['ARDfiles'][band]\n\n provenance_merge_map.setdefault(key, None)\n\n if scene['ARDfiles'].get(DATASOURCE_NAME):\n provenance_merge_map[key] = SmartDataSet(scene['ARDfiles'][DATASOURCE_NAME])\n\n try:\n bandlist.append(rasterio.open(filename))\n except BaseException as e:\n raise IOError('FileError while opening {} - {}'.format(filename, e))\n\n # Build the raster to store the output images.\n width = profile['width']\n height = profile['height']\n\n # STACK will be generated in memory\n stack_raster = numpy.full((height, width), dtype=profile['dtype'], fill_value=nodata)\n # Build the stack total observation\n stack_total_observation = numpy.zeros((height, width), dtype=numpy.uint8)\n\n datacube = activity.get('datacube')\n period = activity.get('period')\n tile_id = activity.get('tile_id')\n\n is_combined_collection = len(activity['datasets']) > 1\n\n cube_file = build_cube_path(datacube, period, tile_id, version=version, band=band, suffix='.tif')\n\n # Create directory\n cube_file.parent.mkdir(parents=True, exist_ok=True)\n\n median_raster = numpy.full((height, width), fill_value=nodata, dtype=profile['dtype'])\n\n if build_clear_observation:\n logging.warning('Creating and computing Clear Observation (ClearOb) file...')\n\n clear_ob_file_path = build_cube_path(datacube, period, tile_id, version=version, band=CLEAR_OBSERVATION_NAME, suffix='.tif')\n dataset_file_path = build_cube_path(datacube, period, tile_id, version=version, band=DATASOURCE_NAME, suffix='.tif')\n\n clear_ob_profile = profile.copy()\n clear_ob_profile['dtype'] = CLEAR_OBSERVATION_ATTRIBUTES['data_type']\n clear_ob_profile.pop('nodata', None)\n clear_ob_data_set = SmartDataSet(str(clear_ob_file_path), 'w', **clear_ob_profile)\n\n dataset_profile = profile.copy()\n dataset_profile['dtype'] = DATASOURCE_ATTRIBUTES['data_type']\n dataset_profile['nodata'] = DATASOURCE_ATTRIBUTES['nodata']\n\n if is_combined_collection:\n datasets = activity['datasets']\n tags = {dataset: value for value, dataset in enumerate(datasets)}\n\n datasource = SmartDataSet(str(dataset_file_path), 'w', tags=tags, **dataset_profile)\n datasource.dataset.write(numpy.full((height, width),\n fill_value=DATASOURCE_ATTRIBUTES['nodata'],\n dtype=DATASOURCE_ATTRIBUTES['data_type']), indexes=1)\n\n provenance_array = numpy.full((height, width), dtype=numpy.int16, fill_value=-1)\n\n for _, window in tilelist:\n # Build the stack to store all images as a masked array. 
At this stage the array will contain the masked data\n stackMA = numpy.ma.zeros((numscenes, window.height, window.width), dtype=numpy.int16)\n\n notdonemask = numpy.ones(shape=(window.height, window.width), dtype=numpy.bool_)\n\n if build_clear_observation and is_combined_collection:\n data_set_block = numpy.full((window.height, window.width),\n fill_value=DATASOURCE_ATTRIBUTES['nodata'],\n dtype=DATASOURCE_ATTRIBUTES['data_type'])\n\n row_offset = window.row_off + window.height\n col_offset = window.col_off + window.width\n\n # For all pair (quality,band) scenes\n for order in range(numscenes):\n # Read both chunk of Merge and Quality, respectively.\n ssrc = bandlist[order]\n msrc = masklist[order]\n raster = ssrc.read(1, window=window)\n mask = msrc.read(1, window=window)\n copy_mask = numpy.array(mask, copy=True)\n\n # Mask valid data (0 and 1) as True\n mask[mask < 2] = 1\n mask[mask == 3] = 1\n # Mask cloud/snow/shadow/no-data as False\n mask[mask >= 2] = 0\n # Ensure that Raster noda value (-9999 maybe) is set to False\n mask[raster == nodata] = 0\n\n # Create an inverse mask value in order to pass to numpy masked array\n # True => nodata\n bmask = numpy.invert(mask.astype(numpy.bool_))\n\n # Use the mask to mark the fill (0) and cloudy (2) pixels\n stackMA[order] = numpy.ma.masked_where(bmask, raster)\n\n # Copy Masked values in order to stack total observation\n copy_mask[copy_mask <= 4] = 1\n copy_mask[copy_mask >= 5] = 0\n\n stack_total_observation[window.row_off: row_offset, window.col_off: col_offset] += copy_mask.astype(numpy.uint8)\n\n # Get current observation file name\n file_name = Path(bandlist[order].name).stem\n file_date = datetime.strptime(file_name.split('_')[4], '%Y-%m-%d')\n day_of_year = file_date.timetuple().tm_yday\n\n # Find all no data in destination STACK image\n stack_raster_where_nodata = numpy.where(\n stack_raster[window.row_off: row_offset, window.col_off: col_offset] == nodata\n )\n\n # Turns into a 1-dimension\n stack_raster_nodata_pos = numpy.ravel_multi_index(stack_raster_where_nodata,\n stack_raster[window.row_off: row_offset,\n window.col_off: col_offset].shape)\n\n if build_clear_observation and is_combined_collection:\n datasource_block = provenance_merge_map[file_date.strftime('%Y-%m-%d')].dataset.read(1, window=window)\n\n # Find all valid/cloud in destination STACK image\n raster_where_data = numpy.where(raster != nodata)\n raster_data_pos = numpy.ravel_multi_index(raster_where_data, raster.shape)\n\n # Match stack nodata values with observation\n # stack_raster_where_nodata && raster_where_data\n intersect_ravel = numpy.intersect1d(stack_raster_nodata_pos, raster_data_pos)\n\n if len(intersect_ravel):\n where_intersec = numpy.unravel_index(intersect_ravel, raster.shape)\n stack_raster[window.row_off: row_offset, window.col_off: col_offset][where_intersec] = raster[where_intersec]\n\n provenance_array[window.row_off: row_offset, window.col_off: col_offset][where_intersec] = day_of_year\n\n if build_clear_observation and is_combined_collection:\n data_set_block[where_intersec] = datasource_block[where_intersec]\n\n # Identify what is needed to stack, based in Array 2d bool\n todomask = notdonemask * numpy.invert(bmask)\n\n # Find all positions where valid data matches.\n clear_not_done_pixels = numpy.where(numpy.logical_and(todomask, mask.astype(numpy.bool)))\n\n # Override the STACK Raster with valid data.\n stack_raster[window.row_off: row_offset, window.col_off: col_offset][clear_not_done_pixels] = raster[\n clear_not_done_pixels]\n\n # Mark 
day of year to the valid pixels\n provenance_array[window.row_off: row_offset, window.col_off: col_offset][\n clear_not_done_pixels] = day_of_year\n\n if build_clear_observation and is_combined_collection:\n data_set_block[clear_not_done_pixels] = datasource_block[clear_not_done_pixels]\n\n # Update what was done.\n notdonemask = notdonemask * bmask\n\n median = numpy.ma.median(stackMA, axis=0).data\n\n median[notdonemask.astype(numpy.bool_)] = nodata\n\n median_raster[window.row_off: row_offset, window.col_off: col_offset] = median.astype(profile['dtype'])\n\n if build_clear_observation:\n count_raster = numpy.ma.count(stackMA, axis=0)\n\n clear_ob_data_set.dataset.write(count_raster.astype(clear_ob_profile['dtype']), window=window, indexes=1)\n\n if is_combined_collection:\n datasource.dataset.write(data_set_block, window=window, indexes=1)\n\n # Close all input dataset\n for order in range(numscenes):\n bandlist[order].close()\n masklist[order].close()\n\n # Evaluate cloud cover\n efficacy, cloudcover = _qa_statistics(stack_raster)\n\n profile.update({\n 'compress': 'LZW',\n 'tiled': True,\n 'interleave': 'pixel',\n })\n\n # Since count no cloud operator is specific for a band, we must ensure to manipulate data set only\n # for band clear observation to avoid concurrent processes write same data set in disk.\n # TODO: Review how to design it to avoid these IF's statement, since we must stack data set and mask dummy values\n if build_clear_observation:\n clear_ob_data_set.close()\n logging.warning('Clear Observation (ClearOb) file generated successfully.')\n\n total_observation_file = build_cube_path(datacube, period, tile_id, version=version, band=TOTAL_OBSERVATION_NAME)\n total_observation_profile = profile.copy()\n total_observation_profile.pop('nodata', None)\n total_observation_profile['dtype'] = 'uint8'\n\n save_as_cog(str(total_observation_file), stack_total_observation, block_size=block_size, **total_observation_profile)\n generate_cogs(str(clear_ob_file_path), str(clear_ob_file_path), block_size=block_size)\n\n activity['clear_observation_file'] = str(clear_ob_data_set.path)\n activity['total_observation'] = str(total_observation_file)\n\n cube_function = DataCubeFragments(datacube).composite_function\n\n if cube_function == 'MED':\n # Close and upload the MEDIAN dataset\n save_as_cog(str(cube_file), median_raster, block_size=block_size, mode='w', **profile)\n else:\n save_as_cog(str(cube_file), stack_raster, block_size=block_size, mode='w', **profile)\n\n if build_clear_observation:\n provenance_file = build_cube_path(datacube, period, tile_id, version=version, band=PROVENANCE_NAME)\n provenance_profile = profile.copy()\n provenance_profile.pop('nodata', -1)\n provenance_profile['dtype'] = PROVENANCE_ATTRIBUTES['data_type']\n\n save_as_cog(str(provenance_file), provenance_array, block_size=block_size, **provenance_profile)\n activity['provenance'] = str(provenance_file)\n\n if is_combined_collection:\n datasource.close()\n generate_cogs(str(dataset_file_path), str(dataset_file_path), block_size=block_size)\n activity['datasource'] = str(dataset_file_path)\n\n activity['blends'] = {\n cube_function: str(cube_file)\n }\n\n activity['efficacy'] = efficacy\n activity['cloudratio'] = cloudcover\n\n return activity", "def filter_samples(self, is_valid_file=None, extensions=('.png', '.jpg', '.jpeg')):\n super().filter_samples(is_valid_file, extensions)\n self.samples = [filename for filename in self.samples if filename.startswith(self.root_in_archive)]", "def unpack_clear(self):\n if 
(not os.path.exists(self.unpack_path)):\n VERBOSE(\"no image found: %s\" % self.unpack_path)\n else:\n if (not os.path.isdir(self.unpack_path)):\n FATAL(\"can't flatten: %s exists but is not a directory\"\n % self.unpack_path)\n if (not self.unpacked_p(self.unpack_path)):\n FATAL(\"can't flatten: %s exists but does not appear to be an image\"\n % self.unpack_path)\n VERBOSE(\"removing existing image: %s\" % self.unpack_path)\n rmtree(self.unpack_path)", "def filterStudioImport(includeCameras=bool, includeLights=bool, convertShellToPoly=bool, transferDirectoryName=\"string\"):\n pass", "def tests(img1, img2, img3):\n with rasterio.open(img1) as src:\n with rasterio.open(img2) as clip:\n with rasterio.open(img3) as sharp:\n # clipped image must have smaller dimensions than input image\n if src.height > clip.height and src.width > clip.width:\n print(\"clip test passed\")\n else:\n print(\"clip test is failed\")\n # sharpened image must have single band and float 32 type\n if sharp.count == 1 and sharp.dtypes[0] == 'float32':\n print(\"sharpen test is passed\")\n else:\n print(\"sharpen test is failed\")", "def flatten(self):\n if self.zernike_mirror is not None:\n self.zernike_mirror.flatten()\n if self.ripple_mirror is not None:\n self.ripple_mirror.flatten()\n if self.dm is not None:\n self.dm.flatten()", "def main() -> bool:\n # get input and output folders\n path_photos, path_export = prompt_path()\n if path_photos == \"\" or path_export == \"\":\n return False\n\n # create logger\n logger = logging.getLogger()\n logger.handlers.clear()\n f_handler = logging.FileHandler(\n filename=path_photos + divider + \"opt_and_build.log\", mode=\"a\"\n )\n f_formatter = logging.Formatter(\"%(asctime)s - %(levelname)s - %(message)s\")\n f_handler.setFormatter(f_formatter)\n logger.setLevel(logging.DEBUG)\n logger.addHandler(f_handler)\n\n # get all psx files\n psx_list = filter(lambda x: x.lower()[-3::] == \"psx\", os.listdir(path_photos))\n\n logger.info(\"starting opt_and_build\")\n\n for psx in psx_list:\n logger.info(psx)\n doc = meta.app.document\n doc.open(path_photos + divider + psx)\n chunk = doc.chunk\n\n # optimize cameras\n chunk.optimizeCameras()\n\n # delete points outside bounding box\n # https://www.agisoft.com/forum/index.php?topic=9030.0\n R = chunk.region.rot # Bounding box rotation matrix\n C = chunk.region.center # Bounding box center vector\n size = chunk.region.size\n if not (chunk.point_cloud and chunk.enabled):\n continue\n elif not chunk.point_cloud.points:\n continue\n for point in chunk.point_cloud.points:\n if point.valid:\n v = point.coord\n v.size = 3\n v_c = v - C\n v_r = R.t() * v_c\n if abs(v_r.x) > abs(size.x / 2.0):\n point.valid = False\n elif abs(v_r.y) > abs(size.y / 2.0):\n point.valid = False\n elif abs(v_r.z) > abs(size.z / 2.0):\n point.valid = False\n else:\n continue\n\n # read reprojection Error and delete any 0.5 or greater\n f = meta.PointCloud.Filter()\n f.init(chunk, criterion=meta.PointCloud.Filter.ReprojectionError)\n f.removePoints(0.5)\n\n try:\n # building dense cloud\n chunk.buildDepthMaps(downscale=DOWNSCALE, filter_mode=FILTERING)\n chunk.buildDenseCloud(point_colors=True)\n # saving\n doc.save(path_export + divider + psx)\n doc.open(path_export + divider + psx)\n chunk = doc.chunk\n message = psx + \": saved after dense cloud\"\n logger.info(message)\n\n except RuntimeError as r_err:\n message = psx + \": error during dense cloud: \" + str(r_err)\n print(message)\n logger.error(message)\n # issue with this project so moving to next\n 
continue\n\n # building mesh\n try:\n chunk.buildModel(\n surface_type=SURFACE,\n interpolation=INTERPOLATION,\n face_count=FACE_NUM,\n volumetric_masks=VOLUMETRIC_MASKS,\n )\n doc.save(path_export + divider + psx)\n doc.open(path_export + divider + psx)\n chunk = doc.chunk\n message = psx + \": saved after build model\"\n logger.info(message)\n except RuntimeError as r_err:\n message = psx + \": error during build model: \" + str(r_err)\n print(message)\n logger.error(message)\n continue\n\n # saving\n try:\n # build texture\n chunk.buildUV(mapping_mode=MAPPING, page_count=1)\n chunk.buildTexture(blending_mode=BLENDING, texture_size=ATLAS_SIZE)\n doc.save(path_export + divider + psx)\n print(\"saved \", psx, \" after build texture\")\n message = psx + \": saved after build texture\"\n logger.info(message)\n\n except RuntimeError as r_err:\n message = psx + \": error during build texture: \" + str(r_err)\n print(message)\n logger.error(message)\n\n return True", "def test_filter_files_by_archive_false(populated_store: Store):\n\n # GIVEN as store with files\n\n # WHEN filtering on non-archived files\n non_archived_files_query: Query = filter_files_by_is_archived(\n files=populated_store._get_join_file_tags_archive_query(),\n is_archived=False,\n )\n\n # THEN none of the files returned should have an archive object linked to it\n for file in non_archived_files_query:\n assert file.archive is None", "def test_filter_files_by_archive_true(populated_store: Store):\n\n # GIVEN as store with files\n\n # WHEN filtering by archived files\n archived_files_query: Query = filter_files_by_is_archived(\n files=populated_store._get_join_file_tags_archive_query(),\n is_archived=True,\n )\n\n # THEN all files returned should have an archive object linked to it\n for file in archived_files_query:\n assert file.archive", "def test_has_alpha():\n with rasterio.open(S3_ALPHA_PATH) as src_dst:\n assert utils.has_alpha_band(src_dst)\n\n with rasterio.open(COG_DST) as src_dst:\n assert not utils.has_alpha_band(src_dst)", "def test_project_sensors_onto_inflated(tmp_path):\n pytest.importorskip(\"nibabel\")\n raw = mne.io.read_raw_fif(fname_raw)\n trans = _get_trans(fname_trans)[0]\n for subject in (\"sample\", \"fsaverage\"):\n os.makedirs(tmp_path / subject / \"surf\", exist_ok=True)\n for hemi in (\"lh\", \"rh\"):\n # fake white surface for pial\n copyfile(\n subjects_dir / subject / \"surf\" / f\"{hemi}.white\",\n tmp_path / subject / \"surf\" / f\"{hemi}.pial\",\n )\n copyfile(\n subjects_dir / subject / \"surf\" / f\"{hemi}.curv\",\n tmp_path / subject / \"surf\" / f\"{hemi}.curv\",\n )\n copyfile(\n subjects_dir / subject / \"surf\" / f\"{hemi}.inflated\",\n tmp_path / subject / \"surf\" / f\"{hemi}.inflated\",\n )\n if subject == \"fsaverage\":\n copyfile(\n subjects_dir / subject / \"surf\" / f\"{hemi}.cortex.patch.flat\",\n tmp_path / subject / \"surf\" / f\"{hemi}.cortex.patch.flat\",\n )\n copyfile(\n subjects_dir / subject / \"surf\" / f\"{hemi}.sphere\",\n tmp_path / subject / \"surf\" / f\"{hemi}.sphere\",\n )\n # now make realistic sEEG locations, picked from T1\n raw.pick_types(meg=False, eeg=True)\n raw.load_data()\n raw.set_eeg_reference([])\n raw.set_channel_types({ch: \"seeg\" for ch in raw.ch_names})\n pos = (\n np.array(\n [\n [25.85, 9.04, -5.38],\n [33.56, 9.04, -5.63],\n [40.44, 9.04, -5.06],\n [46.75, 9.04, -6.78],\n [-30.08, 9.04, 28.23],\n [-32.95, 9.04, 37.99],\n [-36.39, 9.04, 46.03],\n ]\n )\n / 1000\n )\n raw.drop_channels(raw.ch_names[len(pos) :])\n raw.set_montage(\n 
mne.channels.make_dig_montage(\n ch_pos=dict(zip(raw.ch_names, pos)), coord_frame=\"head\"\n )\n )\n proj_info = _project_sensors_onto_inflated(\n raw.info, trans, \"sample\", subjects_dir=tmp_path\n )\n assert_allclose(\n proj_info[\"chs\"][0][\"loc\"][:3],\n np.array([0.0555809, 0.0034069, -0.04593032]),\n rtol=0.01,\n )\n # check all on inflated surface\n x_dir = np.array([1.0, 0.0, 0.0])\n head_mri_t = mne.transforms.invert_transform(trans) # need head->mri\n for hemi in (\"lh\", \"rh\"):\n coords, faces = mne.surface.read_surface(\n tmp_path / \"sample\" / \"surf\" / f\"{hemi}.inflated\"\n )\n x_ = coords @ x_dir\n coords -= np.max(x_) * x_dir if hemi == \"lh\" else np.min(x_) * x_dir\n coords /= 1000 # mm -> m\n for ch in proj_info[\"chs\"]:\n loc = ch[\"loc\"][:3]\n if not np.isnan(loc).any() and (loc[0] <= 0) == (hemi == \"lh\"):\n assert (\n np.linalg.norm(\n coords - mne.transforms.apply_trans(head_mri_t, loc), axis=1\n ).min()\n < 1e-16\n )\n\n # test flat map\n montage = raw.get_montage()\n montage.apply_trans(mne.transforms.invert_transform(trans))\n mri_mni_t = mne.read_talxfm(\"sample\", subjects_dir)\n montage.apply_trans(mri_mni_t) # mri to mni_tal (MNI Taliarach)\n montage.apply_trans(\n mne.transforms.Transform(fro=\"mni_tal\", to=\"mri\", trans=np.eye(4))\n )\n raw.set_montage(montage)\n trans = mne.channels.compute_native_head_t(montage)\n\n flat_proj_info = _project_sensors_onto_inflated(\n raw.info,\n trans=trans,\n subject=\"fsaverage\",\n subjects_dir=tmp_path,\n flat=True,\n )\n\n # check all on flat surface\n x_dir = np.array([1.0, 0.0, 0.0])\n head_mri_t = mne.transforms.invert_transform(trans) # need head->mri\n for hemi in (\"lh\", \"rh\"):\n coords, faces, _ = mne.surface._read_patch(\n tmp_path / \"fsaverage\" / \"surf\" / f\"{hemi}.cortex.patch.flat\"\n )\n coords = coords[:, [1, 0, 2]]\n coords[:, 1] *= -1\n x_ = coords @ x_dir\n coords -= np.max(x_) * x_dir if hemi == \"lh\" else np.min(x_) * x_dir\n coords /= 1000 # mm -> m\n for ch in flat_proj_info[\"chs\"]:\n loc = ch[\"loc\"][:3]\n if not np.isnan(loc).any() and (loc[0] <= 0) == (hemi == \"lh\"):\n assert (\n np.linalg.norm(\n coords - mne.transforms.apply_trans(head_mri_t, loc), axis=1\n ).min()\n < 1e-16\n )\n\n # plot to check\n # brain = mne.viz.Brain('fsaverage', subjects_dir=tempdir, alpha=0.5,\n # surf='flat')\n # brain.add_sensors(flat_proj_info, trans=trans)", "def scene_file_downloaded(scenes, data_path, filetype, dataset=\"landsat_ot_c2_l2\"):\n un_finised_scenes = []\n zero_bites = []\n\n exts = {\n \"landsat_ot_c2_l1\": [\".jpg\", \".tar\", \"_ANG.txt\", \"_B1.TIF\", \"_B10.TIF\", \"_B11.TIF\", \"_B2.TIF\", \"_B3.TIF\", \"_B4.TIF\", \"_B5.TIF\", \"_B6.TIF\", \"_B7.TIF\", \"_B8.TIF\", \"_B9.TIF\", \"_MTL.txt\", \"_MTL.xml\", \"_QA_PIXEL.TIF\", \"_QA_RADSAT.TIF\", \"_QB.jpg\", \"_qb.tif\", \"_refl.tif\", \"_SAA.TIF\", \"_SZA.TIF\", \"_TIR.jpg\", \"_tir.tif\", \"_VAA.TIF\", \"_VZA.TIF\"],\n \"landsat_ot_c2_l2\": [\"_ANG.txt\", \"_MTL.txt\", \"_MTL.xml\", \"_QA_PIXEL.TIF\", \"_QA_RADSAT.TIF\", \"_SR_B1.TIF\", \"_SR_B2.TIF\", \"_SR_B3.TIF\", \"_SR_B4.TIF\", \"_SR_B5.TIF\", \"_SR_B6.TIF\", \"_SR_B7.TIF\", \"_SR_QA_AEROSOL.TIF\", \"_ST_ATRAN.TIF\", \"_ST_B10.TIF\", \"_ST_CDIST.TIF\", \"_ST_DRAD.TIF\", \"_ST_EMIS.TIF\", \"_ST_EMSD.TIF\", \"_ST_QA.TIF\", \"_ST_TRAD.TIF\", \"_ST_URAD.TIF\"]\n }\n\n all_extensions = exts[dataset.lower()]\n if filetype == 'band':\n # ext_remove = [\".jpg\", \".tar\", \"_QB.jpg\", \"_TIR.jpg\", \"_qb.tif\", \"_refl.tif\", \"_tir.tif\"]\n # all_extensions = [ext for 
ext in all_extensions if ext not in ext_remove]\n all_extensions = all_extensions\n else:\n all_extensions = all_extensions\n for scene in scenes:\n for ext in all_extensions:\n file_path = os.path.join(data_path, scene + ext)\n if os.path.isfile(file_path) and os.stat(file_path).st_size == 0:\n zero_bites.append(scene, file_path)\n if not os.path.isfile(file_path):\n un_finised_scenes.append(scene + ext)\n return un_finised_scenes, zero_bites", "def clip_scenes_if_not_clipped(source_paths, bounds_tuple, filter_nan, tempdir):\n clipped_scene_paths = [Path(p) for p in tempdir.glob(\"*clipped*\")]\n resampled_scene_paths = [Path(p) for p in tempdir.glob(\"*resampled*\")]\n\n if clipped_scene_paths == [] and resampled_scene_paths == []:\n print(\"starting clipping\")\n\n batches = eio.batches_from(source_paths, 16)\n\n batch_results = []\n\n for batch in batches:\n\n batch_result = dask.delayed(eio.clip_and_save)(batch, bounds_tuple, filter_nan, outDir=tempdir)\n batch_results.append(batch_result)\n\n result_futures = client.compute(batch_results, scheduler='processes')\n\n clipped_scene_batches = [i.result() for i in result_futures]# gets rid of None that denotes too little scene overlap\n clipped_scene_paths = []\n for batch in clipped_scene_batches:\n for path in batch:\n if path != None:\n clipped_scene_paths.append(Path(path))\n print(\"done clipping\")\n return clipped_scene_paths\n else:\n return clipped_scene_paths" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
testing if the Archiver.flatten() is working properly for a scene with external files such as textures and audio
def test_flatten_is_working_properly_for_external_files(
    create_test_data, trash_bin, create_pymel, create_maya_env
):
    data = create_test_data
    maya_env = create_maya_env
    pm = create_pymel
    # open data["version7"]
    maya_env.open(data["version7"], force=True)

    # create an image file at the project root
    image_filename = "test.jpg"
    image_path = os.path.join(data["version7"].absolute_path, "Textures")
    image_full_path = os.path.join(image_path, image_filename)

    # create the file
    os.makedirs(image_path, exist_ok=True)
    with open(image_full_path, "w+") as f:
        f.writelines([""])

    audio_filename = "test.wav"
    audio_path = os.path.join(data["version7"].absolute_path, "sound")
    audio_full_path = os.path.join(audio_path, audio_filename)

    # create the file
    os.makedirs(audio_path, exist_ok=True)
    with open(audio_full_path, "w+") as f:
        f.writelines([""])

    # create one image and one audio node
    pm.createNode("file").attr("fileTextureName").set(image_full_path)
    pm.createNode("audio").attr("filename").set(audio_full_path)

    # save it
    # replace external paths
    maya_env.replace_external_paths()
    pm.saveFile()

    # renew the scene
    pm.newFile(force=1)

    # create an archiver
    arch = Archiver()

    project_path = arch.flatten([data["version7"].absolute_full_path])
    trash_bin.append(project_path)

    # now check if we have the files under the path/scenes directory
    archived_version7_path = os.path.join(
        project_path, "scenes", data["version7"].filename
    )

    archived_image_path = os.path.join(project_path, "sourceimages", image_filename)

    assert os.path.exists(archived_version7_path)
    assert os.path.exists(archived_image_path)

    # open the archived version7
    pm.workspace.open(project_path)
    pm.openFile(archived_version7_path)

    # and image files
    ref_image_path = pm.ls(type="file")[0].attr("fileTextureName").get()
    assert ref_image_path == os.path.join(project_path, "sourceimages", image_filename)
    ref_audio_path = pm.ls(type="audio")[0].attr("filename").get()
    assert ref_audio_path == os.path.join(project_path, "sound", audio_filename)
[ "def test_flatten_is_working_properly_with_no_references(create_test_data, trash_bin):\n data = create_test_data\n arch = Archiver()\n project_path = arch.flatten([data[\"asset2_model_main_v001\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # the returned path should be a maya project directory\n assert os.path.exists(project_path)\n\n # there should be a workspace.mel file\n assert os.path.exists(os.path.join(project_path, \"workspace.mel\"))\n\n # there should be a maya scene file under path/scenes with the same\n # name of the source file\n assert os.path.exists(\n os.path.join(project_path, \"scenes\", data[\"asset2_model_main_v001\"].filename)\n )", "def test_flatten_is_working_properly_with_exclude_mask(\n create_test_data, trash_bin, create_pymel, create_maya_env\n):\n data = create_test_data\n maya_env = create_maya_env\n pm = create_pymel\n # open data[\"version7\"]\n maya_env.open(data[\"version7\"], force=True)\n\n # create an image file at the project root\n image_filename = \"test.jpg\"\n image_path = os.path.join(data[\"version7\"].absolute_path, \"..\", \"sourceimages\")\n image_full_path = os.path.join(image_path, image_filename)\n\n # create the file\n os.makedirs(image_path, exist_ok=True)\n with open(image_full_path, \"w+\") as f:\n f.writelines([\"\"])\n\n audio_filename = \"test.wav\"\n audio_path = os.path.join(data[\"version7\"].absolute_path, \"sound\")\n audio_full_path = os.path.join(audio_path, audio_filename)\n\n # create the file\n os.makedirs(audio_path, exist_ok=True)\n with open(audio_full_path, \"w+\") as f:\n f.writelines([\"\"])\n\n # create one image and one audio node\n pm.createNode(\"file\").attr(\"fileTextureName\").set(image_full_path)\n pm.createNode(\"audio\").attr(\"filename\").set(audio_full_path)\n\n # save it\n # replace external paths\n maya_env.replace_external_paths()\n pm.saveFile()\n\n # open data[\"asset2_model_take1_v001\"]\n maya_env.open(data[\"asset2_model_take1_v001\"], force=True)\n\n # and reference data[\"version7\"] to it\n maya_env.reference(data[\"version7\"])\n\n # and save it\n pm.saveFile()\n\n # open data[\"asset2_model_main_v001\"]\n maya_env.open(data[\"asset2_model_main_v001\"], force=True)\n\n # and reference data[\"asset2_model_take1_v001\"] to it\n maya_env.reference(data[\"asset2_model_take1_v001\"])\n\n # and save it\n pm.saveFile()\n\n # renew the scene\n pm.newFile(force=1)\n\n # create an archiver\n arch = Archiver(exclude_mask=[\".png\", \".jpg\", \".tga\"])\n\n project_path = arch.flatten([data[\"asset2_model_main_v001\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # now check if we have the files under the path/scenes directory\n archived_version1_path = os.path.join(\n project_path, \"scenes\", data[\"asset2_model_main_v001\"].filename\n )\n\n # and references under path/scenes/refs path\n archived_version4_path = os.path.join(\n project_path, \"scenes/refs\", data[\"asset2_model_take1_v001\"].filename\n )\n\n archived_version4_unresolved_path = os.path.join(\n \"scenes/refs\", data[\"asset2_model_take1_v001\"].filename\n )\n\n archived_version7_path = os.path.join(\n project_path, \"scenes/refs\", data[\"version7\"].filename\n )\n\n archived_version7_unresolved_path = os.path.join(\n \"scenes/refs\", data[\"version7\"].filename\n )\n\n archived_image_path = os.path.join(project_path, \"sourceimages\", image_filename)\n archived_audio_path = os.path.join(project_path, \"sound\", audio_filename)\n\n assert os.path.exists(archived_version1_path)\n assert 
os.path.exists(archived_version4_path)\n assert os.path.exists(archived_version7_path)\n # jpg should not be included\n assert not os.path.exists(archived_image_path)\n assert os.path.exists(archived_audio_path)\n\n # open the archived version1\n pm.workspace.open(project_path)\n pm.openFile(archived_version1_path)\n\n # expect it to have one reference\n all_refs = pm.listReferences()\n assert len(all_refs) == 1\n\n # and the path is matching to archived version4 path\n ref = all_refs[0]\n assert ref.path == archived_version4_path\n assert ref.unresolvedPath() == archived_version4_unresolved_path\n\n # check the deeper level references\n deeper_ref = pm.listReferences(parentReference=ref)[0]\n assert deeper_ref.path == archived_version7_path\n assert deeper_ref.unresolvedPath() == archived_version7_unresolved_path\n\n # and deeper level files\n ref_image_path = pm.ls(type=\"file\")[0].attr(\"fileTextureName\").get()\n # the path of the jpg should be intact\n assert ref_image_path == \"$REPOTPR/TP/Test_Task_1/sourceimages/test.jpg\"\n\n ref_audio_path = pm.ls(type=\"audio\")[0].attr(\"filename\").get()\n assert ref_audio_path == os.path.join(project_path, \"sound\", audio_filename)", "def test_flatten_is_working_properly_with_only_one_level_of_multiple_references_to_the_same_file(\n create_test_data, trash_bin, create_pymel, create_maya_env\n):\n data = create_test_data\n maya_env = create_maya_env\n pm = create_pymel\n # open data[\"asset2_model_main_v001\"]\n maya_env.open(data[\"asset2_model_main_v001\"], force=True)\n\n # and reference data[\"asset2_model_take1_v001\"] more than once to it\n maya_env.reference(data[\"asset2_model_take1_v001\"])\n maya_env.reference(data[\"asset2_model_take1_v001\"])\n maya_env.reference(data[\"asset2_model_take1_v001\"])\n\n # and save it\n pm.saveFile()\n\n # renew the scene\n pm.newFile(force=1)\n\n # create an archiver\n arch = Archiver()\n\n project_path = arch.flatten([data[\"asset2_model_main_v001\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # now check if we have two files under the path/scenes directory\n archived_version1_path = os.path.join(\n project_path, \"scenes\", data[\"asset2_model_main_v001\"].filename\n )\n\n archived_version4_unresolved_path = os.path.join(\n \"scenes/refs\", data[\"asset2_model_take1_v001\"].filename\n )\n\n archived_version4_path = os.path.join(\n project_path, archived_version4_unresolved_path\n )\n\n assert os.path.exists(archived_version1_path)\n assert os.path.exists(archived_version4_path)\n\n # open the archived version1\n pm.workspace.open(project_path)\n pm.openFile(archived_version1_path)\n\n # expect it to have three references\n all_refs = pm.listReferences()\n assert len(all_refs) == 3\n\n # and the path is matching to archived version4 path\n ref = all_refs[0]\n assert ref.path == archived_version4_path\n assert ref.unresolvedPath() == archived_version4_unresolved_path\n\n ref = all_refs[1]\n assert ref.path == archived_version4_path\n assert ref.unresolvedPath() == archived_version4_unresolved_path\n\n ref = all_refs[2]\n assert ref.path == archived_version4_path\n assert ref.unresolvedPath() == archived_version4_unresolved_path", "def test_flatten_is_working_properly_with_multiple_level_of_references(\n create_test_data, trash_bin, create_pymel, create_maya_env\n):\n data = create_test_data\n maya_env = create_maya_env\n pm = create_pymel\n # open data[\"asset2_model_take1_v001\"]\n maya_env.open(data[\"asset2_model_take1_v001\"], force=True)\n\n # and reference data[\"version7\"] to 
it\n maya_env.reference(data[\"version7\"])\n\n # and save it\n pm.saveFile()\n\n # open data[\"asset2_model_main_v001\"]\n maya_env.open(data[\"asset2_model_main_v001\"], force=True)\n\n # and reference data[\"asset2_model_take1_v001\"] to it\n maya_env.reference(data[\"asset2_model_take1_v001\"])\n\n # and save it\n pm.saveFile()\n\n # renew the scene\n pm.newFile(force=1)\n\n # create an archiver\n arch = Archiver()\n\n project_path = arch.flatten([data[\"asset2_model_main_v001\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # now check if we have two files under the path/scenes directory\n archived_version1_path = os.path.join(\n project_path, \"scenes\", data[\"asset2_model_main_v001\"].filename\n )\n\n archived_version4_path = os.path.join(\n project_path, \"scenes/refs\", data[\"asset2_model_take1_v001\"].filename\n )\n\n archived_version4_unresolved_path = os.path.join(\n \"scenes/refs\", data[\"asset2_model_take1_v001\"].filename\n )\n\n archived_version7_path = os.path.join(\n project_path, \"scenes/refs\", data[\"version7\"].filename\n )\n\n archived_version7_unresolved_path = os.path.join(\n \"scenes/refs\", data[\"version7\"].filename\n )\n\n assert os.path.exists(archived_version1_path)\n assert os.path.exists(archived_version4_path)\n assert os.path.exists(archived_version7_path)\n\n # open the archived version1\n pm.workspace.open(project_path)\n pm.openFile(archived_version1_path)\n\n # expect it to have one reference\n all_refs = pm.listReferences()\n assert len(all_refs) == 1\n\n # and the path is matching to archived version4 path\n ref = all_refs[0]\n assert ref.path == archived_version4_path\n assert ref.unresolvedPath() == archived_version4_unresolved_path\n\n # check the deeper level references\n deeper_ref = pm.listReferences(parentReference=ref)[0]\n assert deeper_ref.path == archived_version7_path\n assert deeper_ref.unresolvedPath() == archived_version7_unresolved_path", "def test_flatten_is_working_properly_with_only_one_level_of_references(\n create_test_data, trash_bin, create_pymel, create_maya_env\n):\n data = create_test_data\n maya_env = create_maya_env\n pm = create_pymel\n # open data[\"asset2_model_main_v001\"]\n maya_env.open(data[\"asset2_model_main_v001\"], force=True)\n\n # and reference data[\"asset2_model_take1_v001\"] to it\n maya_env.reference(data[\"asset2_model_take1_v001\"])\n\n # and save it\n pm.saveFile()\n\n # renew the scene\n pm.newFile(force=1)\n\n # create an archiver\n arch = Archiver()\n\n project_path = arch.flatten([data[\"asset2_model_main_v001\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # now check if we have two files under the path/scenes directory\n archived_version1_path = os.path.join(\n project_path, \"scenes\", data[\"asset2_model_main_v001\"].filename\n )\n\n archived_version4_unresolved_path = os.path.join(\n \"scenes/refs\", data[\"asset2_model_take1_v001\"].filename\n )\n\n archived_version4_path = os.path.join(\n project_path, archived_version4_unresolved_path\n )\n\n assert os.path.exists(archived_version1_path)\n assert os.path.exists(archived_version4_path)\n\n # open the archived version1\n pm.workspace.open(project_path)\n pm.openFile(archived_version1_path)\n\n # expect it to have one reference\n all_refs = pm.listReferences()\n assert len(all_refs) == 1\n\n # and the path is matching to archived version4 path\n ref = all_refs[0]\n assert ref.path == archived_version4_path\n assert ref.unresolvedPath() == archived_version4_unresolved_path", "def 
test_flatten_will_restore_the_current_workspace(\n create_test_data, trash_bin, create_pymel, create_maya_env\n):\n data = create_test_data\n maya_env = create_maya_env\n pm = create_pymel\n # open data[\"asset2_model_main_v001\"]\n maya_env.open(data[\"asset2_model_main_v001\"], force=True)\n\n current_workspace = pm.workspace.path\n\n arch = Archiver()\n project_path = arch.flatten([data[\"asset2_model_main_v001\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # now check if the current workspace is intact\n assert current_workspace == pm.workspace.path", "def prepareSnapshotScene(self):\n if self.debugMode:\n return\n\n createPaths(self.htotTempDir)\n\n # Mantra # WATCHME\n if self.renderer == 'Mantra':\n self.outputDriver.parm('soho_outputmode').set(True)\n self.outputDriver.parm('soho_diskfile').set(self.archiveOutput)\n self.outputDriver.parm('vm_inlinestorage').set(True)\n self.outputDriver.parm('vm_binarygeometry').set(self.binaryArchives)\n # ifdsDir = os.path.join(self.htotTempDir, 'ifds', 'storage')\n # hou.parm('{}/vm_tmpsharedstorage'.format(self.outputDriverPath)).set(ifdsDir)\n\n hou.hipFile.save(self.snapshotScene, save_to_recent_files=False)\n\n self.outputDriver.parm('soho_outputmode').set(False)\n hou.hipFile.save(self.sceneFile)\n\n # Renderman\n elif self.renderer == 'Renderman':\n self.outputDriver.parm('ri_makedir_0').set(True)\n self.outputDriver.parm('ri_device_0').set('openexr')\n originalOutputName = self.outputDriver.parm('ri_display_0').unexpandedString()\n correctOutputName = originalOutputName.replace('$HIPNAME', self.sceneFileName)\n self.outputDriver.parm('ri_display_0').set(correctOutputName)\n\n self.outputDriver.parm('diskfile').set(bool(self.archivesGeneration))\n self.outputDriver.parm('binaryrib').set(self.binaryArchives)\n self.outputDriver.parm('soho_diskfile').set(self.archiveOutput)\n\n hou.hipFile.save(self.snapshotScene, save_to_recent_files=False)\n\n self.outputDriver.parm('ri_display_0').set(originalOutputName)\n self.outputDriver.parm('diskfile').set(False)\n hou.hipFile.save(self.sceneFile)\n\n # Arnold # WATCHME\n elif self.renderer == 'Arnold':\n self.outputDriver.parm('ar_ass_export_enable').set(True)\n self.outputDriver.parm('ar_ass_file').set(self.archiveOutput)\n self.outputDriver.parm('ar_binary_ass').set(self.binaryArchives)\n\n hou.hipFile.save(self.snapshotScene, save_to_recent_files=False)\n\n self.outputDriver.parm('ar_ass_export_enable').set(False)\n hou.hipFile.save(self.sceneFile)", "def scene_file(self):\n if self.is_standalone and not self.upload_only:\n return self._standalone_scene_file_prefix + '*.ass'\n return self.original_scene_file", "def saveAsset(self, assetName, exportUV=True, exportOBJ=True, exportFBX=True, exportABC=True, selectionOnly=True, sceneFormat=\"max\", notes=\"N/A\", **info):\n # self.ssResolution = 1000\n if assetName == \"\":\n msg = \"Asset Name cannot be empty\"\n state = rt.messageBox(msg, title='Info')\n return\n\n if assetName in self.assetsList:\n msg = \"This Asset already exists.\\nDo you want to overwrite?\"\n state = rt.queryBox( msg, title='Manager Question')\n if state:\n pass\n else:\n return\n\n\n originalSelection = self._getSelection(asMaxArray=True)\n originalPath = self._getSceneFile()\n\n dump, origExt = os.path.splitext(originalPath)\n\n assetDirectory = os.path.join(self.directory, assetName)\n\n assetAbsPath = os.path.join(assetDirectory, \"%s%s\" %(assetName, u'.%s'%sceneFormat))\n\n\n if selectionOnly:\n selection = self._getSelection(asMaxArray=True)\n if 
len(selection) == 0:\n msg = \"No object selected\"\n rt.messageBox(msg, title='Info')\n return\n else:\n rt.select(rt.objects)\n selection = self._getSelection(asMaxArray=True)\n\n\n # originalSelection = self._getSelection(asMaxArray=True)\n\n\n if not os.path.exists(assetDirectory):\n os.mkdir(assetDirectory)\n\n # GET TEXTURES\n # ------------\n\n if selectionOnly:\n\n possibleFileHolders = rt.execute(\"selection as Array\")\n filteredBitmaps = self._getFileNodes(possibleFileHolders)\n\n else:\n allTexture = rt.usedMaps()\n allBitmap = rt.getClassInstances(rt.bitmapTexture)\n # this makes sure only the USED bitmaps will stored\n filteredBitmaps = [x for x in allBitmap if x.filename in allTexture]\n\n\n textureDatabase = [x for x in self._buildPathDatabase(filteredBitmaps, assetDirectory)]\n\n self._copyTextures(textureDatabase)\n\n # CREATE PREVIEWS\n # ---------------\n thumbPath, ssPath, swPath = self._createThumbnail(assetName, selectionOnly=selectionOnly, viewFit=True)\n\n # CREATE UV SNAPSHOTS\n # ----------------\n rt.select(selection)\n\n if exportUV:\n self._uvSnaps(assetName)\n\n # SAVE SOURCE\n # -----------\n fManager.SaveSelected(assetAbsPath)\n\n # EXPORT OBJ\n # ----------\n\n if exportOBJ:\n objFilePath = os.path.join(assetDirectory, \"%s.obj\" %assetName)\n if self._exportObj(objFilePath, exportSettings=self.exportSettings):\n objName = \"{0}.obj\".format(assetName)\n else:\n objName = \"N/A\"\n else:\n objName = \"N/A\"\n\n # EXPORT FBX\n # ----------\n if exportFBX:\n fbxFilePath = os.path.join(assetDirectory, \"%s.fbx\" %assetName)\n frame = self._getCurrentFrame()\n\n if self._exportFbx(fbxFilePath, exportSettings=self.exportSettings, timeRange=[frame, frame]):\n fbxName = \"{0}.fbx\".format(assetName)\n else:\n fbxName = \"N/A\"\n else:\n fbxName = \"N/A\"\n\n # EXPORT ALEMBIC\n # --------------\n\n if exportABC:\n\n abcFilePath = os.path.join(assetDirectory, \"%s.abc\" % assetName)\n frame = self._getCurrentFrame()\n\n if self._exportAlembic(abcFilePath, exportSettings=self.exportSettings, timeRange=[frame, frame]):\n abcName = \"{0}.abc\".format(assetName)\n else:\n abcName = \"N/A\"\n else:\n abcName = \"N/A\"\n\n # NUMERIC DATA\n # ------------\n\n polyCount = sum(rt.getPolygonCount(x)[0] for x in selection)\n # polyCount = sum(rt.getPolygonCount(x)[0] for x in countLoop)\n tiangleCount = sum(rt.getPolygonCount(x)[1] for x in selection)\n # tiangleCount = sum(rt.getPolygonCount(x)[1] for x in countLoop)\n\n versionInfo = rt.maxversion()\n vInfo = [versionInfo[0], versionInfo[1], versionInfo[2]]\n\n # DATABASE\n # --------\n\n dataDict = {}\n dataDict['sourceProject'] = \"3dsMax\"\n dataDict['version'] = vInfo\n dataDict['assetName'] = assetName\n dataDict['objPath'] = objName\n dataDict['fbxPath'] = fbxName\n dataDict['abcPath'] = abcName\n dataDict['sourcePath'] = os.path.basename(assetAbsPath)\n dataDict['thumbPath'] = os.path.basename(thumbPath)\n dataDict['ssPath'] = os.path.basename(ssPath)\n dataDict['swPath'] = os.path.basename(swPath)\n dataDict['textureFiles'] = [x[\"Texture\"] for x in textureDatabase]\n dataDict['Faces/Triangles'] = (\"%s/%s\" % (str(polyCount), str(tiangleCount)))\n dataDict['origin'] = originalPath\n dataDict['notes'] = notes\n\n self._setData(assetName, dataDict)\n\n rt.clearSelection()\n self._returnOriginal(textureDatabase)\n # self.scanAssets() # scanning issued at populate function on ui class\n rt.select(originalSelection)\n rt.messageBox(\"Asset Created Successfully\", title='Info', beep=False)", "def 
scene_file_downloaded(scenes, data_path, filetype, dataset=\"landsat_ot_c2_l2\"):\n un_finised_scenes = []\n zero_bites = []\n\n exts = {\n \"landsat_ot_c2_l1\": [\".jpg\", \".tar\", \"_ANG.txt\", \"_B1.TIF\", \"_B10.TIF\", \"_B11.TIF\", \"_B2.TIF\", \"_B3.TIF\", \"_B4.TIF\", \"_B5.TIF\", \"_B6.TIF\", \"_B7.TIF\", \"_B8.TIF\", \"_B9.TIF\", \"_MTL.txt\", \"_MTL.xml\", \"_QA_PIXEL.TIF\", \"_QA_RADSAT.TIF\", \"_QB.jpg\", \"_qb.tif\", \"_refl.tif\", \"_SAA.TIF\", \"_SZA.TIF\", \"_TIR.jpg\", \"_tir.tif\", \"_VAA.TIF\", \"_VZA.TIF\"],\n \"landsat_ot_c2_l2\": [\"_ANG.txt\", \"_MTL.txt\", \"_MTL.xml\", \"_QA_PIXEL.TIF\", \"_QA_RADSAT.TIF\", \"_SR_B1.TIF\", \"_SR_B2.TIF\", \"_SR_B3.TIF\", \"_SR_B4.TIF\", \"_SR_B5.TIF\", \"_SR_B6.TIF\", \"_SR_B7.TIF\", \"_SR_QA_AEROSOL.TIF\", \"_ST_ATRAN.TIF\", \"_ST_B10.TIF\", \"_ST_CDIST.TIF\", \"_ST_DRAD.TIF\", \"_ST_EMIS.TIF\", \"_ST_EMSD.TIF\", \"_ST_QA.TIF\", \"_ST_TRAD.TIF\", \"_ST_URAD.TIF\"]\n }\n\n all_extensions = exts[dataset.lower()]\n if filetype == 'band':\n # ext_remove = [\".jpg\", \".tar\", \"_QB.jpg\", \"_TIR.jpg\", \"_qb.tif\", \"_refl.tif\", \"_tir.tif\"]\n # all_extensions = [ext for ext in all_extensions if ext not in ext_remove]\n all_extensions = all_extensions\n else:\n all_extensions = all_extensions\n for scene in scenes:\n for ext in all_extensions:\n file_path = os.path.join(data_path, scene + ext)\n if os.path.isfile(file_path) and os.stat(file_path).st_size == 0:\n zero_bites.append(scene, file_path)\n if not os.path.isfile(file_path):\n un_finised_scenes.append(scene + ext)\n return un_finised_scenes, zero_bites", "def standalone_scene_file(self):\n return self._standalone_scene_file_prefix + '.ass'", "def unpack_clear(self):\n if (not os.path.exists(self.unpack_path)):\n VERBOSE(\"no image found: %s\" % self.unpack_path)\n else:\n if (not os.path.isdir(self.unpack_path)):\n FATAL(\"can't flatten: %s exists but is not a directory\"\n % self.unpack_path)\n if (not self.unpacked_p(self.unpack_path)):\n FATAL(\"can't flatten: %s exists but does not appear to be an image\"\n % self.unpack_path)\n VERBOSE(\"removing existing image: %s\" % self.unpack_path)\n rmtree(self.unpack_path)", "def test_create_default_project_workspace_mel_content_is_correct(\n create_test_data, trash_bin\n):\n arch = Archiver()\n tempdir = tempfile.gettempdir()\n\n project_path = arch.create_default_project(tempdir)\n trash_bin.append(project_path)\n\n workspace_mel_path = os.path.join(project_path, \"workspace.mel\")\n\n with open(workspace_mel_path) as f:\n content = f.read()\n\n expected_result = \"\"\"// Anima Archiver Default Project Definition\n\nworkspace -fr \"translatorData\" \"data\";\nworkspace -fr \"offlineEdit\" \"scenes/edits\";\nworkspace -fr \"renderData\" \"renderData\";\nworkspace -fr \"scene\" \"scenes\";\nworkspace -fr \"3dPaintTextures\" \"sourceimages/3dPaintTextures\";\nworkspace -fr \"eps\" \"data\";\nworkspace -fr \"OBJexport\" \"data\";\nworkspace -fr \"mel\" \"scripts\";\nworkspace -fr \"furShadowMap\" \"renderData/fur/furShadowMap\";\nworkspace -fr \"particles\" \"cache/particles\";\nworkspace -fr \"audio\" \"sound\";\nworkspace -fr \"scripts\" \"scripts\";\nworkspace -fr \"sound\" \"sound\";\nworkspace -fr \"DXF_FBX export\" \"data\";\nworkspace -fr \"furFiles\" \"renderData/fur/furFiles\";\nworkspace -fr \"depth\" \"renderData/depth\";\nworkspace -fr \"autoSave\" \"autosave\";\nworkspace -fr \"furAttrMap\" \"renderData/fur/furAttrMap\";\nworkspace -fr \"diskCache\" \"data\";\nworkspace -fr \"fileCache\" \"cache/nCache\";\nworkspace 
-fr \"ASS Export\" \"data\";\nworkspace -fr \"FBX export\" \"data\";\nworkspace -fr \"sourceImages\" \"sourceimages\";\nworkspace -fr \"FBX\" \"data\";\nworkspace -fr \"DAE_FBX export\" \"data\";\nworkspace -fr \"movie\" \"movies\";\nworkspace -fr \"Alembic\" \"data\";\nworkspace -fr \"DAE_FBX\" \"data\";\nworkspace -fr \"iprImages\" \"renderData/iprImages\";\nworkspace -fr \"mayaAscii\" \"scenes\";\nworkspace -fr \"furImages\" \"renderData/fur/furImages\";\nworkspace -fr \"furEqualMap\" \"renderData/fur/furEqualMap\";\nworkspace -fr \"illustrator\" \"data\";\nworkspace -fr \"DXF_FBX\" \"data\";\nworkspace -fr \"mayaBinary\" \"scenes\";\nworkspace -fr \"move\" \"data\";\nworkspace -fr \"images\" \"images\";\nworkspace -fr \"fluidCache\" \"cache/nCache/fluid\";\nworkspace -fr \"clips\" \"clips\";\nworkspace -fr \"ASS\" \"data\";\nworkspace -fr \"OBJ\" \"data\";\nworkspace -fr \"templates\" \"assets\";\nworkspace -fr \"shaders\" \"renderData/shaders\";\n\"\"\"\n assert content == expected_result", "def filter_samples(self, is_valid_file=None, extensions=('.png', '.jpg', '.jpeg')):\n super().filter_samples(is_valid_file, extensions)\n self.samples = [filename for filename in self.samples if filename.startswith(self.root_in_archive)]", "def test_save_and_restore(self):\n engine = self.e\n scene = self.scene\n src = scene.children[0]\n src.source = 'gaussian'\n src.data_source.maximum = 2.0\n src.data_source.standard_deviation = 15\n\n # Save visualization.\n f = BytesIO()\n f.name = abspath('test.mv2') # We simulate a file.\n engine.save_visualization(f)\n f.seek(0) # So we can read this saved data.\n\n # Remove existing scene.\n engine.close_scene(scene)\n\n # Load visualization\n engine.load_visualization(f)\n self.scene = engine.current_scene\n\n self.check()", "def test_save_and_restore(self):\n engine = self.e\n scene = self.scene\n\n # Save visualization.\n f = StringIO()\n f.name = abspath('test.mv2') # We simulate a file.\n engine.save_visualization(f)\n f.seek(0) # So we can read this saved data.\n\n # Remove existing scene.\n \n engine.close_scene(scene)\n\n # Load visualization\n engine.load_visualization(f)\n self.scene = engine.current_scene\n s = self.scene\n self.check()", "def display_data_scenes(nb_bits, p_scene):\n\n scenes = os.listdir(path)\n # remove min max file from scenes folder\n scenes = [s for s in scenes if min_max_filename not in s]\n\n # go ahead each scenes\n for folder_scene in scenes:\n\n if p_scene == folder_scene:\n print(folder_scene)\n scene_path = os.path.join(path, folder_scene)\n\n # construct each zones folder name\n zones_folder = []\n\n # get zones list info\n for index in zones:\n index_str = str(index)\n if len(index_str) < 2:\n index_str = \"0\" + index_str\n\n current_zone = \"zone\"+index_str\n zones_folder.append(current_zone)\n\n threshold_info = []\n\n for zone_folder in zones_folder:\n\n zone_path = os.path.join(scene_path, zone_folder)\n\n # get threshold information\n path_seuil = os.path.join(zone_path, seuil_expe_filename)\n\n # open treshold path and get this information\n with open(path_seuil, \"r\") as seuil_file:\n seuil_learned = int(seuil_file.readline().strip())\n threshold_info.append(seuil_learned)\n\n # compute mean threshold values\n mean_threshold = sum(threshold_info) / float(len(threshold_info))\n\n print(mean_threshold, \"mean threshold found\")\n threshold_image_found = False\n\n # get all images of folder\n scene_images = sorted([os.path.join(scene_path, img) for img in os.listdir(scene_path) if cfg.scene_image_extension 
in img])\n\n start_image_path = scene_images[0]\n end_image_path = scene_images[-1]\n\n start_quality_image = dt.get_scene_image_quality(scene_images[0])\n end_quality_image = dt.get_scene_image_quality(scene_images[-1])\n\n # for each images\n for img_path in scene_images:\n current_quality_image = dt.get_scene_image_quality(img_path)\n\n if mean_threshold < int(current_quality_image) and not threshold_image_found:\n\n threshold_image_found = True\n threshold_image_path = img_path\n\n threshold_image = dt.get_scene_image_quality(img_path)\n\n # all indexes of picture to plot\n images_path = [start_image_path, threshold_image_path, end_image_path]\n\n low_bits_svd_values = []\n\n for i in range(0, max_nb_bits - nb_bits + 1):\n\n low_bits_svd_values.append([])\n\n for img_path in images_path:\n\n current_img = Image.open(img_path)\n\n block_used = np.array(current_img)\n\n low_bits_block = transform.rgb_to_LAB_L_bits(block_used, (i + 1, i + nb_bits + 1))\n low_bits_svd = compression.get_SVD_s(low_bits_block)\n low_bits_svd = [b / low_bits_svd[0] for b in low_bits_svd]\n low_bits_svd_values[i].append(low_bits_svd)\n\n\n fig=plt.figure(figsize=(8, 8))\n fig.suptitle(\"Lab SVD \" + str(nb_bits) + \" bits values shifted for \" + p_scene + \" scene\", fontsize=20)\n\n for id, data in enumerate(low_bits_svd_values):\n fig.add_subplot(3, 3, (id + 1))\n plt.plot(data[0], label='Noisy_' + start_quality_image)\n plt.plot(data[1], label='Threshold_' + threshold_image)\n plt.plot(data[2], label='Reference_' + end_quality_image)\n plt.ylabel('Lab SVD ' + str(nb_bits) + ' bits values shifted by ' + str(id), fontsize=14)\n plt.xlabel('Vector features', fontsize=16)\n plt.legend(bbox_to_anchor=(0.5, 1), loc=2, borderaxespad=0.2, fontsize=14)\n plt.ylim(0, 0.1)\n plt.show()", "def load(path):\n\n # product_meta\n file_path = os.path.join(path,'product_meta.pkl')\n product_meta = pickle.load( open( file_path, \"rb\" ) )\n\n # unit\n file_path = os.path.join(path,'unit.pkl')\n unit = pickle.load( open( file_path, \"rb\" ) )\n\n # footprint\n file_path = os.path.join(path,'footprint.pkl')\n footprint = pickle.load(open( file_path, \"rb\" ) )\n\n # geo_tie_point\n file_path = os.path.join(path,'geo_tie_point.pkl')\n geo_tie_point= pickle.load( open( file_path, \"rb\" ) )\n\n # band_names\n file_path = os.path.join(path,'band_names.pkl')\n band_names = pickle.load(open( file_path, \"rb\" ) )\n\n # band_meta\n file_path = os.path.join(path,'band_meta.pkl')\n band_meta = pickle.load(open( file_path, \"rb\" ) )\n\n # bands\n file_path = os.path.join(path,'bands.pkl')\n bands = pickle.load(open( file_path, \"rb\" ) )\n\n # calibration_tables\n file_path = os.path.join(path,'calibration_tables.pkl')\n calibration_tables = pickle.load(open( file_path, \"rb\" ) )\n \n return SarImage(bands, mission=product_meta['mission'], time=product_meta['start_time'],\n footprint=footprint, product_meta=product_meta,\n band_names=band_names, calibration_tables=calibration_tables,\n geo_tie_point=geo_tie_point, band_meta=band_meta, unit=unit)", "def test_join_verts(self):\n # Test the result of rendering two tori with separate textures.\n # The expected result is consistent with rendering them each alone.\n torch.manual_seed(1)\n device = torch.device(\"cuda:0\")\n plain_torus = torus(r=1, R=4, sides=5, rings=6, device=device)\n [verts] = plain_torus.verts_list()\n verts_shifted1 = verts.clone()\n verts_shifted1 *= 0.5\n verts_shifted1[:, 1] += 7\n\n faces = plain_torus.faces_list()\n textures1 = 
TexturesVertex(verts_features=[torch.rand_like(verts)])\n textures2 = TexturesVertex(verts_features=[torch.rand_like(verts)])\n mesh1 = Meshes(verts=[verts], faces=faces, textures=textures1)\n mesh2 = Meshes(verts=[verts_shifted1], faces=faces, textures=textures2)\n mesh = join_meshes_as_scene([mesh1, mesh2])\n\n R, T = look_at_view_transform(18, 0, 0)\n cameras = FoVPerspectiveCameras(device=device, R=R, T=T)\n\n raster_settings = RasterizationSettings(\n image_size=256, blur_radius=0.0, faces_per_pixel=1\n )\n\n lights = AmbientLights(device=device)\n blend_params = BlendParams(\n sigma=1e-1,\n gamma=1e-4,\n background_color=torch.tensor([1.0, 1.0, 1.0], device=device),\n )\n renderer = MeshRenderer(\n rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),\n shader=HardPhongShader(\n device=device, blend_params=blend_params, cameras=cameras, lights=lights\n ),\n )\n\n output = renderer(mesh)\n\n image_ref = load_rgb_image(\"test_joinverts_final.png\", DATA_DIR)\n\n if DEBUG:\n debugging_outputs = []\n for mesh_ in [mesh1, mesh2]:\n debugging_outputs.append(renderer(mesh_))\n Image.fromarray(\n (output[0, ..., :3].cpu().numpy() * 255).astype(np.uint8)\n ).save(DATA_DIR / \"test_joinverts_final_.png\")\n Image.fromarray(\n (debugging_outputs[0][0, ..., :3].cpu().numpy() * 255).astype(np.uint8)\n ).save(DATA_DIR / \"test_joinverts_1.png\")\n Image.fromarray(\n (debugging_outputs[1][0, ..., :3].cpu().numpy() * 255).astype(np.uint8)\n ).save(DATA_DIR / \"test_joinverts_2.png\")\n\n result = output[0, ..., :3].cpu()\n self.assertClose(result, image_ref, atol=0.05)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that Archiver.flatten() will restore the current workspace path after it has finished flattening
def test_flatten_will_restore_the_current_workspace( create_test_data, trash_bin, create_pymel, create_maya_env ): data = create_test_data maya_env = create_maya_env pm = create_pymel # open data["asset2_model_main_v001"] maya_env.open(data["asset2_model_main_v001"], force=True) current_workspace = pm.workspace.path arch = Archiver() project_path = arch.flatten([data["asset2_model_main_v001"].absolute_full_path]) trash_bin.append(project_path) # now check if the current workspace is intact assert current_workspace == pm.workspace.path
[ "def test_flatten_is_working_properly_with_no_references(create_test_data, trash_bin):\n data = create_test_data\n arch = Archiver()\n project_path = arch.flatten([data[\"asset2_model_main_v001\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # the returned path should be a maya project directory\n assert os.path.exists(project_path)\n\n # there should be a workspace.mel file\n assert os.path.exists(os.path.join(project_path, \"workspace.mel\"))\n\n # there should be a maya scene file under path/scenes with the same\n # name of the source file\n assert os.path.exists(\n os.path.join(project_path, \"scenes\", data[\"asset2_model_main_v001\"].filename)\n )", "def test_flatten_is_working_properly_with_multiple_level_of_references(\n create_test_data, trash_bin, create_pymel, create_maya_env\n):\n data = create_test_data\n maya_env = create_maya_env\n pm = create_pymel\n # open data[\"asset2_model_take1_v001\"]\n maya_env.open(data[\"asset2_model_take1_v001\"], force=True)\n\n # and reference data[\"version7\"] to it\n maya_env.reference(data[\"version7\"])\n\n # and save it\n pm.saveFile()\n\n # open data[\"asset2_model_main_v001\"]\n maya_env.open(data[\"asset2_model_main_v001\"], force=True)\n\n # and reference data[\"asset2_model_take1_v001\"] to it\n maya_env.reference(data[\"asset2_model_take1_v001\"])\n\n # and save it\n pm.saveFile()\n\n # renew the scene\n pm.newFile(force=1)\n\n # create an archiver\n arch = Archiver()\n\n project_path = arch.flatten([data[\"asset2_model_main_v001\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # now check if we have two files under the path/scenes directory\n archived_version1_path = os.path.join(\n project_path, \"scenes\", data[\"asset2_model_main_v001\"].filename\n )\n\n archived_version4_path = os.path.join(\n project_path, \"scenes/refs\", data[\"asset2_model_take1_v001\"].filename\n )\n\n archived_version4_unresolved_path = os.path.join(\n \"scenes/refs\", data[\"asset2_model_take1_v001\"].filename\n )\n\n archived_version7_path = os.path.join(\n project_path, \"scenes/refs\", data[\"version7\"].filename\n )\n\n archived_version7_unresolved_path = os.path.join(\n \"scenes/refs\", data[\"version7\"].filename\n )\n\n assert os.path.exists(archived_version1_path)\n assert os.path.exists(archived_version4_path)\n assert os.path.exists(archived_version7_path)\n\n # open the archived version1\n pm.workspace.open(project_path)\n pm.openFile(archived_version1_path)\n\n # expect it to have one reference\n all_refs = pm.listReferences()\n assert len(all_refs) == 1\n\n # and the path is matching to archived version4 path\n ref = all_refs[0]\n assert ref.path == archived_version4_path\n assert ref.unresolvedPath() == archived_version4_unresolved_path\n\n # check the deeper level references\n deeper_ref = pm.listReferences(parentReference=ref)[0]\n assert deeper_ref.path == archived_version7_path\n assert deeper_ref.unresolvedPath() == archived_version7_unresolved_path", "def test_flatten_is_working_properly_with_only_one_level_of_references(\n create_test_data, trash_bin, create_pymel, create_maya_env\n):\n data = create_test_data\n maya_env = create_maya_env\n pm = create_pymel\n # open data[\"asset2_model_main_v001\"]\n maya_env.open(data[\"asset2_model_main_v001\"], force=True)\n\n # and reference data[\"asset2_model_take1_v001\"] to it\n maya_env.reference(data[\"asset2_model_take1_v001\"])\n\n # and save it\n pm.saveFile()\n\n # renew the scene\n pm.newFile(force=1)\n\n # create an archiver\n arch = Archiver()\n\n 
project_path = arch.flatten([data[\"asset2_model_main_v001\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # now check if we have two files under the path/scenes directory\n archived_version1_path = os.path.join(\n project_path, \"scenes\", data[\"asset2_model_main_v001\"].filename\n )\n\n archived_version4_unresolved_path = os.path.join(\n \"scenes/refs\", data[\"asset2_model_take1_v001\"].filename\n )\n\n archived_version4_path = os.path.join(\n project_path, archived_version4_unresolved_path\n )\n\n assert os.path.exists(archived_version1_path)\n assert os.path.exists(archived_version4_path)\n\n # open the archived version1\n pm.workspace.open(project_path)\n pm.openFile(archived_version1_path)\n\n # expect it to have one reference\n all_refs = pm.listReferences()\n assert len(all_refs) == 1\n\n # and the path is matching to archived version4 path\n ref = all_refs[0]\n assert ref.path == archived_version4_path\n assert ref.unresolvedPath() == archived_version4_unresolved_path", "def test_flatten_is_working_properly_with_only_one_level_of_multiple_references_to_the_same_file(\n create_test_data, trash_bin, create_pymel, create_maya_env\n):\n data = create_test_data\n maya_env = create_maya_env\n pm = create_pymel\n # open data[\"asset2_model_main_v001\"]\n maya_env.open(data[\"asset2_model_main_v001\"], force=True)\n\n # and reference data[\"asset2_model_take1_v001\"] more than once to it\n maya_env.reference(data[\"asset2_model_take1_v001\"])\n maya_env.reference(data[\"asset2_model_take1_v001\"])\n maya_env.reference(data[\"asset2_model_take1_v001\"])\n\n # and save it\n pm.saveFile()\n\n # renew the scene\n pm.newFile(force=1)\n\n # create an archiver\n arch = Archiver()\n\n project_path = arch.flatten([data[\"asset2_model_main_v001\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # now check if we have two files under the path/scenes directory\n archived_version1_path = os.path.join(\n project_path, \"scenes\", data[\"asset2_model_main_v001\"].filename\n )\n\n archived_version4_unresolved_path = os.path.join(\n \"scenes/refs\", data[\"asset2_model_take1_v001\"].filename\n )\n\n archived_version4_path = os.path.join(\n project_path, archived_version4_unresolved_path\n )\n\n assert os.path.exists(archived_version1_path)\n assert os.path.exists(archived_version4_path)\n\n # open the archived version1\n pm.workspace.open(project_path)\n pm.openFile(archived_version1_path)\n\n # expect it to have three references\n all_refs = pm.listReferences()\n assert len(all_refs) == 3\n\n # and the path is matching to archived version4 path\n ref = all_refs[0]\n assert ref.path == archived_version4_path\n assert ref.unresolvedPath() == archived_version4_unresolved_path\n\n ref = all_refs[1]\n assert ref.path == archived_version4_path\n assert ref.unresolvedPath() == archived_version4_unresolved_path\n\n ref = all_refs[2]\n assert ref.path == archived_version4_path\n assert ref.unresolvedPath() == archived_version4_unresolved_path", "def test_flatten_is_working_properly_for_external_files(\n create_test_data, trash_bin, create_pymel, create_maya_env\n):\n data = create_test_data\n maya_env = create_maya_env\n pm = create_pymel\n # open data[\"version7\"]\n maya_env.open(data[\"version7\"], force=True)\n\n # create an image file at the project root\n image_filename = \"test.jpg\"\n image_path = os.path.join(data[\"version7\"].absolute_path, \"Textures\")\n image_full_path = os.path.join(image_path, image_filename)\n\n # create the file\n os.makedirs(image_path, exist_ok=True)\n 
with open(image_full_path, \"w+\") as f:\n f.writelines([\"\"])\n\n audio_filename = \"test.wav\"\n audio_path = os.path.join(data[\"version7\"].absolute_path, \"sound\")\n audio_full_path = os.path.join(audio_path, audio_filename)\n\n # create the file\n os.makedirs(audio_path, exist_ok=True)\n with open(audio_full_path, \"w+\") as f:\n f.writelines([\"\"])\n\n # create one image and one audio node\n pm.createNode(\"file\").attr(\"fileTextureName\").set(image_full_path)\n pm.createNode(\"audio\").attr(\"filename\").set(audio_full_path)\n\n # save it\n # replace external paths\n maya_env.replace_external_paths()\n pm.saveFile()\n\n # renew the scene\n pm.newFile(force=1)\n\n # create an archiver\n arch = Archiver()\n\n project_path = arch.flatten([data[\"version7\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # now check if we have the files under the path/scenes directory\n archived_version7_path = os.path.join(\n project_path, \"scenes\", data[\"version7\"].filename\n )\n\n archived_image_path = os.path.join(project_path, \"sourceimages\", image_filename)\n\n assert os.path.exists(archived_version7_path)\n assert os.path.exists(archived_image_path)\n\n # open the archived version1\n pm.workspace.open(project_path)\n pm.openFile(archived_version7_path)\n\n # and image files\n ref_image_path = pm.ls(type=\"file\")[0].attr(\"fileTextureName\").get()\n assert ref_image_path == os.path.join(project_path, \"sourceimages\", image_filename)\n ref_audio_path = pm.ls(type=\"audio\")[0].attr(\"filename\").get()\n assert ref_audio_path, os.path.join(project_path, \"sound\", audio_filename)", "def unpack_clear(self):\n if (not os.path.exists(self.unpack_path)):\n VERBOSE(\"no image found: %s\" % self.unpack_path)\n else:\n if (not os.path.isdir(self.unpack_path)):\n FATAL(\"can't flatten: %s exists but is not a directory\"\n % self.unpack_path)\n if (not self.unpacked_p(self.unpack_path)):\n FATAL(\"can't flatten: %s exists but does not appear to be an image\"\n % self.unpack_path)\n VERBOSE(\"removing existing image: %s\" % self.unpack_path)\n rmtree(self.unpack_path)", "def _set_flatten_results(self):\n self._flatten_results = True\n for analysis in self._analyses:\n if isinstance(analysis, CompositeAnalysis):\n analysis._set_flatten_results()", "def test_flatten_is_working_properly_with_exclude_mask(\n create_test_data, trash_bin, create_pymel, create_maya_env\n):\n data = create_test_data\n maya_env = create_maya_env\n pm = create_pymel\n # open data[\"version7\"]\n maya_env.open(data[\"version7\"], force=True)\n\n # create an image file at the project root\n image_filename = \"test.jpg\"\n image_path = os.path.join(data[\"version7\"].absolute_path, \"..\", \"sourceimages\")\n image_full_path = os.path.join(image_path, image_filename)\n\n # create the file\n os.makedirs(image_path, exist_ok=True)\n with open(image_full_path, \"w+\") as f:\n f.writelines([\"\"])\n\n audio_filename = \"test.wav\"\n audio_path = os.path.join(data[\"version7\"].absolute_path, \"sound\")\n audio_full_path = os.path.join(audio_path, audio_filename)\n\n # create the file\n os.makedirs(audio_path, exist_ok=True)\n with open(audio_full_path, \"w+\") as f:\n f.writelines([\"\"])\n\n # create one image and one audio node\n pm.createNode(\"file\").attr(\"fileTextureName\").set(image_full_path)\n pm.createNode(\"audio\").attr(\"filename\").set(audio_full_path)\n\n # save it\n # replace external paths\n maya_env.replace_external_paths()\n pm.saveFile()\n\n # open data[\"asset2_model_take1_v001\"]\n 
maya_env.open(data[\"asset2_model_take1_v001\"], force=True)\n\n # and reference data[\"version7\"] to it\n maya_env.reference(data[\"version7\"])\n\n # and save it\n pm.saveFile()\n\n # open data[\"asset2_model_main_v001\"]\n maya_env.open(data[\"asset2_model_main_v001\"], force=True)\n\n # and reference data[\"asset2_model_take1_v001\"] to it\n maya_env.reference(data[\"asset2_model_take1_v001\"])\n\n # and save it\n pm.saveFile()\n\n # renew the scene\n pm.newFile(force=1)\n\n # create an archiver\n arch = Archiver(exclude_mask=[\".png\", \".jpg\", \".tga\"])\n\n project_path = arch.flatten([data[\"asset2_model_main_v001\"].absolute_full_path])\n trash_bin.append(project_path)\n\n # now check if we have the files under the path/scenes directory\n archived_version1_path = os.path.join(\n project_path, \"scenes\", data[\"asset2_model_main_v001\"].filename\n )\n\n # and references under path/scenes/refs path\n archived_version4_path = os.path.join(\n project_path, \"scenes/refs\", data[\"asset2_model_take1_v001\"].filename\n )\n\n archived_version4_unresolved_path = os.path.join(\n \"scenes/refs\", data[\"asset2_model_take1_v001\"].filename\n )\n\n archived_version7_path = os.path.join(\n project_path, \"scenes/refs\", data[\"version7\"].filename\n )\n\n archived_version7_unresolved_path = os.path.join(\n \"scenes/refs\", data[\"version7\"].filename\n )\n\n archived_image_path = os.path.join(project_path, \"sourceimages\", image_filename)\n archived_audio_path = os.path.join(project_path, \"sound\", audio_filename)\n\n assert os.path.exists(archived_version1_path)\n assert os.path.exists(archived_version4_path)\n assert os.path.exists(archived_version7_path)\n # jpg should not be included\n assert not os.path.exists(archived_image_path)\n assert os.path.exists(archived_audio_path)\n\n # open the archived version1\n pm.workspace.open(project_path)\n pm.openFile(archived_version1_path)\n\n # expect it to have one reference\n all_refs = pm.listReferences()\n assert len(all_refs) == 1\n\n # and the path is matching to archived version4 path\n ref = all_refs[0]\n assert ref.path == archived_version4_path\n assert ref.unresolvedPath() == archived_version4_unresolved_path\n\n # check the deeper level references\n deeper_ref = pm.listReferences(parentReference=ref)[0]\n assert deeper_ref.path == archived_version7_path\n assert deeper_ref.unresolvedPath() == archived_version7_unresolved_path\n\n # and deeper level files\n ref_image_path = pm.ls(type=\"file\")[0].attr(\"fileTextureName\").get()\n # the path of the jpg should be intact\n assert ref_image_path == \"$REPOTPR/TP/Test_Task_1/sourceimages/test.jpg\"\n\n ref_audio_path = pm.ls(type=\"audio\")[0].attr(\"filename\").get()\n assert ref_audio_path == os.path.join(project_path, \"sound\", audio_filename)", "def begin_site(self):\n items = []\n try:\n items = self.site.config.unflattener.folders\n except AttributeError:\n pass\n\n self.logger.debug(items)\n for item in items:\n self.logger.debug(item)\n node = None\n target = ''\n try:\n node = self.site.content.node_from_relative_path(item)\n except AttributeError:\n continue\n if node:\n for resource in node.walk_resources():\n if hasattr(resource, 'meta') and hasattr(resource.meta, 'date'):\n date = resource.meta.date\n self.logger.debug(resource.meta.date)\n\n #pattern = re.compile('()-()-()')\n #date = pattern.match(date)\n #target_path = os.path.join(date.year, date.month, date.day) \n #target_path = date.strftime(\"%Y/%m/%d/\")\n target_path = \"%s/%d/%d/%d/%s\" % (node.name, date.year, 
date.month, date.day, resource.name)\n self.logger.debug('Unflattening resource with date [%s] to path [%s]' % (date, target_path)) \n resource.relative_deploy_path = target_path", "def flatten(self):\n if self.zernike_mirror is not None:\n self.zernike_mirror.flatten()\n if self.ripple_mirror is not None:\n self.ripple_mirror.flatten()\n if self.dm is not None:\n self.dm.flatten()", "def archive_diagnostics(self):\n try:\n # tarfile does not support context managers in python 2.6. We use closing() to work\n # around that.\n with closing(tarfile.open(self.collection_root_dir + '.tar.gz', mode='w:gz')) as\\\n archive:\n # collection_root_dir is an absoulte path. There is no point in preserving its\n # entire directory structure in the archive, so set the arcname accordingly.\n archive.add(self.collection_root_dir,\n arcname=os.path.basename(self.collection_root_dir))\n return True\n except Exception:\n logging.exception(\"Encountered an exception archiving diagnostics, cleaning up.\")\n return False\n finally:\n self.cleanup()", "def unpack_create_ok(self):\n if (not self.unpack_exist_p):\n VERBOSE(\"creating new image: %s\" % self.unpack_path)\n else:\n if (not os.path.isdir(self.unpack_path)):\n FATAL(\"can't flatten: %s exists but is not a directory\"\n % self.unpack_path)\n if (not self.unpacked_p(self.unpack_path)):\n FATAL(\"can't flatten: %s exists but does not appear to be an image\"\n % self.unpack_path)\n VERBOSE(\"replacing existing image: %s\" % self.unpack_path)\n rmtree(self.unpack_path)", "def flatten_toplevel_dirs( dst ):\n folders = [ os.path.join( dst , name) for name in os.listdir( dst ) if os.path.isdir( os.path.join( dst , name) ) ]\n for dPath in folders:\n if _VRB: print( \"Flatten:\" , dPath )\n flatten_dir_files( dPath )\n if _VRB: print( \"~~~ COMPLETE ~~~\\n\\n\" )", "def test_flatten():\n\n print(\"Testing flatten function with deeply nested lists\")\n test_deep_list = [\"one\", \"two\", [\"three\"], \"four\", [\"five\", \"six\", [\"seven\"]]]\n expected_result = [\"one\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\"]\n assert flatten(test_deep_list) == expected_result\n\n print(\"Testing flatten function with list with no nested lists\")\n test_shallow_list = [\"one\", \"two\", \"three\"]\n assert flatten(test_shallow_list) == test_shallow_list\n\n print(\"Testing flatten function with empty list\")\n test_empty_list = []\n assert flatten(test_empty_list) == test_empty_list", "def restore(self):\n\n if not Settings.quiet:\n print('Creation of non existant files and directories', end=\" \")\n\n structure = Helpers.Dict().from_json(Helpers.File(self.structure).read())\n\n structure = structure['output']\n replace = self.restore_replace()\n\n for directory in structure:\n if not path.isdir(self.base + self.path + directory):\n self.travis_permissions()\n mkdir(self.base + self.path + directory)\n self.travis_permissions()\n\n for file in structure[directory]:\n file_path = self.path + directory + directory_separator + file\n\n content_to_write = structure[directory][file]['content']\n online_sha = structure[directory][file]['sha512']\n content_to_write = Helpers.Regex(\n content_to_write, '@@@', escape=True, replace_with='\\\\n').replace()\n\n git_to_keep = file_path.replace('gitignore', 'keep')\n keep_to_git = file_path.replace('keep', 'gitignore')\n\n if replace:\n if path.isfile(file_path) and Hash(\n file_path, 'sha512', True).get() == online_sha:\n rename(file_path, git_to_keep)\n write = False\n else:\n Helpers.File(file_path).delete()\n 
file_path = git_to_keep\n write = True\n else:\n if path.isfile(keep_to_git) and Hash(\n file_path, 'sha512', True).get() == online_sha:\n rename(file_path, keep_to_git)\n write = False\n else:\n Helpers.File(keep_to_git).delete()\n file_path = keep_to_git\n write = True\n\n if write:\n Helpers.File(file_path).write(\n content_to_write + '\\n', True)\n\n if not Settings.quiet:\n print(Settings.done)", "def test_flatten():\n assert list(nsitertools.flatten([1, 2, [3, 4, 5, [[6]]]])) == \\\n [1, 2, 3, 4, 5, 6]", "def restore( path ): #@NoSelf # pylint: disable=E0213\n if not os.path.exists( path ):\n nTerror('CingSummary.restore: path \"%s\" does not exist', path)\n return True\n #end if\n return xML2obj(path = path)", "def begin_site(self):\n items = []\n try:\n items = self.site.config.flattener.items\n except AttributeError:\n pass\n\n for item in items:\n node = None\n target = ''\n try:\n node = self.site.content.node_from_relative_path(item.source)\n target = Folder(item.target)\n except AttributeError:\n continue\n if node:\n for resource in node.walk_resources():\n target_path = target.child(resource.name)\n self.logger.debug(\n 'Flattening resource path [%s] to [%s]' %\n (resource, target_path))\n resource.relative_deploy_path = target_path\n for child in node.walk():\n child.relative_deploy_path = target.path", "def pre_start_restore():\n Logger.info('Restoring Falcon backed up directories after upgrade...')\n directoryMappings = _get_directory_mappings()\n\n for directory in directoryMappings:\n archive = os.path.join(tempfile.gettempdir(), BACKUP_TEMP_DIR,\n directoryMappings[directory])\n\n if not os.path.isfile(archive):\n raise Fail(\"Unable to restore missing backup archive {0}\".format(archive))\n\n tar_archive.untar_archive(archive, directory)\n\n # cleanup\n Directory(os.path.join(tempfile.gettempdir(), BACKUP_TEMP_DIR), action = \"delete\" )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Utility function to get an AOV from its given info. AOV nodes can't be retrieved using a simple path like "|RenderPass|Layer|Beauty" and the AOV name ("Beauty" here) is not the node name but a plug value ("PlugName"). This function tries to provide an easy way to retrieve an AOV node based on predicted render pass and render layer names. So "|RenderPass|Layer|Beauty" will return the AOV node representing the "Beauty" AOV.
def aov_node(parser, rp_name, rl_name, aov_name): # get render layer node rl = parser.path_to_node('|{rp_name}|{rl_name}'.format(**locals())) aov_nodes = [] # and find aov based on its display name for aov_node in rl.children: if aov_node.display_name == aov_name: aov_nodes.append(aov_node) if len(aov_nodes) == 0: raise PathError(("Can't find AOV '{rp_name}', '{rl_name}', " "'{aov_name}'").format(**locals())) elif len(aov_nodes) == 2: raise PathError(("More than one AOV found '{rp_name}', " "'{rl_name}', '{aov_name}'").format(**locals())) else: assert len(aov_nodes) == 1, aov_nodes return aov_nodes[0]
[ "def add_aov(self):\n prompt = QtWidgets.QInputDialog(self._view)\n prompt.setWindowTitle('Add AOV')\n prompt.setLabelText('AOV name:')\n prompt.setOkButtonText('Add')\n if prompt.exec_():\n self._model.add_aov(prompt.textValue())", "def _prepare_ovas(self):\n ovalist = []\n for name in self.args.NAME:\n if name.endswith('.ova'):\n ovalist.append(name)\n elif os.path.isdir(name):\n ovalist.extend(self._ova_from_dir(name))\n else:\n print(\"%s doesn't looks like directory or OVA\" % name,\n file=stderr)\n return ovalist", "def AeroOutputGetter(name):\n\n assert name in _AERO_OUTPUTS, ('Invalid value \"%s\". Must be one of %s.'\n % _AERO_OUTPUTS)\n if name == 'Cx':\n return lambda cfm, alpha, beta: cfm.force.x\n elif name == 'Cy':\n return lambda cfm, alpha, beta: cfm.force.y\n elif name == 'Cz':\n return lambda cfm, alpha, beta: cfm.force.z\n if name == 'Cl':\n return lambda cfm, alpha, beta: cfm.moment.x\n elif name == 'Cm':\n return lambda cfm, alpha, beta: cfm.moment.y\n elif name == 'Cn':\n return lambda cfm, alpha, beta: cfm.moment.z\n elif name == 'CL':\n return lambda cfm, alpha, beta: -RotBToW(cfm.force, alpha, beta).z\n elif name == 'CD':\n return lambda cfm, alpha, beta: -RotBToW(cfm.force, alpha, beta).x\n else:\n raise ProgrammerError('Case \"%s\" is not handled.' % name)", "def estimate_fov(model, target_layer = None):\n adjacency_list = model_to_graph(model)\n output_nodes = [k for k, v in adjacency_list.items() if not v[\"out\"]]\n sorted_nodes = topological_sort(adjacency_list, copy = True)\n node_fovs = {n: {\"fovs\": [], \"strides\": []} for n in sorted_nodes}\n undefined = list()\n for name in sorted_nodes:\n fovs = node_fovs[name][\"fovs\"]\n strides = node_fovs[name][\"strides\"] \n layer = model.get_layer(name)\n layer_id = type(layer).__name__\n if layer_id in _UNDEFINED_LAYERS or (None, None) in fovs:\n fov, stride = (None, None), (None, None)\n undefined.append(layer_id)\n else:\n if not fovs:\n fovs.append((1, 1))\n if not strides:\n strides.append((1, 1))\n fov, stride = _DISPATCHER[layer_id](layer, fovs, strides)\n node_fovs[name][\"result\"] = fov\n for output in adjacency_list[name][\"out\"]:\n node_fovs[output][\"fovs\"].append(fov)\n node_fovs[output][\"strides\"].append(stride)\n \n if undefined:\n warnings.warn(f\"Undefined FOV for layers {undefined}. 
Partial result is returned\")\n \n if target_layer is not None:\n if isinstance(target_layer, str):\n return node_fovs[target_layer][\"result\"]\n else:\n return [node_fovs[name][\"result\"] for name in target_layer]\n else:\n if len(output_nodes) == 1:\n return node_fovs[output_nodes[0]][\"result\"]\n else:\n return [node_fovs[name][\"result\"] for name in output_nodes]", "def GetOVFProperty(self, vm, key):\n for prop in vm.config.vAppConfig.property:\n if prop.id == key:\n return prop.value\n\n return None", "def optimized_node_to_virtual_edge(self, idx, robot_idx):\n if robot_idx == 'a':\n pose = self.gtsam_graph1.get_pose(idx)\n cov = self.gtsam_graph1.cov(idx)\n info = self.to_info(cov)\n elif robot_idx == 'b':\n pose = self.gtsam_graph2.get_pose(idx)\n cov = self.gtsam_graph2.cov(idx)\n info = self.to_info(cov)\n return Edge2D('w', idx, pose[0], pose[1], pose[2], info)", "def get_tune_path_info(est):\n\n if hasattr(est, 'best_estimator_'):\n return get_pen_path_setup(est)\n else:\n return None, None, None", "def scheme_from_ova(obj: ova.Node, config: Config) -> t.Optional[SchemeNode]:\n\n ova_type = obj[\"type\"]\n ova_scheme = obj[\"text\"]\n\n if ova_type in aif2scheme:\n scheme = aif2scheme[t.cast(aif.SchemeType, ova_type)]\n\n if scheme and (found_scheme := text2scheme[type(scheme)].get(ova_scheme)):\n scheme = found_scheme\n\n premise_descriptors = [\n str(node_id)\n for description, node_id in obj[\"descriptors\"].items()\n if not description.lower().startswith(\"s_conclusion\")\n ]\n\n timestamp = dt.from_format(obj.get(\"date\"), ova.DATE_FORMAT) or pendulum.now()\n\n return config.SchemeNodeClass(\n id=str(obj[\"id\"]),\n metadata=config.MetadataClass(timestamp, timestamp),\n scheme=scheme,\n premise_descriptors=premise_descriptors,\n )\n\n return None", "def atom_from_ova(obj: ova.Node, config: Config) -> AtomNode:\n timestamp = dt.from_format(obj.get(\"date\"), ova.DATE_FORMAT) or pendulum.now()\n\n return config.AtomNodeClass(\n id=str(obj[\"id\"]),\n metadata=config.MetadataClass(timestamp, timestamp),\n text=utils.parse(obj[\"text\"], config.nlp),\n )", "def aov(self):\n return self.item", "def test_get_vm_ovf_xml(self, fake_CreateDescriptorParams):\n fake_vm_ovf = MagicMock()\n fake_vm_ovf.error = []\n fake_vm_ovf.ovfDescriptor = '<some>xml</some>'\n fake_vcenter = MagicMock()\n fake_vcenter.content.ovfManager.CreateDescriptor.return_value = fake_vm_ovf\n fake_vm = MagicMock()\n fake_vm.name = 'myVM'\n fake_device_ovfs = [MagicMock(), MagicMock()]\n\n output = virtual_machine.get_vm_ovf_xml(fake_vm, fake_device_ovfs, fake_vcenter)\n expected = fake_vm_ovf.ovfDescriptor\n\n self.assertEqual(output, expected)", "def map_homee_node(node):\n from pyhomee import const\n if node.id == -1:\n return 'sensor'\n if node.profile in const.PROFILE_TYPES[const.DISCOVER_LIGHTS]:\n return 'light'\n if node.profile in const.PROFILE_TYPES[const.DISCOVER_CLIMATE]:\n return 'climate'\n if node.profile in const.PROFILE_TYPES[const.DISCOVER_BINARY_SENSOR]:\n return 'binary_sensor'\n if node.profile in const.PROFILE_TYPES[const.DISCOVER_SWITCH]:\n return 'switch'\n\n attr_types = [attr.type for attr in node.attributes]\n if const.COVER_POSITION in attr_types:\n return 'cover'", "def optimized_node_to_virtual_edge(self, idx, robot_idx):\n if robot_idx == 'a':\n translation, R = self.gtsam_graph1.get_pose(idx)\n cov = self.gtsam_graph1.cov(idx)\n info = self.to_info(cov)\n elif robot_idx == 'b':\n translation, R = self.gtsam_graph2.get_pose(idx)\n cov = self.gtsam_graph2.cov(idx)\n info = 
self.to_info(cov)\n return Edge3D('w', idx, translation, Quaternion.from_R(R).q, info)", "def getPlane(self, distFromEye: 'float const') -> \"SbPlane\":\n return _coin.SbViewVolume_getPlane(self, distFromEye)", "def test_make_ova_provide_name(self, fake_rmtree, fake_listdir, fake_rename, fake_open,\n fake_sleep, fake_makedirs, fake_tarfile, fake_download_vmdk, fake_get_vm_ovf_xml,\n fake_block_on_lease, fake_power):\n fake_listdir.return_value = ['vm01.ova']\n fake_vcenter = MagicMock()\n fake_vm = MagicMock()\n fake_vm.name = 'myVM'\n fake_log = MagicMock()\n\n output = virtual_machine.make_ova(fake_vcenter, fake_vm, '/save/ova/here', fake_log, ova_name='vm01.ova')\n expected = '/save/ova/here/vm01.ova'\n\n self.assertEqual(output, expected)", "def AoA(s, obj):\n\n # Check if AoA should be negative. AoA is negative\n # when the angle between the flight vector and the lift vector\n # is less than 90 deg.\n \n if (obj.V.angleBetween(obj.lift) < pi/2):\n return -((obj.n.angleBetween(obj.V)) * (180/pi))\n\n return (obj.n.angleBetween(obj.V)) * (180/pi)", "def map3dToAspect2d(self, node, point):\n\n # Convert the point to the 3-d space of the camera\n p3 = self.camera.getRelativePoint(node, point)\n # Convert it through the lens to render2d coordinates\n p2 = Point2()\n if not self.camLens.project(p3, p2):\n return None\n r2d = Point3(p2[0], 0, p2[1])\n # And then convert it to aspect2d coordinates\n a2d = self.aspect2d.getRelativePoint(self.render2d, r2d)\n return a2d", "def getPlane(self, distFromEye: 'double const') -> \"SbPlane\":\n return _coin.SbDPViewVolume_getPlane(self, distFromEye)", "def __str__(self):\n text = \"Attractor \" + self.label + \"\\n\"\n text += \"\\tLength: \"+ str(len(self.states)) + \"\\n\"\n text += \"\\tBasin: \"+ str(self.basin) + \"\\n\"\n text += \"\\tWith nodes: \"+ ', '.join(self.node_names) + \"\\n\" \n text += \"\\tWith states: \"\n for a in self.states: text += \" -> \" + state_to_str(a)\n return text.strip()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a URL that is most likely to route to `local_host` from outside. The point is that we may be running on a remote host from the user's point of view, so they can't access `local_host` from a Web browser just
def guess_external_url(local_host, port): if local_host in ['0.0.0.0', '::']: # The server is listening on all interfaces, but we have to pick one. # The system's FQDN should give us a hint. local_host = socket.getfqdn() # https://github.com/vfaronov/turq/issues/9 match = IPV4_REVERSE_DNS.match(local_host) if match: local_host = '.'.join(reversed(match.groups())) else: match = IPV6_REVERSE_DNS.match(local_host) if match: address_as_int = int(''.join(reversed(match.groups())), 16) local_host = str(IPv6Address(address_as_int)) if ':' in local_host: # Looks like an IPv6 literal. Has to be wrapped in brackets in a URL. # Also, an IPv6 address can have a zone ID tacked on the end, # like "%3". RFC 6874 allows encoding them in URLs as well, # but in my experiments on Windows 8.1, I had more success # removing the zone ID altogether. After all this is just a guess. local_host = '[%s]' % local_host.rsplit('%', 1)[0] return 'http://%s:%d/' % (local_host, port)
[ "def get_current_host_url():\n if settings.DEBUG or is_local():\n return 'http://jaypark.sinwoobang.me:8000'\n return 'http://jaypark.sinwoobang.me'", "def local_url(self) -> str:\n return f'ws://127.0.0.1:{self.port}'", "def toLocalhostUri(uri):\n return uri.replace(CELLAR_BASE + '/resource/celex',\n '%(localhost)s/memento' % {'localhost': local_host})", "def mondrian_server_internal_url():\n return 'http://127.0.0.1:8080'", "def get_versioned_hosturl():\n if is_local_dev_server():\n # TODO(maruel): It'd be nice if it were easier to use a ephemeral SSL\n # certificate here and not assume unsecured connection.\n return 'http://' + modules.get_hostname()\n\n return 'https://%s-dot-%s' % (\n get_app_version(), app_identity.get_default_version_hostname())", "def getServerURL(environ):\n scheme = environ.get('wsgi.url_scheme')\n if scheme is None:\n scheme = 'HTTPS' in environ and 'https' or 'http'\n\n http_host = environ.get('HTTP_HOST')\n\n # if vhm specifies a virtual host base, prefer it over the http\n # host\n vhm_host_base = environ.get('repoze.vhm.virtual_host_base')\n\n http_host = vhm_host_base or http_host\n\n if http_host:\n if ':' in http_host:\n host, port = http_host.split(':', 1)\n else:\n host = http_host\n port = None\n else:\n host = environ.get('SERVER_NAME', 'localhost')\n port = environ.get('SERVER_PORT', '8080')\n\n if port is not None and port != DEFAULT_PORTS.get(scheme):\n netloc = '%s:%s' % (host, port)\n else:\n netloc = host\n\n return urlunsplit((scheme, netloc, '', '', ''))", "def get_external_url():", "def get_runner_url(context, riurik_url):\n\tdef replace_localhost(url):\n\t\treturn url.replace('localhost', socket.gethostname())\n\n\tuse_local_runner = context.get('use_local_runner')\n\tremote_runner_url = get_runner_from_context(context)\n\tif use_local_runner or not remote_runner_url:\n\t\ttarget = riurik_url\n\telse:\n\t\ttarget = remote_runner_url\n\n\treturn replace_localhost(target)", "def get_uri(host, port):\n if port:\n return '%s:%s' % (host, port)\n return host", "def _resolve_url():\n\ttry:\n\t\tname, aliaslist, addresslist = socket.gethostbyname_ex('paste')\n\texcept socket.gaierror:\n\t\t# jaraco generously hosts paste for the world\n\t\tname = 'paste.jaraco.com'\n\t\tname, aliaslist, addresslist = socket.gethostbyname_ex(name)\n\tname = _patch_heroku(name, aliaslist)\n\tfallback = 'https://{name}/'.format(name=name)\n\treturn os.environ.get('LIBRARYPASTE_URL', fallback)", "def is_local():\n return 'sinwoo' in socket.gethostname()", "def getUrl(host, port):\n return \"http://\" + host + \":\" + port", "def getHostFrom(fromHost):", "def local_plugin_url(cls, plugin_name):\r\n return urllib.pathname2url(TestRemotePluginInstaller.local_plugin(plugin_name))", "def require_linklocal(handler: Handler) -> Handler:\n\n @functools.wraps(handler)\n async def decorated(request: web.Request) -> web.Response:\n ipaddr_str = request.headers.get(\"x-host-ip\")\n invalid_req_data = {\n \"error\": \"bad-interface\",\n \"message\": (\n f\"The endpoint {request.rel_url}\"\n f\" can only be used from link-local connections.\"\n f\" Make sure you're connected to this robot directly by cable\"\n f\" and using this robot's wired IP address\"\n f\" (not its wireless IP address).\"\n ),\n }\n if not ipaddr_str:\n return web.json_response( # type: ignore[no-untyped-call,no-any-return]\n data=invalid_req_data, status=403\n )\n try:\n addr = ipaddress.ip_address(ipaddr_str)\n except ValueError:\n LOG.exception(f\"Couldn't parse host ip address {ipaddr_str}\")\n raise\n\n 
if not addr.is_link_local:\n return web.json_response( # type: ignore[no-untyped-call,no-any-return]\n data=invalid_req_data, status=403\n )\n\n return await handler(request)\n\n return decorated", "def get_url(path):\n return 'http://%(host)s:%(port)s/%(path)s' % {\n 'host': getenv('TEST_WEB_SERVER_HOST', 'http://localhost'),\n 'port': getenv('TEST_WEB_SERVER_PORT', 8000),\n 'path': path\n }", "def get_server_url(): # type: () -> str\n\n server = '127.0.0.1'\n if 'DOCKER_HOST' in os.environ:\n server = urlparse.urlparse(os.environ['DOCKER_HOST']).hostname\n return 'http://{}:{}/'.format(server, PORT)", "def _get_remote_host(request):\n result = request.META.get('REMOTE_HOST', None)\n if not result:\n try:\n hostinfo = socket.gethostbyaddr(request.META['REMOTE_ADDR'])\n result = hostinfo[0]\n except socket.error as err:\n if err[0] != 1:\n raise\n\n return result", "def is_local_url_decorator(original_function):\n @wraps(original_function)\n def is_local_url_wrapper(*args, **kwargs):\n url = args[1]\n if url.startswith('///'):\n return False\n return original_function(*args, **kwargs)\n return is_local_url_wrapper" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return True if the minimum duration in the stage has passed. Otherwise return False.
def min_duration_over(self): return self.start_date + datetime.timedelta(seconds= \ self.group_stage.stage.min_duration) <= timezone.now()
[ "def has_run(self, duration):\n if self.time() - self.start_time > duration:\n return True\n return False", "def has_duration(self):\n return # boolean", "def validate_time_step(self):\n valid = (self.time_step >= 0) and (is_number(self.time_step))\n # set flag\n self.time_step_valid = valid\n return valid", "def is_duration(self) -> \"bool\":\n return self._value.getType() == Value.DUVAL", "def exceeded(self):\n\n if self.timeout is None:\n return False\n return self.passed >= self.timeout", "def should_start(self):\n # XXX Don't return true if it should_stop.\n now = datetime.datetime.utcnow()\n delay_delta = datetime.timedelta(seconds=self.container_set.run_delay)\n return now >= self.run.started_at + delay_delta", "def min_exptime_reached(self):\n try:\n return self._exptimes[-1] <= self._min_exptime\n except IndexError:\n return False", "def has_start_time(self):\n return # boolean", "def is_live(self):\n action_created = arrow.get(self.created)\n mp_start = arrow.get(self.episode.mediapackage.start)\n mp_duration_sec = int(self.episode.mediapackage.duration) / 1000\n return action_created <= mp_start.replace(seconds=+mp_duration_sec)", "def _check_duration(duration: float) -> float:\n _check_type(duration, (\"numeric\",), item_name=\"duration\")\n assert 0 < duration\n return duration", "def new_frame_time(self):\n if self.frame_start_time is None:\n return False\n\n return (time.time() - self.frame_start_time) > self.frame_length", "def _is_duration(self, name):\n return self._arg_tree[name]['type'] == 'duration'", "def check_start(self) -> bool:\n # You can't play with less than MIN_PLAYERS players.\n return len(self._players) >= MIN_PLAYERS", "def min_duration(self):\n value = None\n for timer in self.timers:\n if not value or value > timer.duration:\n value = timer.duration\n return value", "def detect_time_threshold(self, ):\n if self.threshold_timestamp:\n return time.time() - self.threshold_timestamp > self.time_threshold\n return False", "def min_study_duration(self):\n return self._min_study_duration", "def larger_than_min_t(self):\n total_t = len(self.__detections)\n return total_t >= self.__min_t, total_t", "def in_the_future(self):\n return int(self.time) > time.time() + CHAIN.TIME_TOLERANCE", "def is_overdue(self):\n\t\tdeadline = self.get_deadline()\n\n\t\tif deadline is None:\n\t\t\t# No deadline has been set for this stage\n\t\t\treturn False\n\n\t\tif self.status == 0:\n\t\t\t# The stage has already been completed\n\t\t\treturn False\n\n\t\treturn timezone.now() > deadline" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the date/time of the deadline for this stage. Return None if there is no deadline for this stage.
def get_deadline(self): if self.start_date is None: # The stage hasn't been started yet: no deadline return None if self.group_stage.stage.deadline is None: # This stage has no deadline associated with it return None # Compute the deadline for this stage days_to_complete_stage = datetime.timedelta(days=self.group_stage.stage.deadline) return self.start_date + days_to_complete_stage
[ "def deadline(self):\n if not self._deadline:\n self._deadline = self.now + timezone.timedelta(days=1)\n return self._deadline", "def deadline(self):\n\n print \"# Deadline info\"\n print \"#\"\n print \"# Exipres in: {time}\".format(time=self.subtree['.meta'].get('deadline', -1))", "def _get_deadline(self):\n\t\tjobid = os.getenv('SLURM_JOB_ID')\n\t\tif jobid is None:\n\t\t\treturn 0\n\n\t\tcmd = 'squeue -h -j %s -o \"%%L\"' % jobid\n\t\ttry:\n\t\t\tp = subprocess.run(shlex.split(cmd), stdout=subprocess.PIPE)\n\t\texcept:\n\t\t\treturn 0\n\t\tremaining = p.stdout.decode('utf8').strip()\n\n\t\t# parse time format\n\t\tdays = 0\n\t\tif '-' in remaining:\n\t\t\tdays, rest = remaining.split('-')\n\t\telse:\n\t\t\trest = remaining\n\t\ttry:\n\t\t\thours, minutes, seconds = rest.split(':')\n\t\texcept:\n\t\t\treturn 0\n\n\t\treturn time.time() + ((int(days)*24 + int(hours))*60 + int(minutes))*60 + int(seconds)", "def get_deadline():\n static_data = MY_SESSION.get(constants.FANTASY_API_URL).json()\n result = static_data['next_event_fixtures'][0]['deadline_time']\n return result", "def get_next_deadline(self):\n\n now = utc_now()\n for ddl in self.deadlines:\n if ddl[0] >= now:\n return (ddl[0], ddl[1])", "def last_due(self):\n\n if self.schedule:\n due = self.schedule.before(self.now)\n return due.date() if due else None\n\n if self.initial_due_date and self.initial_due_date <= self.now.date():\n return self.initial_due_date", "def scheduled_end_datetime(self) -> Optional[dt]:\n try:\n estimate: dt = self[\"estimate\"]\n duration = parse_duration(estimate)\n return self[\"scheduled\"] + duration\n except TypeError:\n return None", "def leads_deadline(dt_lead):\n\n if dt_lead < datetime.datetime.combine(dt_lead.date(), datetime.time(9, 0)):\n deadline = datetime.datetime.combine(dt_lead.date(), datetime.time(10, 0))\n elif dt_lead > datetime.datetime.combine(dt_lead.date(), datetime.time(20, 0)):\n deadline = datetime.datetime.combine(dt_lead.date(), datetime.time(10, 0)) + datetime.timedelta(days=1)\n else:\n deadline = dt_lead + datetime.timedelta(hours=int(config.DL_HOURS))\n return deadline", "def getLeaseTime(self):\n d = self.do_getattrdict([], [FATTR4_LEASE_TIME])\n return d[FATTR4_LEASE_TIME]", "def remaining_time(self):\n return max(self.expiration_deadline - timezone.now(),\n datetime.timedelta(0))", "def eligible_time(self):\n return self._job.get('eligible_time', 0)", "def _workflow_time(self):\n return self.__time", "def get_remaining_seconds(self) -> float:\n retval = None\n\n self.__calculate_start_time()\n\n if self.__end_time is not None:\n # Calculate how long it is until the deadline\n now = datetime.datetime.now()\n delta = self.__end_time - now\n retval = delta.total_seconds()\n if retval < 0:\n retval = 0\n\n return retval", "def delay_rule_time(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"delay_rule_time\")", "def frame_end_time(self) -> str:\n return pulumi.get(self, \"frame_end_time\")", "def batch_send_deadline(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"batch_send_deadline\")", "def end_time(self):\n ret = self._get_attr(\"endTime\")\n return ret", "def delay_rule_time(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"delay_rule_time\")", "def complete_time(self):\n\n return self.__end" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return True if this stage is overdue, False otherwise.
def is_overdue(self): deadline = self.get_deadline() if deadline is None: # No deadline has been set for this stage return False if self.status == 0: # The stage has already been completed return False return timezone.now() > deadline
[ "def is_overdue(self):\n return bool(self.due_back and date.today() > self.due_back)", "def is_overdue(self):\n if self.due_back and date.today() > self.due_back:\n return True\n return False", "def overdue(self) -> bool:\n if not self.scheduled_start_datetime:\n return False\n\n now = dt.now()\n now_ts = dt.timestamp(now)\n\n if self[\"end\"] is None:\n start_ts = dt.timestamp(self.scheduled_start_datetime)\n if now_ts > start_ts:\n return True\n\n return False\n\n end_ts = dt.timestamp(self[\"end\"])\n if now_ts > end_ts:\n return True\n\n return False", "def is_over(self):\n return self.booking.end_datetime < tznow()", "def is_over(self, state: StonehengeState) -> bool:\n return state.game_over()", "def _alert_due(self):\n last_alert_time = self._get_last_alert_seconds()\n if last_alert_time:\n return last_alert_time > self._get_interval_in_seconds()\n else:\n return True", "def is_game_over(self):\n return self.game_over", "def isPast(self):\n return self.end_time < get_now_time()", "def game_over(self):\n return self.winner() is not None", "def is_expired(self, never_onhold=True):\n if self.onhold and never_onhold:\n # On-hold pastes are never expired.\n return False\n\n try:\n elapsed = datetime.today() - self.publish_date\n except Exception as ex:\n log.error('Error getting elapsed time:\\n{}'.format(ex))\n return False\n return (elapsed.days > 0)", "def is_game_over(self, state):\n result = self.outcome(state)\n return not result == 2", "def game_over(self):\n return len(self.teams) is 1", "def min_exptime_reached(self):\n try:\n return self._exptimes[-1] <= self._min_exptime\n except IndexError:\n return False", "def is_expired(self) -> bool:\n return self._needs_recalc or self._gen_sync.is_expired()", "def has_expired(self):\n return self.time_remaining <= 0", "def game_over(self):\n if self.get_current_state() not in self._actions.keys():\n return True\n return False", "def is_future_effective(self) -> bool:\n with suppress(AttributeError, TypeError):\n if self._storage.effective_date > self._storage.payment_completion_date:\n return True\n return False", "def expired(self):\n return getattr(self, \"invalidated\", False) or self.half_life > 0 and self.age > self.half_life * 2", "def check_overdue(self, cr, uid, ids, context=None):\n context = context or {}\n fs_obj = self.pool.get('freight.shipment')\n for form in self.read(cr, uid, ids, context=context):\n fs_ids = {True: [], False: []}\n for fs_id in context['active_ids']:\n if (fs_obj._check_shipment_overdue(\n cr, uid, fs_id, context=context) or\n fs_obj._check_prepare_overdue(\n cr, uid, fs_id, context=context)):\n fs_ids[True] += [fs_id]\n else:\n fs_ids[False] += [fs_id]\n fs_obj.write(\n cr, uid, fs_ids[True], {'is_overdue': True}, context=context)\n fs_obj.write(\n cr, uid, fs_ids[False], {'is_overdue': False}, context=context)\n return {'type': 'ir.actions.act_window_close'}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the active stages for the specified 'user' and 'study'. An active stage is defined as a stage that has been started but not ended.
def get_active_stages(user, study=None): active_stages = UserStage.objects.filter(user=user, status=1) # Studies should be started start_date_exists = Q(group_stage__stage__study__start_date__isnull=False) start_date_in_past = Q(group_stage__stage__study__start_date__lte=timezone.now()) active_stages = active_stages.filter(start_date_exists & start_date_in_past) # Studies should not have ended end_date_does_not_exist = Q(group_stage__stage__study__end_date__isnull=True) end_date_is_in_future = Q(group_stage__stage__study__end_date__gt=timezone.now()) active_stages = active_stages.filter(end_date_does_not_exist | end_date_is_in_future) if study is not None: active_stages = active_stages.filter(group_stage__stage__study=study) return active_stages
[ "def get_next_user_stage(user, study):\n\t\n\tus = UserStage.objects.filter(user=user, group_stage__stage__study=study).order_by('group_stage__order')\n\t\n\tfor s in us.all():\n\t\tif s.status != 0:\n\t\t\treturn s\n\t\t\n\treturn None", "def get_active_exams_for_user(user_id, course_id=None):\n result = []\n\n student_active_exams = ProctoredExamStudentAttempt.objects.get_active_student_attempts(user_id, course_id)\n for active_exam in student_active_exams:\n # convert the django orm objects\n # into the serialized form.\n exam_serialized_data = ProctoredExamSerializer(active_exam.proctored_exam).data\n active_exam_serialized_data = ProctoredExamStudentAttemptSerializer(active_exam).data\n student_allowances = ProctoredExamStudentAllowance.get_allowances_for_user(\n active_exam.proctored_exam.id, user_id\n )\n allowance_serialized_data = [ProctoredExamStudentAllowanceSerializer(allowance).data for allowance in\n student_allowances]\n result.append({\n 'exam': exam_serialized_data,\n 'attempt': active_exam_serialized_data,\n 'allowances': allowance_serialized_data\n })\n\n return result", "def _completed_stages(user, level_id):\n levels_of_completed_stage = [x.level_id for x in user.stage_set.all()]\n\n return levels_of_completed_stage.count(level_id)", "def getUserContests(user):\n\tquery = Q(active=True) & (Q(users=user) | Q(groups__in=user.groups.all()))\n\treturn Contest.objects.filter(query).distinct()", "def find_stages(self, stages, mash_name='_main', start_time=timedelta()):\n stage = []\n stages[mash_name] = stage\n #\n t = start_time\n vol = Quantity('0gal')\n temp = Quantity()\n num = 0\n for step in self.steps:\n num += 1\n type = step['type']\n # Current state\n if (num > 1):\n stage.append({'type': step['type'], 'time': t, 'volume': vol, 'temp': temp})\n if ('volume' in step):\n vol = step['volume']\n if ('temp' in step):\n temp = step['temp']\n # Action of this step\n action = {}\n action.update(step)\n action['time'] = t\n if ('time' in step):\n t += self.parsetime(step['time'])\n # add final state\n stage.append({'type': 'state', 'time': t, 'volume': vol, 'temp': temp})\n # no return val, data left in stage", "def stages(self):\n return self.instance.get_task_progress(self.name).stages", "def stages(self) -> typing.List[str]:\n return typing.cast(\n typing.List[str],\n self._properties.get(\"stages\"),\n )", "def get_stages(self):\n def resolve_intersections(stage):\n \"\"\"Removes actions from a stage that creates\n conflict between the selected stage candidates.\"\"\"\n actions_to_remove = set()\n for a in stage:\n if self.action[a].get('next', None):\n intersection = self.action[a]['next'].intersection(stage)\n if intersection:\n for i in intersection:\n actions_to_remove.add(i)\n\n for a in actions_to_remove:\n stage.remove(a)\n\n current_stage = self.root\n\n while current_stage:\n yield current_stage\n next_stage = set()\n for n in current_stage:\n next_stage.update(\n self.action[n].get(\n 'next', set()))\n resolve_intersections(next_stage)\n current_stage = next_stage", "def get_studies_for_user(self, user, categories, include_invalid=False):\n\n associated = session.query(StudyAssociated).filter_by(uid=user.uid, access=True).all()\n associated_studies = [x.study_id for x in associated]\n db_studies = session.query(StudyModel).filter((StudyModel.user_uid == user.uid) |\n (StudyModel.id.in_(associated_studies))).all()\n\n studies = []\n for study_model in db_studies:\n if include_invalid or study_model.review_type in self.VALID_REVIEW_TYPES:\n 
studies.append(StudyService.get_study(study_model.id, categories, study_model=study_model,\n process_categories=False))\n return studies", "def get_lifecycle_stage_list(self):\n response = self.client.get(self.client.get_url())\n\n results = {}\n for item in response.json():\n results[item['StageID']] = item['StageName']\n\n return results", "def started_courses(user):\n courses = Course.objects.filter(\n module__question__try__user=user)\n return courses.distinct()", "def view_stage(request):\n user = request.user\n stage = Stage.objects.get(pk=request.POST['pk'])\n template = stage.get_stage_url()\n context = {'pkid':request.POST['pk']}\n\n # if user has access to the level of the stages\n level_of_stage = stage.get_level()\n try:\n UserLevel.objects.get(user=request.user, level=level_of_stage)\n except:\n raise Http404(\"Puzzle cannot be accessed.\")\n\n # return the stage to user\n return render_to_response(template, context=context)", "def all_scenes_for_sequence(sequence, user_context=False):\n if user_context:\n return gazu_user.all_scenes_for_sequence(sequence)\n else:\n return gazu_scene.all_scenes_for_sequence(sequence)", "def get_trials_from_study(self):\n return self.study.get_trials(states=(optuna.trial.\n TrialState.COMPLETE, ))", "def getGamesWithStatus(self, user, status):\n\n if user == None:\n return []\n\n hostGamesInProgress = self.cm.getGamesTable().query(IndexName='HostId-StatusDate-index', KeyConditionExpression=(Key('HostId').eq(user) & Key('StatusDate').begins_with(status))) \n \n oppGamesInProgress = self.cm.getGamesTable().query(IndexName='OpponentId-StatusDate-index', KeyConditionExpression=(Key('OpponentId').eq(user) & Key('StatusDate').begins_with(status))) \n\n games = self.mergeQueries(hostGamesInProgress['Items'],\n oppGamesInProgress['Items'])\n return games", "def get_sessions_for_user(user):\n\n audit_sessions = UserAudit.objects.filter(user=user)\n return audit_sessions", "def get_stage(cls, name):\n return cls.pipeline_stages[name][0]", "def ls_stage(self):\n return \"\\n\".join(self.stages)", "def split_query_into_stages(query):\n stages = []\n current_stage = []\n subsearch = []\n\n for token in tokenize_query(query):\n\n if token.type == \"LBRACKET\":\n subsearch.append(token)\n current_stage.append(token.value)\n continue\n \n if token.type == \"RBRACKET\":\n current_stage.append(token.value)\n subsearch.pop(-1)\n if len(subsearch) == 0:\n stages.append(\" \".join(current_stage))\n current_stage = []\n continue\n\n if len(subsearch) > 0:\n current_stage.append(token.value)\n continue \n\n if token.type == \"PIPE\":\n if len(current_stage) > 0:\n stages.append(\" \".join(current_stage))\n current_stage = [token.value]\n continue\n \n current_stage.append(token.value)\n \n if len(current_stage) > 0:\n stages.append(\" \".join(current_stage))\n\n return stages" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the next user stage.
def get_next_user_stage(user, study): us = UserStage.objects.filter(user=user, group_stage__stage__study=study).order_by('group_stage__order') for s in us.all(): if s.status != 0: return s return None
[ "def get_last_stage(self):\n return self.stages[-1] if self.stages else None", "def get_next_user(tgt_user):\n handles = sorted(list_users())\n try:\n current_idx = handles.index(tgt_user.handle)\n except ValueError:\n # what if we just deleted the target user?\n # inject it back into `handles' list and try again\n handles = sorted(handles + [tgt_user.handle])\n current_idx = handles.index(tgt_user.handle)\n idx = min(current_idx + 1, len(handles) - 1)\n try:\n return get_user(handles[idx])\n except KeyError:\n # and what if we deleted the last user of the list?\n # take the previous one.\n return get_user(handles[idx - 1])", "def find_nextlocation(request, user):\n if not user.last_login:\n # Redirect to profile on first login\n return reverse(\"core:user_index\")\n nextlocation = request.POST.get(\"next\", request.GET.get(\"next\"))\n condition = (\n nextlocation and\n url_has_allowed_host_and_scheme(nextlocation, request.get_host())\n )\n if condition:\n return nextlocation\n if request.user.role == \"SimpleUsers\":\n topredir = request.localconfig.parameters.get_value(\n \"default_top_redirection\")\n if topredir != \"user\":\n infos = exts_pool.get_extension_infos(topredir)\n nextlocation = infos[\"topredirection_url\"]\n else:\n nextlocation = reverse(\"core:user_index\")\n else:\n nextlocation = reverse(\"core:dashboard\")\n return nextlocation", "def get_stage(cls, name):\n return cls.pipeline_stages[name][0]", "def next(self):\n try:\n obj = self.get_next_by_created(hidden=False)\n return obj\n except Submission.DoesNotExist:\n return None", "def get_next_question(self, user):\n questions = self.get_ordered_question_list(required_questions=True)\n index = 1\n for i in questions:\n if not i.user_response_object(user):\n return {'index': index, 'question': i}\n index = index + 1\n return None", "def next_player(self):\n self.current_player = self.players[(self.current_player.identity.id + 1) % len(self.players)]", "def get_id_of_next_user_to_post(self) -> Union[int, None]:\n users_with_no_max_likes = [\n i for i in sorted(self, key=lambda x: x.my_likes_count, reverse=True) # returns new list\n if i.my_likes_count < self.max_likes_per_user\n ]\n\n if len(users_with_no_max_likes) > 0:\n return self.index(users_with_no_max_likes[0])\n else:\n return None", "def stage_number(self):\n return self._stage_number", "def get_stage(self):\n return self.get_prev_state().value - 99", "def get_next(self):\n if len(self.tasks) == 0:\n return None\n else:\n task = self.get_next_task(self.tasks)\n return task", "def get_single_user():", "def who_plays_next(self):\n if self.game_over:\n return self.no_one\n return self.players[self.total_play_count % 2]", "def get_next_session_name_stura():\n config = settings.VOTING_SESSIONS_CONFIG\n return get_next_session_name(config['weekday'])", "def nextLeader(self):\n leader = self.leader + 1\n if leader >= len(self.players):\n self.leader = 0\n else:\n self.leader = leader\n if self.debug: print self.players[self.leader].name, \"is the new leader.\"", "def _get_next(self, prev_job_id, state):\n plan_key = Job.encode_plan_key(prev_job_id, state)\n job_id = self.plan.get(plan_key)\n return job_id", "def next_player(self):\n current_player_index, = [idx for idx, color in enumerate(list(self.players.keys()))\n if self.current_player.color == color]\n next_player_index = (current_player_index + 1) % len(self.players)\n _, player = list(self.players.items())[next_player_index]\n return player", "def get_current_step(self) -> str:", "def group_leader(self) -> str 
| None:\n for child_player in self.get_child_players(True):\n # simply return the first (non passive) powered child player\n if child_player.is_passive:\n continue\n if not child_player.current_url:\n continue\n if not (self.active_queue and self.active_queue.stream):\n continue\n if self.active_queue.stream.stream_id not in child_player.current_url:\n continue\n return child_player.player_id\n # fallback to the first player\n return self.group_members[0] if self.group_members else None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the number of images in the dataset split.
def __len__(self) -> int: return self.num_images
[ "def get_number_of_images(self) -> int:\n return len(self._soup.find_all(\"img\"))", "def num_images(self):\n return len(self.get_image_keys(include_labels=False))", "def get_dataset_size(self):\n keys = self.get_keys()\n\n dataset_size = 0\n for key in keys:\n image = self.get_image(key)\n image.load_from_uri()\n dataset_size += image.get_blob().nbytes\n\n return dataset_size", "def num_imgs(self):\n return len(self.rec_imgs)", "def display_num_images(data_sets):\n\tfor dataset in data_sets:\n\t\tname = dataset.split_name\n\t\tprint(\"\\n\")\n\t\tprint(f\"Dataset: {name}\")\n\t\tfor folder, count in dataset.folder_counts:\n\t\t\tprint(f\"num images in {folder} folder = {count}\")", "def number_of_pictures():\n database = get_db()\n return database.number_of_pictures()", "def get_num_samples(self, split_name):", "def getNumberOfPatternImages(self) -> retval:\n ...", "def num_im_data(self):\n return len(self.rec_im_data)", "def itkImageBase3_GetImageDimension() -> \"unsigned int\":\n return _itkImagePython.itkImageBase3_GetImageDimension()", "def GetNumberOfDimensions(self) -> \"unsigned int\":\n return _ITKIOImageBaseBasePython.itkImageIOBase_GetNumberOfDimensions(self)", "def itkImageBase4_GetImageDimension() -> \"unsigned int\":\n return _itkImagePython.itkImageBase4_GetImageDimension()", "def getNumpyArraySize(image):\n return image.shape", "def get_num_batches(self, dataset: Dataset) -> int:\n raise NotImplementedError", "def get_num_data_items(dataset_directory):\n num_data_items = 0\n for filename in os.listdir(os.path.join(dataset_directory, \"non-shellcode\")):\n name, extension = os.path.splitext(filename)\n if extension == \".bin\":\n num_data_items += 1\n for filename in os.listdir(os.path.join(dataset_directory, \"shellcode\")):\n name, extension = os.path.splitext(filename)\n if extension == \".bin\":\n num_data_items += 1\n return num_data_items", "def itkImageBase2_GetImageDimension() -> \"unsigned int\":\n return _itkImagePython.itkImageBase2_GetImageDimension()", "def num_samples(self, split: str) -> int:\n raise NotImplementedError", "def __len__(self):\n length = int(np.ceil(len(self.samples) / float(self.batch_size)))\n return length", "def count_data(self):\n num_data = 0\n for cur_file_name in self.file_names:\n cur_file_features, cur_file_labels = self.load_data(cur_file_name)\n num_data += self.get_num_samples( cur_file_features )\n return num_data" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get all valid lengths
def getLengths(self): return [self.length]
[ "def lengths(self):\n return self._lengths.__copy__()", "def determine_lengths(dtrajs):\n if (isinstance(dtrajs[0],(int))):\n return len(dtrajs)*np.ones((1))\n lengths = np.zeros((len(dtrajs)))\n for i in range(len(dtrajs)):\n lengths[i] = len(dtrajs[i])\n return lengths", "def generate_random_lengths(self):\n random_lengths = []\n amount = self.spec.num_segments\n while amount > 0:\n random_lengths.append((random.uniform(self.spec.min_lengths[0], self.spec.max_lengths[1])))\n amount -= 1\n return tuple(random_lengths)", "def get_lengths(timestep):\n t = np.arange(-early_buffer, late_buffer, timestep)\n return len(t)", "def __len__(self):\n return sum(len(r) for r in self.ranges)", "def sequence_lengths(self):\n return self._sequence_lengths", "def barcode_lengths(self,lane=None):\n lengths = set()\n for barcode in self.barcodes(lane=lane):\n lengths.add(len(normalise_barcode(barcode)))\n return sorted(list(lengths))", "def infer_lengths_from_mask(mask):\n return mask.long().sum(1)", "def find_appropriate_length(self, data):\n total_length = data.shape[0]\n for i in range(total_length):\n trunc_data = data[i:, :]\n total_missing_values = np.isnan(trunc_data).sum(axis=0).sum()\n missing_percentage = float(total_missing_values)/(trunc_data.shape[0]*trunc_data.shape[1])\n print \"missing percentage is %f\" % missing_percentage\n if missing_percentage <= 0.15:\n print \"it's good length\"\n break\n return trunc_data", "def lengthary(self):\n I = self.linmax\n\n while 1 <= I:\n if not (self.Linary[I] == \" \"):\n return I\n I -= 1\n return 0", "def get_display_lengths(self):\n self.d_lengths = [50, 20, 20, 16, 16]\n if self.paths:\n self.d_lengths[0] = max([len(i) for i in self.paths.keys()])\n self.d_lengths.append(sum(self.d_lengths) + 2)", "def __len__(self):\n return len(self.bed_vals)", "def valid_length(self, length):\n if self.resample:\n length *= 2\n\n for _ in range(self.depth):\n length = math.ceil((length - self.kernel_size) / self.stride) + 1\n length = max(1, length)\n\n for idx in range(self.depth):\n length = (length - 1) * self.stride + self.kernel_size\n\n if self.resample:\n length = math.ceil(length / 2)\n return int(length)", "def test_length(self):\n\n self.assertEqual(self.tr.getLength(), 2)\n self.tr.setValue(0, 0.1, 0.2, 0.3)\n self.tr.setLength(3)\n self.assertEqual(self.tr.getLength(), 3)\n # Changing the length reset LUT values to identity.\n r, g, b = self.tr.getValue(0)\n self.assertEqual([r, g, b], [0, 0, 0])", "def valid_length(self, length):\r\n length = math.ceil(length * self.resample)\r\n for _ in range(self.depth):\r\n length = math.ceil((length - self.kernel_size) / self.stride) + 1\r\n length = max(length, 1)\r\n for _ in range(self.depth):\r\n length = (length - 1) * self.stride + self.kernel_size\r\n length = int(math.ceil(length / self.resample))\r\n return int(length)", "def __len__(self):\n return(len(self.nucleotides))", "def _ragged_size(table: ArrayLike) -> list:\n\n R, G, B = tsplit(table)\n\n R_len = R.shape[-1] - np.sum(np.isnan(R))\n G_len = G.shape[-1] - np.sum(np.isnan(G))\n B_len = B.shape[-1] - np.sum(np.isnan(B))\n\n return [R_len, G_len, B_len]", "def get_length_data_block_s(self):\r\n return self.length_data_block_s", "def test_00_lengths_behave_correctly(self):\n\t\trcl = regular.VOCAB['literal']\n\t\tspace = rcl()\n\t\tdel_ = rcl()\n\t\tnumber = regular.VOCAB['number']\n\t\trng = regular.VOCAB['range'](space, del_) # The ascii printing characters :)\n\t\tchar = regular.VOCAB[\"cls\"](rng)\n\t\tpair = regular.VOCAB['sequence'](char, 
char) # Two of them in a row\n\t\tthree = number()\n\t\tfour = number()\n\t\tnumbers = {three:3, four:4, space:32, del_:127}\n\t\trc = regular.RemoveCounts(numbers)\n\t\tsizer = regular.RuleAnalyzer({})\n\t\tfor regex, expected_size in [\n\t\t\t(char, 1),\n\t\t\t(pair, 2),\n\t\t\t(regular.VOCAB['alternation'](char, char), 1),\n\t\t\t(regular.VOCAB['alternation'](pair, pair), 2),\n\t\t\t(regular.VOCAB['alternation'](char, pair), None),\n\t\t\t(regular.VOCAB['hook'](char), None),\n\t\t\t(regular.VOCAB['star'](char), None),\n\t\t\t(regular.VOCAB['plus'](char), None),\n\t\t\t(regular.VOCAB['n_times'](char, four), 4),\n\t\t\t(regular.VOCAB['n_times'](pair, four), 8),\n\t\t\t(regular.VOCAB['n_to_m'](pair, three, four), None),\n\t\t]:\n\t\t\twith self.subTest(regex=regex):\n\t\t\t\tself.assertEqual(sizer(rc(regex)), expected_size)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get all decision byte positions
def getDecisionBytes(self): decBytes = self.hdr.getDecisionBytes()[0] decBytes = [byte - self.startPos for byte in decBytes] while len(decBytes) > 0 and decBytes[-1] >= self.length: decBytes.pop() while len(decBytes) > 0 and decBytes[0] < 0: decBytes.pop(0) return decBytes
[ "def _identify_all_possible_position(self):\n lign = 1\n index_number = 1\n while lign < 16:\n column = 1\n while column < 16:\n self.all_position.append(index_number)\n column += 1\n index_number += 1\n index_number += 85\n lign += 1", "def _getPtychographyPositions(self) -> np.ndarray:\n\n p1 = self._scan_params.scan_area_buffer_npix\n p2 = self._probe_params.npix - p1 - self._obj_params.obj_w_border_npix\n positions_x = np.arange(p1, p2, self._scan_params.scan_step_npix)\n positions = []\n\n for r in positions_x:\n for c in positions_x:\n positions.append([r,c])\n return np.array(positions)", "def _getbytepos(self) -> int:\n if self._pos % 8:\n raise ByteAlignError(\"Not byte aligned when using bytepos property.\")\n return self._pos // 8", "def encode_c_positions(seq):\n indexes = \"\"\n prev_index = 0\n index = seq.find(\"C\",prev_index)\n offset = index + 34\n while True:\n if index < 0:\n break\n while offset > 255:\n indexes += chr(255)\n offset -= 255\n indexes += chr(offset)\n\n prev_index = index + 1\n index = seq.find(\"C\",prev_index)\n offset = index - prev_index + 34\n return indexes", "def locOfZeroes(self):\n locs = []\n for i in range(4):\n for j in range(4):\n if self.v[i][j] == 0:\n locs.append([i,j])\n return locs", "def __find_control_qubits(self):\r\n if self.n_control == 1:\r\n return np.array([0])\r\n else:\r\n control_qubit_indices = np.arange(self.n_control)\r\n\r\n # Exclude the item of self.empty_qw because it's the number of empty\r\n # lines between the last control and first target qubit\r\n control_qubit_indices[1:] = control_qubit_indices[1:] + self.empty_qw[:-1]\r\n\r\n return control_qubit_indices", "def get_ctcf_indices(self):\n\n \"gets CTCF positions\"\n ctcf_ob = TFChip(cfg, chr)\n data = ctcf_ob.get_ctcf_data()\n data = data.filter(['start'], axis=1)\n\n \"converts to cumulative indices\"\n cum_pos = get_cumpos(self.cfg, self.chr)\n data[\"start\"] = data[\"start\"] + cum_pos\n indices = np.array(data[\"start\"])\n return indices", "def positions(self):\n return get_positions(as_numpy=True).reshape((self.natom, 3))", "def listof_positions(self):\n l = []\n for rnum in range(self.rnum_min, self.rnum_max+1):\n if rnum in self._pieces:\n l.append(rnum)\n return l", "def get_bytes(self):\n raw = [self.frame_id] + [self.get_control()] + self.payload + self.checksum()\n self.stuffed = [self.HEADER_BYTE, self.HEADER_BYTE, self.HEADER_BYTE]\n\n count = 0\n\n for i in raw:\n self.stuffed.append(i)\n if i == self.HEADER_BYTE:\n count += 1\n if count == 2:\n self.stuffed.append(self.STUFF_BYTE)\n count = 0\n else:\n count = 0\n\n self.stuffed.append(self.EOF_BYTE)\n\n return self.stuffed", "def getFaceIndices(self) -> \"int const *\":\n return _coin.SoGlyph_getFaceIndices(self)", "def getCprMask(self) -> List[int]:\n ...", "def bytes(self):\n \n # Remove all spaces from the contents of the hex view.\n contents = self.hex_view()\n \n # Every two hex digits represents a single byte.\n byte_values = [-1 if contents[i:i+2] == b'??' 
\n else int(contents[i:i+2], 16) \n for i in range(0, len(contents), 2)]\n\n # The first four bytes of every 20 bytes contains an address, which\n # are not useful for analysis.\n byte_values = [byte_values[i] for i in range(len(byte_values))\n if i % 20 >= 4]\n \n return byte_values", "def _freePos(self):\n res = []\n for i, row in enumerate(self.mazeTable):\n for j, p in enumerate(row):\n if p == False:\n res.append((i, j))\n return res", "def _getOffsets(self, mapped, magicbytes, start = None):\n if start is None:\n start = 0\n else:\n start -= len(magicbytes)\n\n start = mapped.find(magicbytes, start)\n while True:\n end = mapped.find(magicbytes, start + len(magicbytes))\n if end == -1:\n yield (start + len(magicbytes), mapped.size())\n raise StopIteration\n\n yield (start + len(magicbytes), end)\n start = end", "def get_available_positions(self):\n available_positions = []\n for y, line in enumerate(self.lines_list):\n for x, char in enumerate(line):\n if char == \" \":\n available_positions.append((x, y))\n return available_positions", "def getTexIndices(self) -> \"int32_t const *\":\n return _coin.SoConvexDataCache_getTexIndices(self)", "def label_vector(self):\n labels = np.array(self.label.get_fdata().flatten(), dtype=int)\n labels = np.array(labels == 47, dtype=int) + np.array(labels == 48, dtype=int)\n\n return labels", "def getOperandIndices(self) -> List[int]:\n ..." ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get all extract byte positions
def getExtractBytes(self): extBytes = self.hdr.getExtractBytes() extBytes = [byte - self.startPos for byte in extBytes] while len(extBytes) > 0 and extBytes[-1] >= self.length: extBytes.pop() while len(extBytes) > 0 and extBytes[0] < 0: extBytes.pop(0) return extBytes
[ "def _getOffsets(self, mapped, magicbytes, start = None):\n if start is None:\n start = 0\n else:\n start -= len(magicbytes)\n\n start = mapped.find(magicbytes, start)\n while True:\n end = mapped.find(magicbytes, start + len(magicbytes))\n if end == -1:\n yield (start + len(magicbytes), mapped.size())\n raise StopIteration\n\n yield (start + len(magicbytes), end)\n start = end", "def _index_ek80(self):\n ind2 = []\n with open(self.fn ,'rb') as bin_file:\n #bin_file.seek(7)\n position = bin_file.tell()\n \n raw = bin_file.read(self.BLOCK_SIZE)\n while len(raw) > 4:\n \n for match in re.finditer(b'NME0|XML0|RAW3|TAG0|self.mru0|FIL1', raw):\n \n if match:\n if match.span()[0] >= 4:\n l = unpack('i', raw[match.span()[0]-self.LENGTH_SIZE : match.span()[0]])[0]\n ind2.append([match.group(), position + match.span()[0],l])\n else:\n bin_file.seek(position + self.BLOCK_SIZE - 4)\n position = bin_file.tell()\n # Read the next block for regex search\n bin_file.seek(position - 4)\n position = position - 4\n raw = bin_file.read(self.BLOCK_SIZE)\n idx = pd.DataFrame(ind2, columns=['datagram','start','length'])\n return(idx)", "def _get_position_frames(self):\n \n f = open( self.filename)\n f.seek(self._first_line) # start reading after header\n lines_per_frame = self.n_chunks + 1\n offsets = []\n counter = 0\n \n line = True\n while line:\n if not counter % lines_per_frame:\n offsets.append(int(f.tell()))\n line = f.readline()\n counter += 1\n array_offsets = offsets[:-1] # last is EOF\n self.offsets = np.array(array_offsets, dtype = int)\n \n f.close()", "def _getPtychographyPositions(self) -> np.ndarray:\n\n p1 = self._scan_params.scan_area_buffer_npix\n p2 = self._probe_params.npix - p1 - self._obj_params.obj_w_border_npix\n positions_x = np.arange(p1, p2, self._scan_params.scan_step_npix)\n positions = []\n\n for r in positions_x:\n for c in positions_x:\n positions.append([r,c])\n return np.array(positions)", "def get_pos(fai, chromosome, start, end):\n chrom = fai.records[chromosome]\n fai_entry_length = chrom.length\n fai_entry_offset = chrom.offset\n fai_entry_line_length = chrom.line_length\n fai_entry_line_length_bytes = chrom.line_length_bytes\n seq_len = end - start\n line_ratio = fai_entry_line_length * (fai_entry_line_length_bytes - fai_entry_line_length)\n newlines_total = int(fai_entry_length / line_ratio)\n newlines_before = 0\n if start > 0:\n newlines_before = int(start / line_ratio)\n newlines_to_end = int(end / line_ratio)\n byte_len_seq = newlines_to_end - newlines_before + seq_len\n byte_start = fai_entry_offset + newlines_before + start\n byte_end = fai_entry_offset + newlines_total + fai_entry_length\n return byte_start, byte_end, byte_len_seq", "def _getbytepos(self) -> int:\n if self._pos % 8:\n raise ByteAlignError(\"Not byte aligned when using bytepos property.\")\n return self._pos // 8", "def _inspect_binary_data(self):\n\n found_pattern = {}\n\n for class_name, data in self.swf.binary_data.items():\n self.ml.debug(\"Inspecting {}'s binary data\".format(class_name))\n\n offset = data.find(self.pattern)\n\n if offset >= 0:\n found_pattern[class_name] = offset\n\n return found_pattern", "def determine_frame_positions(self):\n self.blob_file.seek(0, 0)\n with ignored(struct.error):\n while True:\n pointer_position = self.blob_file.tell()\n length = struct.unpack(\"<i\", self.blob_file.read(4))[0]\n self.blob_file.seek(length - 4, 1)\n self.frame_positions.append(pointer_position)\n self.blob_file.seek(0, 0)\n log.info(\"Found {0} 
frames.\".format(len(self.frame_positions)))", "def get_bytes(self):\n raw = [self.frame_id] + [self.get_control()] + self.payload + self.checksum()\n self.stuffed = [self.HEADER_BYTE, self.HEADER_BYTE, self.HEADER_BYTE]\n\n count = 0\n\n for i in raw:\n self.stuffed.append(i)\n if i == self.HEADER_BYTE:\n count += 1\n if count == 2:\n self.stuffed.append(self.STUFF_BYTE)\n count = 0\n else:\n count = 0\n\n self.stuffed.append(self.EOF_BYTE)\n\n return self.stuffed", "def bytes(self):\n \n # Remove all spaces from the contents of the hex view.\n contents = self.hex_view()\n \n # Every two hex digits represents a single byte.\n byte_values = [-1 if contents[i:i+2] == b'??' \n else int(contents[i:i+2], 16) \n for i in range(0, len(contents), 2)]\n\n # The first four bytes of every 20 bytes contains an address, which\n # are not useful for analysis.\n byte_values = [byte_values[i] for i in range(len(byte_values))\n if i % 20 >= 4]\n \n return byte_values", "def extract_from_packet(x, start=0, end=0):\n x = bytes(x)\n l = len(x)\n i = start\n body = []\n\n if end == 0:\n end = l\n\n while i < end:\n for j in range(16):\n if i + j < end:\n body.append(\"%02X\" % x[i + j])\n # print(\"%02X\" % x[i + j], end=\" \")\n i += 16\n return body", "def extract_coords(self, chunk):\n x0 = chunk['left']\n x1 = x0 + chunk['width']\n y0 = chunk['top']\n y1 = y0 + chunk['height']\n return Coords(x0, y0, x1, y1)", "def _identify_all_possible_position(self):\n lign = 1\n index_number = 1\n while lign < 16:\n column = 1\n while column < 16:\n self.all_position.append(index_number)\n column += 1\n index_number += 1\n index_number += 85\n lign += 1", "def encode_c_positions(seq):\n indexes = \"\"\n prev_index = 0\n index = seq.find(\"C\",prev_index)\n offset = index + 34\n while True:\n if index < 0:\n break\n while offset > 255:\n indexes += chr(255)\n offset -= 255\n indexes += chr(offset)\n\n prev_index = index + 1\n index = seq.find(\"C\",prev_index)\n offset = index - prev_index + 34\n return indexes", "def find(self, str):\n return [m.start(0) + self.preLength for m in\n self.re.finditer(str)]", "def extract_blobs(msg):\n blobs = []\n i = msg.find(CRYPTO_MIRROR_HEADER, i)\n while i != -1:\n j = msg.find(CRYPTO_MIRROR_TRAILER, i)\n if (j != -1):\n if (j - i < BLOB_SIZE_LIMIT):\n blobs += msg[i:j]\n if len(blobs) > BLOB_LIMIT:\n break\n i = msg.find(CRYPTO_MIRROR_HEADER, i+1)\n return blobs", "def _get_buffer_ints(self, byte_buffer):\n\n int_buffer = []\n n = len(byte_buffer)\n\n # The byte_buffer should contain an even number of items.\n assert n % 2 == 0\n\n for i in range(0, n, 2):\n least_sig = ord(byte_buffer[i])\n most_sig = ord(byte_buffer[i+1])\n number = most_sig * 256 + least_sig\n int_buffer.append(number)\n\n return int_buffer", "def _get_positions(self):\n positions = ''\n for rotor in self.rotors:\n positions += rotor.get_position()\n return positions", "def pick_out42(s, clfs=classifiers):\n ind = []\n index = 0\n for fragment in [s[i:i + 42] for i in range(0, len(s), 42)]:\n if isb64(fragment, clfs=clfs):\n ind.append(index)\n index += 1\n return ind" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get all fields within the header
def getFields(self): raise RuntimeError('This function needs vetting') return self.hdr.getLookupLengthFields()
[ "def _basicFields(self):\n headers = self.message.getHeaders(False, *self._HEADERS)\n\n # Number of octets total\n size = self.message.getSize()\n\n major, minor = self.main, self.subtype\n\n # content-type parameter list\n unquotedAttrs = self._unquotedAttrs()\n\n return [\n major, minor, unquotedAttrs,\n headers.get('content-id'),\n headers.get('content-description'),\n headers.get('content-transfer-encoding'),\n size,\n ]", "def getFields(sorted=True):", "def fieldnames(self):\n column_headers = []\n with open(self.data, 'r+') as csv_file:\n # make csv_reader and get first row from iterator with next()\n csv_reader = csv.reader(csv_file)\n header_row = next(csv_reader)\n for column_header in header_row:\n column_headers.append(column_header)\n return column_headers", "def get_header_names(self) -> List[str]:\n\t\theaders = []\n\t\tfor f in self._fields:\n\t\t\theaders.append(f.field_name)\n\t\treturn headers", "def fields(self):\n yield from self._field_list", "def readHeader(lines):\n while 1:\n # skip blank line at top\n hd = lines.next().strip()\n if hd: break\n fieldNames = hd.split('\\t')\n\n while 1:\n bal = lines.next().strip()\n if bal: break\n dummy, dt, a = bal.split()\n dt = isoDate(dt)\n a = amt(a)\n\n hd = lines.next().strip() # skip blank line\n if hd: raise IOError, \"expected blank line; got\" + hd\n \n return fieldNames, dt, a", "def headers(self):\n header_list = []\n for index in range(self.size):\n kw = self[index]\n header_list.append( kw.header )\n return header_list", "def getRawFields(sorted=True):", "def get_info_from_header(fits):\n hdr = pyfits.getheader(fits, 1)\n odate = hdr['DATE-OBS']\n obsid = hdr['OBS_ID']\n expo = hdr['EXPOSURE']\n expo = float(expo)\n atemp = re.split('-', odate)\n year = float(atemp[0])\n mon = float(atemp[1])\n fyear = year + mon/12\n det = hdr['DETNAM'].lower()\n ra_pnt = hdr['RA_PNT']\n ra_pnt = float(ra_pnt)\n dec_pnt= hdr['DEC_PNT']\n dec_pnt= float(dec_pnt)\n\n return [odate, obsid, expo, fyear, det, ra_pnt, dec_pnt]", "def print_data_headers(self):\n for header in self.header_dict.keys():\n print header", "def _validate_and_get_extra_fields(self, header_fields):\n if \"email\" not in header_fields:\n raise KeyError\n if \"name\" not in header_fields:\n raise KeyError\n\n extra_fields = header_fields\n extra_fields.remove(\"email\")\n extra_fields.remove(\"name\")\n return extra_fields", "def field_names(self):\n return self._data.keys()", "def get_header(self, lines):\n lines = copy(lines)\n original_lines = copy(lines)\n header = []\n\n finished = False\n while not finished:\n element = self.get_element(lines)['element']\n background = self.get_background(lines)['background']\n\n if element is None and background is None and len(lines) > 0:\n header.append(lines[0])\n lines = lines[1:]\n else:\n finished = True\n\n return {\n 'header': header,\n 'remaining': lines,\n 'raw_input': original_lines,\n }", "def _get_data_headers(self):\n only_args = ['form_data_headers'] + self.only_args\n # Normal RDMBs\n try:\n qs = self.queryset.only(*only_args)\n qs = qs.distinct('form_data_headers')\n qs = [obj.form_data_headers for obj in qs]\n\n # Engines like SQLite\n except NotImplementedError:\n qs = self.queryset.only(*only_args)\n qs = [obj.form_data_headers for obj in qs]\n qs = list(set(qs))\n\n data_headers = {}\n for q in qs:\n try:\n headers = json.loads(q)\n data_headers.update(headers)\n except (ValueError, TypeError):\n pass\n\n return data_headers", "def list_headers():\n return (\n 'Name',\n 'Port',\n 'Scheme',\n 'PID',\n 
'Created',\n )", "def read_header(fits_file):\n\n head = {}\n F = pf.open(fits_file)\n H = F[0].header\n head['Ntot'] = H['N_TOT']\n head['Nmu'] = H['N_MU']\n head['Nsig'] = H['N_SIGMA']\n head['Nv'] = H['N_VOIGT']\n head['Ncoef'] = H['N_COEF']\n head['Nspa'] = H['N_SPARSE']\n head['mu'] = [H['MU1'], H['MU2']]\n head['sig'] = [H['SIGMA1'], H['SIGMA2']]\n head['z'] = F[1].data.field('redshift')\n F.close()\n return head", "def getLinesHeader(self):\n return self.__header.getLines()", "def __readHeaders(self, fh):\n fh.readline()\n fh.readline()\n \n headersStr = fh.readline()\n headers = [ s.strip() for s in headersStr[1:].split() ]\n unitsStr = fh.readline()\n units = [ s.strip() for s in unitsStr[1:].split() ]\n \n fh.readline()\n \n headers.pop(1)\n units[0] = 'mjd'\n units[1] = 'seconds'\n\n self.startDate = self.__getStartDate(fh)\n\n # Get a mapping of header names to column index\n headerDict = dict(list(zip(headers,list(range(len(headers))))))\n return (headerDict, units)", "def create_header_row(self):\n if not self._has_fields:\n raise AssertionError(\"No data fields have been added to the data row. Header row would be empty.\")\n else:\n header_list = [f.header for f in self.fields]\n return header_list" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
App factory to create the website.
def create_site(): if os.path.exists(instfolder): app = Flask('threecolor', instance_path=instfolder, instance_relative_config=True) # configure flask app from default settings, then overide with settings.cfg app.config.from_object('threecolor.configs.default_settings') app.config.from_pyfile('settings.cfg') # configure paths and folders according to instance path app.config['FLATPAGES_ROOT'] = os.path.join(app.instance_path, 'content') app.config['IMAGE_DIR'] = os.path.join(app.instance_path, 'images') app.config['FREEZER_DESTINATION'] = os.path.join(app.instance_path, app.config['BUILD_DIR']) from .site.coolviews import site, pages, freezer app.register_blueprint(site) pages.init_app(app) freezer.init_app(app) return app else: # app = Flask('threecolor') # # # configure flask app from default settings, then overide with settings.cfg # app.config.from_object('threecolor.configs.default_settings') misc.make_home(APP_ROOT) return app
[ "def software_factory(app, company_factory):\n pass", "def app(par=None):\n\n return Miniweb.get_instance(par)", "def make_site():\n\n def site(name=\"site1\", devices=None):\n \"\"\"Provide an instance of a Site model.\"\"\"\n if not devices:\n devices = []\n return Site(name=name, devices=devices)\n\n return site", "def getFactory(self):\n logPath = self.httpLog and self.httpLog.path\n appStore = getAppStore(self.store)\n return AxiomSite(appStore, IResource(appStore), logPath=logPath)", "def create_html_app(): # pragma: no cover\n app = webapp2.WSGIApplication(\n handlers.get_frontend_routes(), debug=utils.is_local_dev_server())\n gae_ts_mon.initialize(app, cron_module='backend')\n return app", "def build_app():\n app.route(\"/hello\")(_hello)\n app.route(\"/hostname\")(_hostname)\n return app", "def app():\n return app", "def app():\n app = create_app(TestConfig)\n return app", "def setup(self):\n self.site = SiteFactory(is_default_site=True)", "def create_app(models):\n app = Flask(__name__)\n # register models\n for model in models:\n app.add_url_rule('/{}/predict/'.format(model.name),\n '{}_predict'.format(model.name),\n predict_view(model), methods=['POST'])\n if model.probability:\n app.add_url_rule(\n '/{}/predict_proba/'.format(model.name),\n '{}_predict_proba'.format(model.name),\n predict_view(model, predict_method='predict_proba'),\n methods=['POST'])\n return app", "def create_web_server(app, section: str = \"web\", config: typing.Optional[dict] = None) -> aiohttp.web.UrlDispatcher:\n\tapp.add_module(Module)\n\twebsvc = app.get_service(\"asab.WebService\")\n\tcontainer = WebContainer(websvc, section, config=config)\n\treturn container.WebApp.router", "def create_app():\n app = Flask(__name__, instance_relative_config=True)\n app.config.from_object(config[SELECTED_CONFIG])\n db.init_app(app)\n app.register_blueprint(recipes)\n\n ma.init_app(app)\n Bootstrap(app)\n\n app.before_request(create_before_request(app))\n return app", "def app_factory(klass):\n pychron_plugin = PychronTasksPlugin()\n\n plugins = [\n CorePlugin(),\n myTasksPlugin(),\n pychron_plugin,\n LoggerPlugin(),\n UsersPlugin(),\n ]\n\n plugins.extend(get_hardware_plugins())\n plugins.extend(get_user_plugins())\n\n app = klass(plugins=plugins)\n\n # set key bindings\n update_key_bindings(pychron_plugin.actions)\n\n return app", "def web():\n from mephisto.client.full.server import app\n\n app.run(debug=False)", "def app():\n\n application = create_app()\n application.test_client_class = JSON_Client\n application.response_class = Load_JSON_Response\n return application", "def gen_user_app(user_app_name):\n\n doc = '''\n\"\"\"\n%s app.\n\"\"\"\nimport sys\nsys.dont_write_bytecode = True\nimport %s.urls\nfrom %s.urls import URLS\n\n__all__ = ['URLS']\n\nif __name__ == '__main__':\n pass\n''' % (user_app_name, user_app_name, user_app_name)\n\n return doc", "def webResourceFactory(staticfiles, datafiles=None):\n\n class Root(resource.Resource):\n\n def __init__(self, wsgi_resource):\n resource.Resource.__init__(self)\n self.wsgi_resource = wsgi_resource\n\n def getChild(self, path, request):\n path0 = request.prepath.pop(0)\n request.postpath.insert(0, path0)\n return self.wsgi_resource\n\n\n # The kernel server does not require django, so this step is not\n # required for every import of service.py (this file)\n from django.core.handlers.wsgi import WSGIHandler\n from twisted.python import threadpool\n\n pool = threadpool.ThreadPool()\n reactor.callWhenRunning(pool.start)\n reactor.addSystemEventTrigger('after', 
'shutdown', pool.stop)\n\n django_wsgi_resource = wsgi.WSGIResource(reactor, pool, WSGIHandler())\n resource_root = Root(django_wsgi_resource)\n\n static_resource = static.File(staticfiles)\n\n\n backend_bus = backend.BackendBus()\n\n resource_root.putChild(\"asyncnotebook\", backend.EngineBusAdapter(backend_bus))\n resource_root.putChild(\"static\", static_resource)\n \n if datafiles: \n data_resource = static.File(datafiles)\n resource_root.putChild(\"data\", data_resource)\n \n return resource_root", "def getApplication():", "def app_new(input_params={}, always_retry=False, **kwargs):\n return DXHTTPRequest('/app/new', input_params, always_retry=always_retry, **kwargs)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determine whether the node is terminal. If there is no left node and no right node, it's a terminal node. If either is non-None, it is a parent to something.
def is_terminal(self): return self.left is None and self.right is None
[ "def is_terminal(self):\n return isinstance(self, Terminal)", "def is_right_child(self):\n return self.node_type() == -1", "def is_terminal(item):\n return hasattr(item, '__hash__') and not isinstance(item, LexNonterminal)", "def is_right_child(self):\n return self.parent and self.parent.right_child == self", "def is_root(self,node) :\n if node.parent is None:\n return True\n else :\n return False", "def is_rightmost(self) -> bool:\n if self.parent is None: return True\n return self.parent.children[-1] is self", "def is_left_child(self):\n return self.node_type() == 1", "def is_right_hand_side(self):\n rh_node = tree.Node(\"\", self._state.RIGHTHANDSIDE)\n self._expected_state.append(self._state.RIGHTHANDSIDE)\n\n valid, result = self.is_epsilon()\n if(valid):\n rh_node.add_child(result)\n self.next_word() #epsilon consumes input in right hand side\n self._expected_state.pop()\n return (True, rh_node)\n valid, result = self.is_symbol_list()\n if (valid) or (self.word[\"type\"] == self.TOKENS.EPSILON):\n rh_node.add_child(result)\n self._expected_state.pop()\n return (True, rh_node)\n return self.fail()", "def has_right_child(self):\n return self.right_child() is not None", "def _findTerminalNodeOf(self, word):\n\n lastNode = self._findLastNodeOf(word)\n if lastNode:\n if lastNode.isEnd:\n return lastNode\n return None", "def IsNode(self, *args):\n return _snap.TNEANet_IsNode(self, *args)", "def IsNode(self, *args):\n return _snap.TBPGraph_IsNode(self, *args)", "def has_both_child(self):\n return self.has_left_child() and self.has_right_child()", "def IsNode(self, *args):\n return _snap.TUNGraph_IsNode(self, *args)", "def is_left_child(self):\n return self.parent and self.parent.left_child == self", "def IsNode(self, *args):\n return _snap.PUNGraph_IsNode(self, *args)", "def IsNode(self, *args):\n return _snap.PNGraph_IsNode(self, *args)", "def IsNode(self, *args):\n return _snap.TNGraph_IsNode(self, *args)", "def IsNode(self, *args):\n return _snap.PNEANet_IsNode(self, *args)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates multiple clones according to config using a `model_fn`. The returned values of `model_fn(args, kwargs)` are collected, along with the scope and device used to create each clone, in a namedtuple `Clone(outputs, scope, device)`.
def create_clones(config, model_fn, args=None, kwargs=None): clones = [] args = args or [] kwargs = kwargs or {} with slim.arg_scope([slim.model_variable, slim.variable], device=config.variables_device()): # Create clones. for i in range(0, config.num_clones): with tf.name_scope(config.clone_scope(i)) as clone_scope: clone_device = config.clone_device(i) with tf.device(clone_device): with tf.compat.v1.variable_scope(tf.compat.v1.get_variable_scope(), reuse=True if i > 0 else None): outputs = model_fn(*args, **kwargs) clones.append(Clone(outputs, clone_scope, clone_device)) return clones
[ "def deploy(config,\n model_fn,\n args=None,\n kwargs=None,\n optimizer=None,\n summarize_gradients=False):\n # Gather initial summaries.\n summaries = set(tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.SUMMARIES))\n\n # Create Clones.\n clones = create_clones(config, model_fn, args, kwargs)\n first_clone = clones[0]\n\n # Gather update_ops from the first clone. These contain, for example,\n # the updates for the batch_norm variables created by model_fn.\n update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS,\n first_clone.scope)\n\n train_op = None\n total_loss = None\n with tf.device(config.optimizer_device()):\n if optimizer:\n # Place the global step on the device storing the variables.\n with tf.device(config.variables_device()):\n global_step = tf.compat.v1.train.get_or_create_global_step()\n\n # Compute the gradients for the clones.\n total_loss, clones_gradients = optimize_clones(clones, optimizer)\n\n if clones_gradients:\n if summarize_gradients:\n # Add summaries to the gradients.\n summaries |= set(_add_gradients_summaries(clones_gradients))\n\n # Create gradient updates.\n grad_updates = optimizer.apply_gradients(clones_gradients,\n global_step=global_step)\n update_ops.append(grad_updates)\n\n update_op = tf.group(*update_ops)\n with tf.control_dependencies([update_op]):\n train_op = tf.identity(total_loss, name='train_op')\n else:\n clones_losses = []\n regularization_losses = tf.get_collection(\n tf.GraphKeys.REGULARIZATION_LOSSES)\n for clone in clones:\n with tf.name_scope(clone.scope):\n clone_loss = _gather_clone_loss(clone, len(clones),\n regularization_losses)\n if clone_loss is not None:\n clones_losses.append(clone_loss)\n # Only use regularization_losses for the first clone\n regularization_losses = None\n if clones_losses:\n total_loss = tf.add_n(clones_losses, name='total_loss')\n\n # Add the summaries from the first clone. 
These contain the summaries\n # created by model_fn and either optimize_clones() or _gather_clone_loss().\n summaries |= set(tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.SUMMARIES,\n first_clone.scope))\n\n if total_loss is not None:\n # Add total_loss to summary.\n summaries.add(tf.compat.v1.summary.scalar('total_loss', total_loss))\n\n if summaries:\n # Merge all summaries together.\n summary_op = tf.compat.v1.summary.merge(list(summaries),\n name='summary_op')\n else:\n summary_op = None\n\n return DeployedModel(train_op, summary_op, total_loss, clones)", "def __deepcopy__(self, memo):\n unique_name = datetime.now().strftime(\"%Y%m%d_%H%M%S_%f\")\n environment_model_copy = MultilayerNNEnvModel('env_copy_'+unique_name,\n self.sess,\n self.observation_space,\n self.action_space,\n self.learning_rate,\n self.env_model_save_path,\n False)\n env_model_weights = []\n for param in self.env_model_params:\n env_model_weights.append(param.eval(session = self.sess))\n environment_model_copy.set_env_model_weights(env_model_weights)\n return environment_model_copy", "def multi_gpu_model(model, gpus):\n if isinstance(gpus, (list, tuple)):\n num_gpus = len(gpus)\n target_gpu_ids = gpus\n else:\n num_gpus = gpus\n target_gpu_ids = range(num_gpus)\n\n def get_slice(data, i, parts):\n shape = tf.shape(data)\n batch_size = shape[:1]\n input_shape = shape[1:]\n step = batch_size // parts\n if i == num_gpus - 1:\n size = batch_size - step * i\n else:\n size = step\n size = tf.concat([size, input_shape], axis=0)\n stride = tf.concat([step, input_shape * 0], axis=0)\n start = stride * i\n return tf.slice(data, start, size)\n\n all_outputs = []\n for i in range(len(model.outputs)):\n all_outputs.append([])\n\n # Place a copy of the model on each GPU,\n # each getting a slice of the inputs.\n for i, gpu_id in enumerate(target_gpu_ids):\n with tf.device('/cpu:0'):\n inputs = []\n # Retrieve a slice of the input on the CPU\n for x in model.inputs:\n input_shape = tuple(x.get_shape().as_list())[1:]\n slice_i = tf.keras.layers.Lambda(\n get_slice, output_shape=input_shape, arguments={'i': i, 'parts': num_gpus})(x)\n inputs.append(slice_i)\n\n with tf.device('/gpu:%d' % gpu_id):\n with tf.name_scope('replica_%d' % gpu_id):\n # Apply model on slice (creating a model replica on the target device).\n outputs = model(inputs)\n if not isinstance(outputs, list):\n outputs = [outputs]\n\n # Save the outputs for merging back together later.\n for o in range(len(outputs)):\n all_outputs[o].append(outputs[o])\n\n # Merge outputs on CPU.\n with tf.device('/cpu:0'):\n merged = []\n for name, outputs in zip(model.output_names, all_outputs):\n merged.append(tf.keras.layers.concatenate(outputs, axis=0, name=name))\n return tf.keras.Model(model.inputs, merged)", "def multi_config():\n\n class MultiModel(TFModel):\n model_args = Config()\n\n @classmethod\n def default_config(cls):\n config = TFModel.default_config()\n config['body/block'] = {}\n return config\n\n @classmethod\n def initial_block(cls, inputs, name='initial_block', **kwargs):\n kwargs = cls.fill_params(name, **kwargs)\n cls.model_args['initial_block'] = kwargs\n return inputs\n\n @classmethod\n def body(cls, inputs, name='body', **kwargs):\n kwargs = cls.fill_params(name, **kwargs)\n cls.model_args['body'] = kwargs\n\n block_args = cls.pop('block', kwargs)\n block_args = {**kwargs, **block_args}\n\n input_1, _ = inputs\n inputs = cls.block(input_1, **block_args)\n\n return inputs\n\n @classmethod\n def block(cls, input_1, **kwargs):\n kwargs = 
cls.fill_params('body/block', **kwargs)\n cls.model_args['block'] = kwargs\n return input_1\n\n @classmethod\n def head(cls, inputs, name='head', **kwargs):\n inputs = super().head(inputs, name='head', **kwargs)\n kwargs = cls.fill_params(name, **kwargs)\n cls.model_args['head'] = kwargs\n return inputs\n\n config = {'inputs': {'images_1': {'shape': (10, 10, 3)},\n 'images_2': {'shape': (10, 10, 3)},\n 'labels': {'classes': 2}},\n 'initial_block/inputs': ['images_1', 'images_2'],\n 'head': {'layout': 'f', 'units': 2},\n 'loss': 'ce'}\n\n return MultiModel, config", "def create_env_model(self):\n \n observation_inputs = tf.placeholder(tf.float32, shape=(None, self.observation_dim), name = 'EnvModel_observation_input')\n obs_h1 = layers.Dense(units = 100, activation = tf.nn.relu, \n kernel_initializer = tf.initializers.truncated_normal)(observation_inputs)\n obs_h1 = layers.BatchNormalization()(obs_h1)\n obs_h1 = layers.Dropout(0.5)(obs_h1)\n \n action_inputs = tf.placeholder(tf.float32, shape=(None, self.action_dim), name = 'EnvModel_action_input')\n act_h1 = layers.Dense(units = 100, activation = tf.nn.relu, \n kernel_initializer = tf.initializers.truncated_normal)(action_inputs)\n act_h1 = layers.BatchNormalization()(act_h1)\n act_h1 = layers.Dropout(0.5)(act_h1)\n \n merged = tf.concat([obs_h1, act_h1], axis=1, name = 'EnvModel_merged_input')\n \n merged_h1 = layers.Dense(units = 100, activation = tf.nn.relu,\n kernel_initializer = tf.initializers.truncated_normal)(merged)\n merged_h1 = layers.BatchNormalization()(merged_h1)\n merged_h1 = layers.Dropout(0.5)(merged_h1)\n \n observation_output = layers.Dense(units = self.observation_dim, \n activation = tf.nn.relu,\n kernel_initializer = tf.initializers.truncated_normal)(merged_h1)\n \n reward_output = layers.Dense(units = 1,\n kernel_initializer = tf.initializers.truncated_normal)(merged_h1)\n \n return observation_inputs, action_inputs,\\\n observation_output, reward_output", "def multi_pvc_clone_factory(pvc_clone_factory, pod_factory):\n\n def factory(\n pvc_obj,\n status=constants.STATUS_BOUND,\n clone_name=None,\n storageclass=None,\n size=None,\n access_mode=None,\n volume_mode=None,\n wait_each=False,\n attach_pods=False,\n verify_data_integrity=False,\n file_name=None,\n ):\n \"\"\"\n Args:\n pvc_obj (list): List PVC object from which clone has to be created\n status (str): If provided then factory waits for cloned PVC to\n reach the desired state\n clone_name (str): Name to be provided for cloned PVC\n storageclass (str): storage class to be used for cloned PVC\n size (int): The requested size for the cloned PVC. This should\n be same as the size of parent PVC for a successful clone\n access_mode (str): This decides the access mode to be used for\n the cloned PVC. eg: ReadWriteOnce, ReadOnlyMany, ReadWriteMany\n volume_mode (str): Volume mode for PVC. 
This should match the\n volume mode of parent PVC\n wait_each(bool): True to wait for each PVC to be in status 'status'\n before creating next PVC, False otherwise\n attach_pods(bool): True if we want to attach PODs to the cloned PVCs, False otherwise.\n verify_data_integrity(bool): True if we want to verify data integrity by checking the existence and md5sum\n of file in the cloned PVC, False otherwise.\n file_name(str): The name of the file for which data integrity is to be checked.\n\n Returns:\n PVC: List PVC instance\n\n \"\"\"\n cloned_pvcs = []\n\n status_tmp = status if wait_each else \"\"\n\n log.info(\"Started creation of clones of the PVCs.\")\n for obj in pvc_obj:\n # Create clone\n clone_pvc_obj = pvc_clone_factory(\n pvc_obj=obj,\n clone_name=clone_name,\n storageclass=storageclass,\n size=size,\n access_mode=access_mode,\n volume_mode=volume_mode,\n status=status_tmp,\n )\n cloned_pvcs.append(clone_pvc_obj)\n\n if status and not wait_each:\n for cloned_pvc in cloned_pvcs:\n helpers.wait_for_resource_state(cloned_pvc, status)\n\n log.info(\"Successfully created clones of the PVCs.\")\n\n if attach_pods:\n # Attach PODs to cloned PVCs\n cloned_pod_objs = list()\n for cloned_pvc_obj in cloned_pvcs:\n if cloned_pvc_obj.get_pvc_vol_mode == constants.VOLUME_MODE_BLOCK:\n cloned_pod_objs.append(\n pod_factory(\n pvc=cloned_pvc_obj,\n raw_block_pv=True,\n status=constants.STATUS_RUNNING,\n pod_dict_path=constants.CSI_RBD_RAW_BLOCK_POD_YAML,\n )\n )\n else:\n cloned_pod_objs.append(\n pod_factory(pvc=cloned_pvc_obj, status=constants.STATUS_RUNNING)\n )\n\n # Verify that the fio exists and md5sum matches\n if verify_data_integrity:\n verify_data_integrity_for_multi_pvc_objs(\n cloned_pod_objs, pvc_obj, file_name\n )\n\n return cloned_pvcs, cloned_pod_objs\n\n return cloned_pvcs\n\n return factory", "def make_model_fn(arch_fn):\n def model_fn(features, labels, mode, params):\n learning_rate = params['learning_rate']\n is_batch_norm = params.get('is_batch_norm', False)\n\n loss, pred = arch_fn(features, labels, mode, params)\n\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n\n if is_batch_norm:\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())\n else:\n train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())\n\n predictions = {'prob': pred}\n eval_metric_ops = {\n 'accuracy': tf.metrics.accuracy(labels, tf.math.greater_equal(pred, 0.5)),\n 'auc': tf.metrics.auc(labels, pred),\n }\n\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions,\n loss=loss,\n train_op=train_op,\n eval_metric_ops=eval_metric_ops)\n\n return model_fn", "def iterative_process_builder(\n model_fn: Callable[[], tff.learning.Model],\n ) -> tff.templates.IterativeProcess:\n\n logging.info('Trainable weights:')\n for weight in model_fn().weights.trainable:\n logging.info('name: %s shape: %s', weight.name, weight.shape)\n\n if FLAGS.uniform_weighting:\n client_weighting = tff.learning.ClientWeighting.UNIFORM\n elif FLAGS.task == 'shakespeare' or FLAGS.task == 'stackoverflow_nwp':\n\n def client_weighting(local_outputs):\n return tf.cast(tf.squeeze(local_outputs['num_tokens']), tf.float32)\n else:\n client_weighting = None\n\n if FLAGS.noise_multiplier is None:\n if FLAGS.uniform_weighting:\n aggregation_factory = tff.aggregators.UnweightedMeanFactory()\n else:\n aggregation_factory = tff.aggregators.MeanFactory()\n if FLAGS.clip is 
not None:\n if FLAGS.clip <= 0:\n raise ValueError('clip must be positive if clipping is enabled.')\n if FLAGS.adaptive_clip_learning_rate is None:\n clip = FLAGS.clip\n else:\n if FLAGS.adaptive_clip_learning_rate <= 0:\n raise ValueError('adaptive_clip_learning_rate must be positive if '\n 'adaptive clipping is enabled.')\n clip = tff.aggregators.PrivateQuantileEstimationProcess.no_noise(\n initial_estimate=FLAGS.clip,\n target_quantile=FLAGS.target_unclipped_quantile,\n learning_rate=FLAGS.adaptive_clip_learning_rate)\n aggregation_factory = tff.aggregators.clipping_factory(\n clip, aggregation_factory)\n else:\n if not FLAGS.uniform_weighting:\n raise ValueError(\n 'Differential privacy is only implemented for uniform weighting.')\n if FLAGS.noise_multiplier <= 0:\n raise ValueError('noise_multiplier must be positive if DP is enabled.')\n if FLAGS.clip is None or FLAGS.clip <= 0:\n raise ValueError('clip must be positive if DP is enabled.')\n if FLAGS.adaptive_clip_learning_rate is None:\n aggregation_factory = tff.aggregators.DifferentiallyPrivateFactory.gaussian_fixed(\n noise_multiplier=FLAGS.noise_multiplier,\n clients_per_round=FLAGS.clients_per_round,\n clip=FLAGS.clip)\n else:\n if FLAGS.adaptive_clip_learning_rate <= 0:\n raise ValueError('adaptive_clip_learning_rate must be positive if '\n 'adaptive clipping is enabled.')\n aggregation_factory = tff.aggregators.DifferentiallyPrivateFactory.gaussian_adaptive(\n noise_multiplier=FLAGS.noise_multiplier,\n clients_per_round=FLAGS.clients_per_round,\n initial_l2_norm_clip=FLAGS.clip,\n target_unclipped_quantile=FLAGS.target_unclipped_quantile,\n learning_rate=FLAGS.adaptive_clip_learning_rate)\n\n #add stuff for compression\n if FLAGS.use_compression:\n # We create a `MeasuredProcess` for broadcast process and a\n # `MeasuredProcess` for aggregate process by providing the\n # `_broadcast_encoder_fn` and `_mean_encoder_fn` to corresponding utilities.\n # The fns are called once for each of the model weights created by\n # tff_model_fn, and return instances of appropriate encoders.\n encoded_broadcast_process = (\n tff.learning.framework.build_encoded_broadcast_process_from_model(\n tff_model_fn, example_broadcast_encoder_fn))\n encoded_mean_process = (\n tff.learning.framework.build_encoded_mean_process_from_model(\n tff_model_fn, example_mean_encoder_fn))\n else:\n encoded_broadcast_process = None\n encoded_mean_process = None\n\n return tff.learning.build_federated_averaging_process(\n model_fn=model_fn,\n server_optimizer_fn=server_optimizer_fn,\n client_weighting=client_weighting,\n client_optimizer_fn=client_optimizer_fn,\n model_update_aggregation_factory=aggregation_factory,\n aggregation_process=encoded_mean_process,\n broadcast_process=encoded_broadcast_proces)", "def clone(self, gpu=None):\n # print(\"\\u001b[31;1m|py|\\u001b[0m\\u001b[37m\", \"ModelInterface::\", inspect.currentframe().f_code.co_name)\n\n mi = ModelInterface(self.option_map)\n for key, model in self.models.items():\n mi.models[key] = model.clone(gpu=gpu)\n if key in self.optimizers:\n # Same parameters.\n mi.optimizers[key] = torch.optim.Adam(\n mi.models[key].parameters())\n new_optim = mi.optimizers[key]\n old_optim = self.optimizers[key]\n\n new_optim_params = new_optim.param_groups[0]\n old_optim_params = old_optim.param_groups[0]\n # Copy the parameters.\n for k in new_optim_params.keys():\n if k != \"params\":\n new_optim_params[k] = old_optim_params[k]\n # Copy the state\n '''\n new_optim.state = { }\n for k, v in old_optim.state.items():\n if 
isinstance(v, (int, float, str)):\n new_optim.state[k] = v\n else:\n new_optim.state[k] = v.clone()\n if gpu is not None:\n new_optim.state[k] = new_optim.state[k].cuda(gpu)\n '''\n return mi", "def _make_model(sources_shape, targets_shape):\n sources = tf.keras.Input(sources_shape, name='sources')\n targets = tf.keras.Input(targets_shape, name='targets')\n outputs = pairwise_distance_lib.PairwiseDistance(\n configs.DistanceConfig(\n distance_type=configs.DistanceType.KL_DIVERGENCE,\n reduction=tf.compat.v1.losses.Reduction.NONE,\n sum_over_axis=-1))(sources, targets)\n return tf.keras.Model(inputs=[sources, targets], outputs=outputs)", "def generate_ensemble_model_and_batch(\n self,\n ):\n batch_x = tf.constant([[1, 1], [0, 0]], dtype=tf.float32)\n models = []\n for k in range(2):\n model = tf.keras.layers.Dense(2, activation='softmax')\n model(batch_x)\n if k == 0:\n weights = np.array([[1, 3], [0, 0]], dtype=np.float32)\n else:\n weights = np.array([[2, 1], [0, 1]], dtype=np.float32)\n bias = np.zeros(2, dtype=np.float32)\n model.set_weights([weights, bias])\n models.append(model)\n return models, batch_x", "def __init__(self, env_make_fn, num_envs=32):\n self.num_envs = num_envs\n self.envs = [env_make_fn() for _ in range(self.num_envs)]\n self.num_actions = self.envs[0].action_space.n", "def model_fn(features, labels, mode, params, config):\n with tf.name_scope('params'):\n for name, value in six.iteritems(params):\n tf.add_to_collection('params', tf.constant(name=name, value=value))\n\n # The Estimator model_fn property always returns a wrapped \"public\"\n # model_fn. The public wrapper doesn't take \"params\", and passes the params\n # from the Estimator constructor into the internal model_fn. Therefore, it\n # only matters that we pass the params to the Estimator below.\n return estimator.model_fn(features, labels, mode, config)", "def build_target(self):\n for model_name in self.src_models:\n self.dst_models[model_name] = build_model(\n **self.dst_kwargs[model_name])", "def build_models(self, _, __) -> Tuple[Model, Model]:\n\n self.cfg.use_horizontal_flips = False\n self.cfg.use_vertical_flips = False\n self.cfg.rot_90 = False\n self.cfg.im_size = self.env.min_image_side_length or 600\n model_dir, model_path, latest_checkpoint_path = self.get_checkpoint_location()\n self.cfg.model_path = model_path\n\n if K.image_dim_ordering() == 'th':\n input_shape_img = (3, None, None)\n else:\n input_shape_img = (None, None, 3)\n\n img_input = Input(shape=input_shape_img)\n roi_input = Input(shape=(None, 4))\n\n # self.env.allow_base_layer_training = True\n\n # define the base network (resnet here, can be VGG, Inception, etc)\n # define the base network (resnet here, can be VGG, Inception, etc)\n print('Allow resnet training: ', not self.env.use_transfer_learning or self.env.use_transfer_learning and self.env.allow_base_layer_training)\n shared_layers = nn.nn_base(img_input,\n trainable=not self.env.use_transfer_learning or self.env.use_transfer_learning and self.env.allow_base_layer_training)\n # resnet = ResNet50(\n # include_top=False,\n # input_tensor=img_input,\n # input_shape=input_shape_img,\n #\n # )\n #\n # for layer in resnet.layers:\n # layer.trainable = False\n\n # shared_layers = resnet.outputs[0]\n\n # print(len(shared_layers))\n\n num_anchors = len(self.cfg.anchor_box_scales) * len(self.cfg.anchor_box_ratios)\n rpn = nn.rpn(shared_layers, num_anchors)\n\n # for layer in resnet.layers:\n # layer.name += '_1'\n # print(layer.name)\n\n classifier = nn.classifier(shared_layers, 
roi_input, self.cfg.num_rois, nb_classes=len(self._classes) + 1, trainable=True)\n\n model_rpn = Model(img_input, rpn[:2])\n model_classifier = Model([img_input, roi_input], classifier)\n\n # this is a model that holds both the RPN and the classifier, used to load/save weights for the models\n model_all = Model([img_input, roi_input], rpn[:2] + classifier)\n\n try:\n\n try:\n last_checkpoint = self.find_last()\n except:\n last_checkpoint = None\n\n if self.env.use_transfer_learning and not last_checkpoint:\n path = '/home/samuelblattner/.keras/models/' + 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'\n print('loading weights from {}'.format(path))\n model_rpn.load_weights(path, by_name=True)\n model_classifier.load_weights(path, by_name=True)\n\n else:\n print('loading weights from {}'.format(last_checkpoint))\n model_rpn.load_weights(last_checkpoint, by_name=True)\n model_classifier.load_weights(last_checkpoint, by_name=True)\n\n except Exception as e:\n print(e)\n print('Could not load pretrained model weights. Weights can be found in the keras application folder '\n 'https://github.com/fchollet/keras/tree/master/keras/applications')\n\n optimizer = Adam(lr=self.env.learning_rate, )\n optimizer_classifier = Adam(lr=self.env.learning_rate)\n\n model_rpn.compile(optimizer=optimizer,\n loss=[losses_fn.rpn_loss_cls(num_anchors), losses_fn.rpn_loss_regr(num_anchors)])\n model_classifier.compile(optimizer=optimizer_classifier,\n loss=[losses_fn.class_loss_cls, losses_fn.class_loss_regr(len(self._classes))],\n metrics={'dense_class_{}'.format(len(self._classes) + 1): 'accuracy'})\n model_all.compile(optimizer='sgd', loss='mae')\n\n model_all.summary()\n\n return (model_rpn, model_classifier, model_all), None", "def generate_model_configuration(args):\n\n model_config = {\n\n \"dataset_path\": args.dataset_config.output_folder, # Input dataset folder path.\n \"reaction_classes\": args.dataset_config.final_classes, # Final list of reaction classes.\n \"input_configs\": args.descriptor_config.model_training, # List of input configurations to train the model on.\n\n \"logs_folder\": args.model_config.logs_folder, # Path to the designated log folder.\n \"use_oversampling\": eval(args.model_config.use_oversampling), # Use SMOTE oversampling.\n \"random_seed\": args.model_config.random_seed, # Random seed used for reproducibility purposes.\n \"learning_rate\": args.model_config.learning_rate, # ADAM optimizer learning rate.\n \"max_epochs\": args.model_config.max_epochs, # Maximum number of epochs.\n \"batch_size\": args.model_config.batch_size, # Batch size.\n \"early_stopping\": args.model_config.early_stopping, # Number of epochs for early stopping detection.\n\n \"input_size\": args.model_config.input_layer[\"size\"], # Input layer size.\n \"output_size\": args.model_config.output_layer[\"size\"], # Output layer size.\n \"output_act_fcn\": args.model_config.output_layer[\"activation_fcn\"], # Output layer activation.\n\n \"hidden_types\": args.model_config.hidden_layers[args.model_config.fixed_model][\"types\"], # Hidden layer types.\n \"hidden_sizes\": args.model_config.hidden_layers[args.model_config.fixed_model][\"sizes\"], # Hidden layer sizes.\n # Hidden layer activation functions.\n \"hidden_act_fcns\": args.model_config.hidden_layers[args.model_config.fixed_model][\"activation_fcns\"],\n # Hidden layer dropout values.\n \"hidden_dropouts\": args.model_config.hidden_layers[args.model_config.fixed_model][\"dropouts\"]\n }\n\n return model_config", "def single_config():\n\n class 
SingleModel(TFModel):\n model_args = Config()\n\n @classmethod\n def default_config(cls):\n config = super().default_config()\n config['body/block'] = {}\n return config\n\n @classmethod\n def initial_block(cls, inputs, name='initial_block', **kwargs):\n kwargs = cls.fill_params(name, **kwargs)\n cls.model_args['initial_block'] = kwargs\n return inputs\n\n @classmethod\n def body(cls, inputs, name='body', **kwargs):\n kwargs = cls.fill_params(name, **kwargs)\n cls.model_args['body'] = kwargs\n\n block_args = cls.pop('block', kwargs)\n block_args = {**kwargs, **block_args}\n inputs = cls.block(inputs, name='block', **block_args)\n return inputs\n\n @classmethod\n def block(cls, inputs, **kwargs):\n kwargs = cls.fill_params('body/block', **kwargs)\n cls.model_args['block'] = kwargs\n return inputs\n\n @classmethod\n def head(cls, inputs, name='head', **kwargs):\n inputs = super().head(inputs, **kwargs)\n kwargs = cls.fill_params(name, **kwargs)\n cls.model_args['head'] = kwargs\n return inputs\n\n config = {'inputs': {'images': {'shape': (10, 10, 3)},\n 'labels': {'classes': 2}},\n 'initial_block/inputs': 'images',\n 'head': {'layout': 'f', 'units': 2},\n 'loss': 'ce'}\n\n return SingleModel, config", "def build_multi_model_pipeline(feature_cols: list): #-> (list, Pipeline):\n\n assembler = VectorAssembler(inputCols=feature_cols, outputCol=\"features_index_assembler\")\n scaler = Normalizer(inputCol=assembler.getOutputCol(), outputCol=\"features\")\n\n lr = LogisticRegression(maxIter=20, regParam=0.3, elasticNetParam=0)\n lr_regParam = [0.01, 0.1, 0.3, 0.5]\n lr_elasticNetParam=[0, .5, 1]\n\n dt = DecisionTreeClassifier(maxDepth=3)\n dt_maxDepth = [3, 5]\n\n rf = RandomForestClassifier()\n\n gbt = GBTClassifier()\n gbt_maxDepth = [2, 4, 6]\n gbt_maxBins = [20, 60]\n gbt_maxIter = [10, 20]\n\n pipeline = Pipeline(stages = [])\n \n lr_stages = [assembler, scaler, lr]\n lr_paramgrid = ParamGridBuilder().baseOn({pipeline.stages:lr_stages}) \\\n .addGrid(lr.regParam, lr_regParam) \\\n .addGrid(lr.elasticNetParam, lr_elasticNetParam) \\\n .build()\n\n dt_stages = [assembler, scaler, dt]\n dt_paramgrid = ParamGridBuilder().baseOn({pipeline.stages:dt_stages}) \\\n .addGrid(dt.maxDepth, dt_maxDepth) \\\n .build()\n\n rf_stages = [assembler, scaler, rf]\n rf_paramgrid = ParamGridBuilder().baseOn({pipeline.stages: rf_stages}) \\\n .build()\n\n gbt_stages = [assembler, scaler, gbt] \n gbt_paramgrid = ParamGridBuilder().baseOn({pipeline.stages:gbt_stages}) \\\n .addGrid(gbt.maxDepth, gbt_maxDepth) \\\n .addGrid(gbt.maxBins, gbt_maxBins) \\\n .addGrid(gbt.maxIter, gbt_maxIter) \\\n .build()\n\n grid_loop = [lr_paramgrid, gbt_paramgrid, rf_paramgrid]\n # grid_loop = [lr_paramgrid, dt_paramgrid, rf_paramgrid, gbt_paramgrid]\n\n return grid_loop, pipeline", "def makeclone(in_fn, coords):\n # remove existing clone if present\n if os.path.exists('./clone.map'): os.remove('./clone.map')\n # open the raster\n rast = gdal.Open(in_fn)\n out_rows, out_columns, ul_x, ul_y = getRowsCols(rast, coords)\n # Make the clone with the following inputs\n # -s for not invoking the menu\n # -R nr of rows\n # -C nr of columns\n # -N data type Nominal\n # -P y coordinates increase bottom to top\n # -x x-coordinate of upper left corner\n # -y y-coordinate of upper left corner\n # -l cell length, set to 100 m (same as Corine)\n strings = ['mapattr -s', ' -R ' + str(out_rows), \\\n ' -C ' + str(out_columns), ' -N ', '-P yb2t', \\\n ' -x ' + str(coords[0]), ' -y ' + str(coords[3]), \\\n ' -l 100 clone.map'] \n command = 
\"\".join(strings)\n print(command)\n os.system(command)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gather the loss for a single clone.
def _gather_clone_loss(clone, num_clones, regularization_losses):
  # The return value.
  sum_loss = None
  # Individual components of the loss that will need summaries.
  clone_loss = None
  regularization_loss = None
  # Compute and aggregate losses on the clone device.
  with tf.device(clone.device):
    all_losses = []
    clone_losses = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.LOSSES,
                                               clone.scope)
    if clone_losses:
      clone_loss = tf.add_n(clone_losses, name='clone_loss')
      if num_clones > 1:
        clone_loss = tf.div(clone_loss, 1.0 * num_clones,
                            name='scaled_clone_loss')
      all_losses.append(clone_loss)
    if regularization_losses:
      regularization_loss = tf.add_n(regularization_losses,
                                     name='regularization_loss')
      all_losses.append(regularization_loss)
    if all_losses:
      sum_loss = tf.add_n(all_losses)
  # Add the summaries out of the clone device block.
  if clone_loss is not None:
    tf.compat.v1.summary.scalar('/'.join(filter(None,
                                                ['Losses', clone.scope,
                                                 'clone_loss'])),
                                clone_loss)
  if regularization_loss is not None:
    tf.compat.v1.summary.scalar('Losses/regularization_loss',
                                regularization_loss)
  return sum_loss
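For orientation, a minimal usage sketch of the function above (an editorial illustration, not part of this dataset row): it assumes a Clone record with outputs/scope/device fields in the spirit of TF-Slim's model_deploy, builds a toy loss inside the clone's name scope so that it lands in the v1 LOSSES collection, and then gathers it with _gather_clone_loss. Every name other than _gather_clone_loss itself is an assumption.

import collections

import tensorflow as tf

# Hypothetical stand-in for model_deploy.Clone (an assumption, not from the row).
Clone = collections.namedtuple('Clone', ['outputs', 'scope', 'device'])

graph = tf.Graph()
with graph.as_default():  # build v1-style graph ops explicitly
  with tf.compat.v1.name_scope('clone_0') as scope:
    with tf.device('/cpu:0'):
      images = tf.zeros([4, 8])
      labels = tf.zeros([4], dtype=tf.int64)
      logits = tf.compat.v1.layers.dense(images, 2)
      # Registers the loss in tf.compat.v1.GraphKeys.LOSSES under this scope.
      tf.compat.v1.losses.sparse_softmax_cross_entropy(labels, logits)
  clone = Clone(outputs=logits, scope=scope, device='/cpu:0')
  reg_losses = tf.compat.v1.get_collection(
      tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES)
  sum_loss = _gather_clone_loss(clone, num_clones=1,
                                regularization_losses=reg_losses)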
[ "def _optimize_clone(optimizer, clone, num_clones, regularization_losses,\n **kwargs):\n sum_loss = _gather_clone_loss(clone, num_clones, regularization_losses)\n clone_grad = None\n if sum_loss is not None:\n with tf.device(clone.device):\n clone_grad = optimizer.compute_gradients(sum_loss, **kwargs)\n return sum_loss, clone_grad", "def _compute_copy_loss(self, batch, output, target, align, attns):\n scores = self.generator(\n self._bottle(output), self._bottle(attns[\"copy\"]), batch[\"src_map\"]\n )\n loss = self.criterion(scores, align, target).sum()\n\n return loss, scores", "def _CreateWeightLoss(self):\n self.AssertInitialized()\n with self._BlockScope():\n return [tf.nn.l2_loss(v) for v in self._variables]", "def loss(self, batch):\n err = self.td_err(\n batch.obs, batch.action, batch.reward, batch.bootstrap, batch.next_obs\n )\n loss = nn.functional.smooth_l1_loss(\n err, torch.zeros_like(err), reduction=\"none\"\n )\n priority = err.detach().abs().cpu()\n return loss, priority", "def _compute_loss(self, _block_id):\n raise NotImplementedError()", "def loss_criterion(self) -> torch.nn.Module:\n\n pass", "def loss(self, result, config=None):\r\n return result.get('loss', None)", "def scale_loss(self, loss):\n\n yield loss\n return", "def tf_loss_per_instance(self, states, internals, actions, terminal, reward, update):\n raise NotImplementedError", "def CreateWeightLoss(self):\n losses = list(itertools.chain(\n itertools.chain.from_iterable(\n t.CreateWeightLoss() for t in self._subblocks),\n self._CreateWeightLoss()))\n return losses", "def GetLoss(self, get_deriv=False, **kwargs):\n perf = deepnet_pb2.Metrics()\n perf.MergeFrom(self.proto.performance_stats)\n perf.count = self.batchsize\n tiny = self.tiny\n if self.loss_function == deepnet_pb2.Layer.CROSS_ENTROPY:\n temp2 = self.temp2\n temp = self.batchsize_temp\n batchsize = self.batchsize\n dimensions = self.dimensions\n numlabels = self.numlabels\n state = self.state\n data = self.data\n unitcell = self.unitcell\n indices = self.indices\n\n # Optimized for space to handle large number of labels in a softmax.\n data.reshape((1, batchsize * dimensions))\n data.add(self.rowshift, target=indices)\n state.reshape((numlabels, dimensions * batchsize))\n state.max(axis=0, target=temp2)\n state.reshape((1, batchsize * numlabels * dimensions))\n state.select_columns(indices, temp)\n temp2.subtract(temp)\n temp2.sign(target=temp2)\n temp2.sum(axis=1, target=unitcell)\n correct_preds = batchsize - unitcell.euclid_norm()\n if get_deriv:\n temp.subtract(1, target=temp2)\n state.set_selected_columns(indices, temp2)\n state.reshape((numlabels * dimensions, batchsize))\n self.deriv.assign(self.state)\n state.reshape((numlabels * dimensions, batchsize))\n temp.add(tiny)\n cm.log(temp)\n temp.sum(axis=1, target=unitcell)\n cross_entropy = unitcell.euclid_norm()\n perf.cross_entropy = cross_entropy\n perf.correct_preds = correct_preds\n elif self.loss_function == deepnet_pb2.Layer.SQUARED_LOSS:\n self.expansion_matrix.select_columns(self.data, target=self.expanded_batch)\n self.state.subtract(self.expanded_batch, target=self.deriv)\n error = self.deriv.euclid_norm()**2\n perf.error = error\n else:\n raise Exception('Unknown loss function for Softmax units.')\n return perf", "def lossSummary(loss: _S2T) -> _S2T:\n return {\n \"loss/\" + v: loss[k].detach()\n for k, v in MultiTask.itemdic.items() if k in loss\n }", "def update_loss(self, loss):\n self.loss += loss\n self.num_loss_attempts += 1", "def multiTaskLoss(self, loss: _S2T) -> 
torch.Tensor:\n return sum(v * self.cmgr.get(\"task.\" + k, 1) for k, v in loss.items())", "def _compute_loss(self, model_output: torch.Tensor, target: torch.Tensor) -> torch.Tensor:\n pass", "def compute_losses(self, inputs, outputs,save_error=False):\n losses = {}\n total_loss = 0\n losses['perceptional_loss'] = 0\n\n \n for scale in self.opt.scales:\n #scales=[0,1,2,3]\n loss = 0\n reprojection_losses = []\n perceptional_losses = []\n\n if self.opt.v1_multiscale:\n source_scale = scale\n else:\n source_scale = 0\n\n disp = outputs[(\"disp\", scale)]\n ##add feature map\n color = inputs[(\"color\", 0, scale)]\n target = inputs[(\"color\", 0, source_scale)]\n \n #adding feature_loss\n #for frame_id in self.opt.frame_ids[1:]:\n # src_f = outputs[(\"feature\", frame_id, 0)]\n # tgt_f = self.models[\"extractor\"](inputs[(\"color\", 0, 0)])[0]\n # perceptional_losses.append(self.compute_perceptional_loss(tgt_f, src_f))\n #perceptional_loss = torch.cat(perceptional_losses, 1)\n\n #min_perceptional_loss, outputs[(\"min_index\", scale)] = torch.min(perceptional_loss, dim=1)\n #losses[('min_perceptional_loss', scale)] = self.opt.perception_weight * min_perceptional_loss.mean() / len(self.opt.scales)\n \n #losses['perceptional_loss'] += losses[('min_perceptional_loss',scale)]\n\n # photometric_loss\n for frame_id in self.opt.frame_ids[1:]:\n pred = outputs[(\"color\", frame_id, scale)]\n reprojection_losses.append(self.compute_reprojection_loss(pred, target,save_error))\n\n reprojection_losses = torch.cat(reprojection_losses, 1)\n\n if not self.opt.disable_automasking:\n identity_reprojection_losses = []\n for frame_id in self.opt.frame_ids[1:]:\n pred = inputs[(\"color\", frame_id, source_scale)]\n identity_reprojection_losses.append(\n self.compute_reprojection_loss(pred, target,save_error))\n\n identity_reprojection_losses = torch.cat(identity_reprojection_losses, 1)\n\n if self.opt.avg_reprojection:\n identity_reprojection_loss = identity_reprojection_losses.mean(1, keepdim=True)\n else:\n # save both images, and do min all at once below\n identity_reprojection_loss = identity_reprojection_losses\n if self.opt.mask_plan in [1,2,3]:\n self.distance_constraint_automask = identity_reprojection_loss.min(1,keepdim=True)\n\n elif self.opt.predictive_mask:\n mask = outputs[\"predictive_mask\"][\"predictive_mask\", scale]\n if not self.opt.v1_multiscale:\n mask = F.interpolate(\n mask, [self.opt.height, self.opt.width],\n mode=\"bilinear\", align_corners=False)\n\n reprojection_losses *= mask\n #using distance_constraint_mask\n #elif self.opt.distance_constraint_mask:\n # add a loss pushing mask to 1 (using nn.BCELoss for stability)\n weighting_loss = 0.2 * nn.BCELoss()(mask, torch.ones(mask.shape).cuda()) if torch.cuda.is_available() else 0.2 * nn.BCELoss()(mask, torch.ones(mask.shape).cpu())\n loss += weighting_loss.mean()\n\n if self.opt.avg_reprojection:\n reprojection_loss = reprojection_losses.mean(1, keepdim=True)\n else:\n reprojection_loss = reprojection_losses\n\n if not self.opt.disable_automasking:\n # add random numbers to break ties\n #identity_reprojection_loss.shape).cuda() * 0.00001\n if torch.cuda.is_available():\n identity_reprojection_loss += torch.randn(identity_reprojection_loss.shape).cuda(1) * 0.00001 if self.opt.no_cuda else torch.randn(identity_reprojection_loss.shape).cuda() * 0.00001\n else:\n identity_reprojection_loss += torch.randn(identity_reprojection_loss.shape).cpu() * 0.00001\n combined = torch.cat((identity_reprojection_loss, reprojection_loss), dim=1)\n else:\n 
combined = reprojection_loss\n\n if combined.shape[1] == 1:\n to_optimise = combined\n else:\n to_optimise, idxs = torch.min(combined, dim=1)\n\n if not self.opt.disable_automasking:\n outputs[\"identity_selection/{}\".format(scale)] = (\n idxs > identity_reprojection_loss.shape[1] - 1).float()\n\n loss += to_optimise.mean()\n\n mean_disp = disp.mean(2, True).mean(3, True)\n norm_disp = disp / (mean_disp + 1e-7)\n smooth_loss = get_smooth_loss(norm_disp, color)\n\n loss += self.opt.disparity_smoothness * smooth_loss / (2 ** scale)\n total_loss += loss\n losses[\"loss/{}\".format(scale)] = loss\n\n total_loss /= self.num_scales\n #total_loss = (1 - self.opt.perception_weight) * total_loss + self.opt.perception_weight * losses['perceptional_loss']\n #total_loss = total_loss + self.opt.perception_weight * losses['perceptional_loss']\n if self.opt.flipping_loss == True:\n losses['symmetry_loss'] = self.opt.flipping_loss_weight *self.flipped_loss(inputs, outputs) \n total_loss = total_loss + self.opt.perception_weight * losses['perceptional_loss'] + losses['symmetry_loss']\n else:\n total_loss = total_loss + self.opt.perception_weight * losses['perceptional_loss']\n\n \n #using new architecture\n #if self.opt.add_neighboring_frames == 1:\n # depth_loss_sum = 0\n # depth_loss_weights_sum = 0\n # if self.opt.depth_multiscale:\n # for i in self.opt.scales:\n # #testing before\n # depth_mid = torch.abs(self.neighboring_depth[(\"depth_previous\",i)] - \\\n # self.neighboring_depth[(\"depth_next\",i)]) / 2 if self.opt.respective_depth_constraint \\\n # else torch.abs(self.neighboring_depth[(\"depth_previous\",i)] - \\\n # self.neighboring_depth[(\"depth_next\",i)]) / 2 + self.neighboring_depth[(\"depth_next\",i)]\n # ## L2 loss\n # #depth_loss = nn.MSELoss()(torch.abs(self.neighboring_depth[(\"depth_previous\",i)] - outputs[(\"depth\",0,i)]), depth_mid) * self.depth_mask[0] + \\\n # # nn.MSELoss()(torch.abs(self.neighboring_depth[(\"depth_next\",i)] - outputs[(\"depth\",0,i)]), depth_mid)*self.depth_mask[1] if self.opt.respective_depth_constraint \\\n # # else nn.MSELoss()(depth_mid , outputs[(\"depth\",0,i)])\n # \n # depth_loss = torch.abs(torch.abs(self.neighboring_depth[(\"depth_previous\",i)] - outputs[(\"depth\",0,i)]) - depth_mid) * self.depth_mask[0] + \\\n # torch.abs(torch.abs(self.neighboring_depth[(\"depth_next\",i)] - outputs[(\"depth\",0,i)]) - depth_mid)*self.depth_mask[1] if self.opt.respective_depth_constraint \\\n # else torch.abs(depth_mid - outputs[(\"depth\",0,i)])\n # #depth_loss = torch.abs(torch.abs(self.neighboring_depth[(\"depth_previous\",i)] - outputs[(\"depth\",0,i)]) - depth_mid) + \\\n # # torch.abs(torch.abs(self.neighboring_depth[(\"depth_next\",i)] - outputs[(\"depth\",0,i)]) - depth_mid) if self.opt.respective_depth_constraint \\\n # # else torch.abs(depth_mid - outputs[(\"depth\",0,i)])\n # \n # if self.opt.distance_constraint_mask:\n # depth_lossing = self.opt.depth_loss_weight * (depth_loss * self.distance_constraint_mask).mean()\n # if not self.opt.disable_BCELoss:#when setting distance mask will doing this \n # depth_loss_weights = self.opt.distance_mask_weight* nn.BCELoss()\\\n # (self.distance_constraint_mask, \\\n # torch.ones(self.distance_constraint_mask.shape).cuda()) \\\n # if torch.cuda.is_available() \\\n # else \\\n # self.opt.distance_mask_weight * nn.BCELoss()\\\n # (self.distance_constraint_mask, \\\n # torch.ones(self.distance_constraint_mask.shape).cpu())\n # depth_loss_weights_sum += depth_loss_weights\n # if float(depth_loss_weights) == 0:\n # 
print(\"distance_mask is useless\")\n # else:\n # if self.opt.mask_plan == 0:\n # depth_lossing = (depth_loss * self.opt.depth_loss_weight).mean()\n # elif self.opt.mask_plan == 1:\n # depth_lossing = (depth_loss * self.distance_constraint_automask[0]).mean()\n # elif self.opt.mask_plan == 2:\n # depth_lossing = self.opt.depth_loss_weight * (depth_loss * self.distance_constraint_automask[0]).mean()\n # elif self.opt.mask_plan == 3:\n # depth_lossing = self.opt.depth_loss_weight * (depth_loss * self.distance_constraint_automask).mean()\n # depth_loss_sum += depth_lossing\n # else:\n # depth_mid = torch.abs(self.neighboring_depth[(\"depth_previous\",0)] - \\\n # self.neighboring_depth[(\"depth_next\",0)]) / 2 if self.opt.respective_depth_constraint \\\n # else torch.abs(self.neighboring_depth[(\"depth_previous\",0)] - \\\n # self.neighboring_depth[(\"depth_next\",0)]) / 2 + self.neighboring_depth[(\"depth_next\",0)]\n # for i in self.opt.scales:\n # ## L2 loss\n # #depth_loss = nn.MSELoss()(torch.abs(self.neighboring_depth[(\"depth_previous\",0)] - outputs[(\"depth\",0,i)]), depth_mid) * self.depth_mask[0] + \\\n # # nn.MSELoss()(torch.abs(self.neighboring_depth[(\"depth_next\",0)] - outputs[(\"depth\",0,i)]), depth_mid)*self.depth_mask[1] if self.opt.respective_depth_constraint \\\n # # else nn.MSELoss()(depth_mid, outputs[(\"depth\",0,i)])\n # \n # depth_loss = torch.abs(torch.abs(self.neighboring_depth[(\"depth_previous\",0)] - outputs[(\"depth\",0,i)]) - depth_mid) * self.depth_mask[0] + \\\n # torch.abs(torch.abs(self.neighboring_depth[(\"depth_next\",0)] - outputs[(\"depth\",0,i)]) - depth_mid)*self.depth_mask[1] if self.opt.respective_depth_constraint \\\n # else torch.abs(depth_mid - outputs[(\"depth\",0,i)])\n # #depth_loss = torch.abs(torch.abs(self.neighboring_depth[(\"depth_previous\",0)] - outputs[(\"depth\",0,i)]) - depth_mid) + \\\n # # torch.abs(torch.abs(self.neighboring_depth[(\"depth_next\",0)] - outputs[(\"depth\",0,i)]) - depth_mid) if self.opt.respective_depth_constraint\\\n # # else torch.abs(depth_mid - outputs[(\"depth\",0,i)])\n # if self.opt.distance_constraint_mask:\n # depth_lossing = self.opt.depth_loss_weight * (depth_loss * self.distance_constraint_mask).mean()\n # if not self.opt.disable_BCELoss:\n # depth_loss_weights = self.opt.distance_mask_weight* nn.BCELoss()\\\n # (self.distance_constraint_mask, \\\n # torch.ones(self.distance_constraint_mask.shape).cuda()) \\\n # if torch.cuda.is_available() \\\n # else \\\n # self.opt.distance_mask_weight * nn.BCELoss()\\\n # (self.distance_constraint_mask, \\\n # torch.ones(self.distance_constraint_mask.shape).cpu())\n # depth_loss_weights_sum += depth_loss_weights\n # else:\n # if self.opt.mask_plan == 0:\n # depth_lossing = (depth_loss * self.opt.depth_loss_weight).mean()\n # elif self.opt.mask_plan == 1:\n # depth_lossing = (depth_loss * self.distance_constraint_automask[0]).mean()\n # elif self.opt.mask_plan == 2:\n # depth_lossing = self.opt.depth_loss_weight * (depth_loss * self.distance_constraint_automask[0]).mean()\n # elif self.opt.mask_plan == 3:\n # depth_lossing = self.opt.depth_loss_weight * (depth_loss * self.distance_constraint_automask).mean()\n # depth_loss_sum += depth_lossing\n # depth_loss_sum /= 4\n # if depth_loss_sum == 0:\n # print(\"depth_loss is useless\")\n # depth_loss_weights_sum /= 4\n # if self.opt.combined_loss == True:\n # total_loss = (1-self.opt.depth_loss_weight) * total_loss + depth_loss_sum + depth_loss_weights_sum\n # else:\n # total_loss += depth_loss_sum + 
depth_loss_weights_sum\n losses[\"loss\"] = total_loss\n return losses", "def calculate_loss(self, batch):\n cost = self._session.run(self.cost, feed_dict={self.X: batch})\n return cost", "def reg_loss(self):\n \n loss = 0.0\n \n for xlayer in self.layers.values():\n loss += xlayer.reg_loss()\n \n return loss", "def CreateWeightLoss():\n stack = _block_stacks[tf.get_default_graph()]\n if not stack:\n return []\n return stack[0].CreateWeightLoss()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute losses and gradients for a single clone.
def _optimize_clone(optimizer, clone, num_clones, regularization_losses,
                    **kwargs):
  sum_loss = _gather_clone_loss(clone, num_clones, regularization_losses)
  clone_grad = None
  if sum_loss is not None:
    with tf.device(clone.device):
      clone_grad = optimizer.compute_gradients(sum_loss, **kwargs)
  return sum_loss, clone_grad
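Continuing the sketch above (again an illustration that reuses the graph and clone built there, not part of the row): with a v1 optimizer, _optimize_clone returns the summed loss plus the usual (gradient, variable) pairs from Optimizer.compute_gradients, which can then be applied directly. The optimizer choice and learning rate are arbitrary assumptions.

with graph.as_default():
  optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.1)
  # clone comes from the previous sketch; num_clones=1 skips the loss scaling.
  sum_loss, clone_grad = _optimize_clone(
      optimizer, clone, num_clones=1, regularization_losses=None)
  if clone_grad is not None:
    train_op = optimizer.apply_gradients(clone_grad)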
[ "def _gather_clone_loss(clone, num_clones, regularization_losses):\n # The return value.\n sum_loss = None\n # Individual components of the loss that will need summaries.\n clone_loss = None\n regularization_loss = None\n # Compute and aggregate losses on the clone device.\n with tf.device(clone.device):\n all_losses = []\n clone_losses = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.LOSSES,\n clone.scope)\n if clone_losses:\n clone_loss = tf.add_n(clone_losses, name='clone_loss')\n if num_clones > 1:\n clone_loss = tf.div(clone_loss, 1.0 * num_clones,\n name='scaled_clone_loss')\n all_losses.append(clone_loss)\n if regularization_losses:\n regularization_loss = tf.add_n(regularization_losses,\n name='regularization_loss')\n all_losses.append(regularization_loss)\n if all_losses:\n sum_loss = tf.add_n(all_losses)\n # Add the summaries out of the clone device block.\n if clone_loss is not None:\n tf.compat.v1.summary.scalar('/'.join(filter(None,\n ['Losses', clone.scope, 'clone_loss'])),\n clone_loss)\n if regularization_loss is not None:\n tf.compat.v1.summary.scalar('Losses/regularization_loss',\n regularization_loss)\n return sum_loss", "def optimize_clones(clones, optimizer,\n regularization_losses=None,\n **kwargs):\n grads_and_vars = []\n clones_losses = []\n num_clones = len(clones)\n if regularization_losses is None:\n regularization_losses = tf.compat.v1.get_collection(\n tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES)\n for clone in clones:\n with tf.name_scope(clone.scope):\n clone_loss, clone_grad = _optimize_clone(\n optimizer, clone, num_clones, regularization_losses, **kwargs)\n if clone_loss is not None:\n clones_losses.append(clone_loss)\n grads_and_vars.append(clone_grad)\n # Only use regularization_losses for the first clone\n regularization_losses = None\n # Compute the total_loss summing all the clones_losses.\n total_loss = tf.add_n(clones_losses, name='total_loss')\n # Sum the gradients across clones.\n grads_and_vars = _sum_clones_gradients(grads_and_vars)\n return total_loss, grads_and_vars", "def GetLoss(self, get_deriv=False, **kwargs):\n perf = deepnet_pb2.Metrics()\n perf.MergeFrom(self.proto.performance_stats)\n perf.count = self.batchsize\n tiny = self.tiny\n if self.loss_function == deepnet_pb2.Layer.CROSS_ENTROPY:\n temp2 = self.temp2\n temp = self.batchsize_temp\n batchsize = self.batchsize\n dimensions = self.dimensions\n numlabels = self.numlabels\n state = self.state\n data = self.data\n unitcell = self.unitcell\n indices = self.indices\n\n # Optimized for space to handle large number of labels in a softmax.\n data.reshape((1, batchsize * dimensions))\n data.add(self.rowshift, target=indices)\n state.reshape((numlabels, dimensions * batchsize))\n state.max(axis=0, target=temp2)\n state.reshape((1, batchsize * numlabels * dimensions))\n state.select_columns(indices, temp)\n temp2.subtract(temp)\n temp2.sign(target=temp2)\n temp2.sum(axis=1, target=unitcell)\n correct_preds = batchsize - unitcell.euclid_norm()\n if get_deriv:\n temp.subtract(1, target=temp2)\n state.set_selected_columns(indices, temp2)\n state.reshape((numlabels * dimensions, batchsize))\n self.deriv.assign(self.state)\n state.reshape((numlabels * dimensions, batchsize))\n temp.add(tiny)\n cm.log(temp)\n temp.sum(axis=1, target=unitcell)\n cross_entropy = unitcell.euclid_norm()\n perf.cross_entropy = cross_entropy\n perf.correct_preds = correct_preds\n elif self.loss_function == deepnet_pb2.Layer.SQUARED_LOSS:\n self.expansion_matrix.select_columns(self.data, 
target=self.expanded_batch)\n self.state.subtract(self.expanded_batch, target=self.deriv)\n error = self.deriv.euclid_norm()**2\n perf.error = error\n else:\n raise Exception('Unknown loss function for Softmax units.')\n return perf", "def calculate_loss(self, activations, labels):\n\n # get the regularisation for each layer in the model\n regularisation = 0.0\n for layer in self.layers:\n regularisation += layer.get_regularisation()\n\n loss, gradients = self.loss_function(activations, labels)\n return loss + regularisation, gradients", "def manually_compute_losses(numpy_inputs, inputs_placeholder, loss, num_workers,\n params):\n batch_size = params.batch_size * params.num_gpus\n assert numpy_inputs.shape[0] % (num_workers * batch_size) == 0\n l2_loss = tf.add_n([tf.nn.l2_loss(x) for x in tf.trainable_variables()])\n total_loss = loss + params.weight_decay * l2_loss\n reported_loss = (loss if params.loss_type_to_report == 'base_loss'\n else total_loss)\n gradient_multiplier = 1\n if params.variable_update in ('replicated', 'distributed_all_reduce'):\n # In certain variable updates, tf_cnn_benchmarks add the gradients of the\n # GPUs instead of taking their mean, making the gradients effectively\n # params.num_gpu times higher.\n # TODO(b/62722498): Make all variable updates consistent.\n gradient_multiplier = params.num_gpus\n\n opt = benchmark_cnn.get_optimizer(params, params.init_learning_rate)\n grad_vars = opt.compute_gradients(\n total_loss, grad_loss=tf.constant(gradient_multiplier, dtype=tf.float32))\n grads = [g for g, _ in grad_vars]\n # We apply gradients from a placeholder. That way, we can first compute the\n # gradients from each worker, then afterwards apply them one by one by feeding\n # them into the placeholder.\n placeholder_grad_vars = [(tf.placeholder(g.dtype, g.shape), v)\n for g, v in grad_vars]\n placeholder_grads = [g for g, _ in placeholder_grad_vars]\n apply_grads_op = opt.apply_gradients(placeholder_grad_vars)\n\n batch_iterators = [_worker_batches_in_numpy_array(numpy_inputs, batch_size,\n shift_ratio=i / num_workers)\n for i in range(num_workers)]\n # Set the GPU count to 0, to avoid taking all the GPU memory. Unfortunately,\n # doing so still takes up about ~1GB for some reason.\n config = tf.ConfigProto(device_count={'GPU': 0})\n config.graph_options.rewrite_options.pin_to_host_optimization = (\n rewriter_config_pb2.RewriterConfig.OFF)\n with tf.Session(config=config) as sess:\n sess.run(tf.global_variables_initializer())\n losses = [[] for _ in range(num_workers)]\n for i in range(params.num_batches):\n computed_grads = []\n for j in range(num_workers):\n batch_feed = next(batch_iterators[j])\n batch_feed = batch_feed / 127.5 - 1\n worker_loss, worker_grads = sess.run((reported_loss, grads),\n {inputs_placeholder: batch_feed})\n losses[j].append(worker_loss)\n computed_grads.append(worker_grads)\n for worker_grads in computed_grads:\n # TODO(reedwm): With multiple workers, applying the gradients\n # sequentially per worker is not equivalent to what tf_cnn_benchmarks\n # does when the optmizer is not SGD. 
Therefore, this currently does not\n # work currently when num_workers > 1 and params.optimizer != 'sgd'.\n feed_dict = dict(zip(placeholder_grads, worker_grads))\n sess.run(apply_grads_op, feed_dict)\n return losses", "def clone(self, **kwargs) -> Tuple[Tensor, Tensor]:\n clone, compas = self.will_clone, self.compas_clone\n self.cum_rewards[clone] = self.cum_rewards[compas][clone]\n self.id_walkers[clone] = self.id_walkers[compas][clone]\n self.virtual_rewards[clone] = self.virtual_rewards[compas][clone]\n return clone, compas", "def _compute_network_model(self) :\n \n # build the network\n self.i = T.vector('i',dtype = self.int_dtype)\n \n self.network_outputs = compute_network_outputs(self.i,self.s0,self.V,\n self.U,self.W,self.b)\n \n \n # build mean log likelyhood loss\n \n # variables for a batch of sentences\n self.I = T.matrix('I',dtype = self.int_dtype)\n self.J = T.matrix('J',dtype = self.int_dtype) # for embedding I = J\n \n self.loss_outputs = compute_mean_log_lklyhd_outputs(self.I,self.J,\n self.s0,self.V,\n self.U,self.W,\n self.b)\n\n # set up the accumulator for computing the loss in batches\n \n n_minibatch = T.cast(self.I.shape[0],self.float_dtype)\n loss_accum_ipnm = self.loss_accum_i + n_minibatch\n \n self.loss_updates = ((self.loss_accum,\n (self.loss_outputs*n_minibatch/loss_accum_ipnm\n + (self.loss_accum \n * self.loss_accum_i/loss_accum_ipnm))),\n (self.loss_accum_i,loss_accum_ipnm))\n \n # get the gradient of the loss\n \n (self.dV,\n self.dU,\n self.dW,\n self.db) = theano.grad(self.loss_outputs,\n [self.V,self.U,self.W,self.b])\n \n # get the gradient magnitudes\n \n self.dV_mag = T.sqrt(T.sum(self.dV*self.dV))\n self.dU_mag = T.sqrt(T.sum(self.dU*self.dU))\n self.dW_mag = T.sqrt(T.sum(self.dW*self.dW))\n self.db_mag = T.sqrt(T.sum(self.db*self.db))\n \n # get the sgd update function\n \n # this is the learning parameter\n self.eta = T.scalar('eta',dtype = self.float_dtype)\n \n # also including a running average of the gradient magnitudes\n \n self.sgd_i = T.scalar('sgd_i',dtype = self.float_dtype)\n \n dV_mag_accum = (self.dV_mag/(self.sgd_i+1.)\n + self.m_dV_mag*(self.sgd_i/(self.sgd_i+1.)))\n dU_mag_accum = (self.dU_mag/(self.sgd_i+1.) \n + self.m_dU_mag*(self.sgd_i/(self.sgd_i+1.)))\n dW_mag_accum = (self.dW_mag/(self.sgd_i+1.) \n + self.m_dW_mag*(self.sgd_i/(self.sgd_i+1.)))\n db_mag_accum = (self.db_mag/(self.sgd_i+1.) 
\n + self.m_db_mag*(self.sgd_i/(self.sgd_i+1.)))\n \n # adding here since we are taking a max of the loss - accumulators\n # do not include the latest values\n self.sgd_updates = ((self.V,self.V + self.eta*self.dV),\n (self.U,self.U + self.eta*self.dU),\n (self.W,self.W + self.eta*self.dW),\n (self.b,self.b + self.eta*self.db),\n (self.m_dV_mag,dV_mag_accum),\n (self.m_dU_mag,dU_mag_accum),\n (self.m_dW_mag,dW_mag_accum),\n (self.m_db_mag,db_mag_accum))\n\n # pointers for the compiled functions\n self.network = None\n self.loss = None\n self.grad_loss = None\n self.sgd_update = None\n self.sgd_update_w_loss = None", "def compute_losses(self, inputs, outputs,save_error=False):\n losses = {}\n total_loss = 0\n losses['perceptional_loss'] = 0\n\n \n for scale in self.opt.scales:\n #scales=[0,1,2,3]\n loss = 0\n reprojection_losses = []\n perceptional_losses = []\n\n if self.opt.v1_multiscale:\n source_scale = scale\n else:\n source_scale = 0\n\n disp = outputs[(\"disp\", scale)]\n ##add feature map\n color = inputs[(\"color\", 0, scale)]\n target = inputs[(\"color\", 0, source_scale)]\n \n #adding feature_loss\n #for frame_id in self.opt.frame_ids[1:]:\n # src_f = outputs[(\"feature\", frame_id, 0)]\n # tgt_f = self.models[\"extractor\"](inputs[(\"color\", 0, 0)])[0]\n # perceptional_losses.append(self.compute_perceptional_loss(tgt_f, src_f))\n #perceptional_loss = torch.cat(perceptional_losses, 1)\n\n #min_perceptional_loss, outputs[(\"min_index\", scale)] = torch.min(perceptional_loss, dim=1)\n #losses[('min_perceptional_loss', scale)] = self.opt.perception_weight * min_perceptional_loss.mean() / len(self.opt.scales)\n \n #losses['perceptional_loss'] += losses[('min_perceptional_loss',scale)]\n\n # photometric_loss\n for frame_id in self.opt.frame_ids[1:]:\n pred = outputs[(\"color\", frame_id, scale)]\n reprojection_losses.append(self.compute_reprojection_loss(pred, target,save_error))\n\n reprojection_losses = torch.cat(reprojection_losses, 1)\n\n if not self.opt.disable_automasking:\n identity_reprojection_losses = []\n for frame_id in self.opt.frame_ids[1:]:\n pred = inputs[(\"color\", frame_id, source_scale)]\n identity_reprojection_losses.append(\n self.compute_reprojection_loss(pred, target,save_error))\n\n identity_reprojection_losses = torch.cat(identity_reprojection_losses, 1)\n\n if self.opt.avg_reprojection:\n identity_reprojection_loss = identity_reprojection_losses.mean(1, keepdim=True)\n else:\n # save both images, and do min all at once below\n identity_reprojection_loss = identity_reprojection_losses\n if self.opt.mask_plan in [1,2,3]:\n self.distance_constraint_automask = identity_reprojection_loss.min(1,keepdim=True)\n\n elif self.opt.predictive_mask:\n mask = outputs[\"predictive_mask\"][\"predictive_mask\", scale]\n if not self.opt.v1_multiscale:\n mask = F.interpolate(\n mask, [self.opt.height, self.opt.width],\n mode=\"bilinear\", align_corners=False)\n\n reprojection_losses *= mask\n #using distance_constraint_mask\n #elif self.opt.distance_constraint_mask:\n # add a loss pushing mask to 1 (using nn.BCELoss for stability)\n weighting_loss = 0.2 * nn.BCELoss()(mask, torch.ones(mask.shape).cuda()) if torch.cuda.is_available() else 0.2 * nn.BCELoss()(mask, torch.ones(mask.shape).cpu())\n loss += weighting_loss.mean()\n\n if self.opt.avg_reprojection:\n reprojection_loss = reprojection_losses.mean(1, keepdim=True)\n else:\n reprojection_loss = reprojection_losses\n\n if not self.opt.disable_automasking:\n # add random numbers to break ties\n 
#identity_reprojection_loss.shape).cuda() * 0.00001\n if torch.cuda.is_available():\n identity_reprojection_loss += torch.randn(identity_reprojection_loss.shape).cuda(1) * 0.00001 if self.opt.no_cuda else torch.randn(identity_reprojection_loss.shape).cuda() * 0.00001\n else:\n identity_reprojection_loss += torch.randn(identity_reprojection_loss.shape).cpu() * 0.00001\n combined = torch.cat((identity_reprojection_loss, reprojection_loss), dim=1)\n else:\n combined = reprojection_loss\n\n if combined.shape[1] == 1:\n to_optimise = combined\n else:\n to_optimise, idxs = torch.min(combined, dim=1)\n\n if not self.opt.disable_automasking:\n outputs[\"identity_selection/{}\".format(scale)] = (\n idxs > identity_reprojection_loss.shape[1] - 1).float()\n\n loss += to_optimise.mean()\n\n mean_disp = disp.mean(2, True).mean(3, True)\n norm_disp = disp / (mean_disp + 1e-7)\n smooth_loss = get_smooth_loss(norm_disp, color)\n\n loss += self.opt.disparity_smoothness * smooth_loss / (2 ** scale)\n total_loss += loss\n losses[\"loss/{}\".format(scale)] = loss\n\n total_loss /= self.num_scales\n #total_loss = (1 - self.opt.perception_weight) * total_loss + self.opt.perception_weight * losses['perceptional_loss']\n #total_loss = total_loss + self.opt.perception_weight * losses['perceptional_loss']\n if self.opt.flipping_loss == True:\n losses['symmetry_loss'] = self.opt.flipping_loss_weight *self.flipped_loss(inputs, outputs) \n total_loss = total_loss + self.opt.perception_weight * losses['perceptional_loss'] + losses['symmetry_loss']\n else:\n total_loss = total_loss + self.opt.perception_weight * losses['perceptional_loss']\n\n \n #using new architecture\n #if self.opt.add_neighboring_frames == 1:\n # depth_loss_sum = 0\n # depth_loss_weights_sum = 0\n # if self.opt.depth_multiscale:\n # for i in self.opt.scales:\n # #testing before\n # depth_mid = torch.abs(self.neighboring_depth[(\"depth_previous\",i)] - \\\n # self.neighboring_depth[(\"depth_next\",i)]) / 2 if self.opt.respective_depth_constraint \\\n # else torch.abs(self.neighboring_depth[(\"depth_previous\",i)] - \\\n # self.neighboring_depth[(\"depth_next\",i)]) / 2 + self.neighboring_depth[(\"depth_next\",i)]\n # ## L2 loss\n # #depth_loss = nn.MSELoss()(torch.abs(self.neighboring_depth[(\"depth_previous\",i)] - outputs[(\"depth\",0,i)]), depth_mid) * self.depth_mask[0] + \\\n # # nn.MSELoss()(torch.abs(self.neighboring_depth[(\"depth_next\",i)] - outputs[(\"depth\",0,i)]), depth_mid)*self.depth_mask[1] if self.opt.respective_depth_constraint \\\n # # else nn.MSELoss()(depth_mid , outputs[(\"depth\",0,i)])\n # \n # depth_loss = torch.abs(torch.abs(self.neighboring_depth[(\"depth_previous\",i)] - outputs[(\"depth\",0,i)]) - depth_mid) * self.depth_mask[0] + \\\n # torch.abs(torch.abs(self.neighboring_depth[(\"depth_next\",i)] - outputs[(\"depth\",0,i)]) - depth_mid)*self.depth_mask[1] if self.opt.respective_depth_constraint \\\n # else torch.abs(depth_mid - outputs[(\"depth\",0,i)])\n # #depth_loss = torch.abs(torch.abs(self.neighboring_depth[(\"depth_previous\",i)] - outputs[(\"depth\",0,i)]) - depth_mid) + \\\n # # torch.abs(torch.abs(self.neighboring_depth[(\"depth_next\",i)] - outputs[(\"depth\",0,i)]) - depth_mid) if self.opt.respective_depth_constraint \\\n # # else torch.abs(depth_mid - outputs[(\"depth\",0,i)])\n # \n # if self.opt.distance_constraint_mask:\n # depth_lossing = self.opt.depth_loss_weight * (depth_loss * self.distance_constraint_mask).mean()\n # if not self.opt.disable_BCELoss:#when setting distance mask will doing this \n # 
depth_loss_weights = self.opt.distance_mask_weight* nn.BCELoss()\\\n # (self.distance_constraint_mask, \\\n # torch.ones(self.distance_constraint_mask.shape).cuda()) \\\n # if torch.cuda.is_available() \\\n # else \\\n # self.opt.distance_mask_weight * nn.BCELoss()\\\n # (self.distance_constraint_mask, \\\n # torch.ones(self.distance_constraint_mask.shape).cpu())\n # depth_loss_weights_sum += depth_loss_weights\n # if float(depth_loss_weights) == 0:\n # print(\"distance_mask is useless\")\n # else:\n # if self.opt.mask_plan == 0:\n # depth_lossing = (depth_loss * self.opt.depth_loss_weight).mean()\n # elif self.opt.mask_plan == 1:\n # depth_lossing = (depth_loss * self.distance_constraint_automask[0]).mean()\n # elif self.opt.mask_plan == 2:\n # depth_lossing = self.opt.depth_loss_weight * (depth_loss * self.distance_constraint_automask[0]).mean()\n # elif self.opt.mask_plan == 3:\n # depth_lossing = self.opt.depth_loss_weight * (depth_loss * self.distance_constraint_automask).mean()\n # depth_loss_sum += depth_lossing\n # else:\n # depth_mid = torch.abs(self.neighboring_depth[(\"depth_previous\",0)] - \\\n # self.neighboring_depth[(\"depth_next\",0)]) / 2 if self.opt.respective_depth_constraint \\\n # else torch.abs(self.neighboring_depth[(\"depth_previous\",0)] - \\\n # self.neighboring_depth[(\"depth_next\",0)]) / 2 + self.neighboring_depth[(\"depth_next\",0)]\n # for i in self.opt.scales:\n # ## L2 loss\n # #depth_loss = nn.MSELoss()(torch.abs(self.neighboring_depth[(\"depth_previous\",0)] - outputs[(\"depth\",0,i)]), depth_mid) * self.depth_mask[0] + \\\n # # nn.MSELoss()(torch.abs(self.neighboring_depth[(\"depth_next\",0)] - outputs[(\"depth\",0,i)]), depth_mid)*self.depth_mask[1] if self.opt.respective_depth_constraint \\\n # # else nn.MSELoss()(depth_mid, outputs[(\"depth\",0,i)])\n # \n # depth_loss = torch.abs(torch.abs(self.neighboring_depth[(\"depth_previous\",0)] - outputs[(\"depth\",0,i)]) - depth_mid) * self.depth_mask[0] + \\\n # torch.abs(torch.abs(self.neighboring_depth[(\"depth_next\",0)] - outputs[(\"depth\",0,i)]) - depth_mid)*self.depth_mask[1] if self.opt.respective_depth_constraint \\\n # else torch.abs(depth_mid - outputs[(\"depth\",0,i)])\n # #depth_loss = torch.abs(torch.abs(self.neighboring_depth[(\"depth_previous\",0)] - outputs[(\"depth\",0,i)]) - depth_mid) + \\\n # # torch.abs(torch.abs(self.neighboring_depth[(\"depth_next\",0)] - outputs[(\"depth\",0,i)]) - depth_mid) if self.opt.respective_depth_constraint\\\n # # else torch.abs(depth_mid - outputs[(\"depth\",0,i)])\n # if self.opt.distance_constraint_mask:\n # depth_lossing = self.opt.depth_loss_weight * (depth_loss * self.distance_constraint_mask).mean()\n # if not self.opt.disable_BCELoss:\n # depth_loss_weights = self.opt.distance_mask_weight* nn.BCELoss()\\\n # (self.distance_constraint_mask, \\\n # torch.ones(self.distance_constraint_mask.shape).cuda()) \\\n # if torch.cuda.is_available() \\\n # else \\\n # self.opt.distance_mask_weight * nn.BCELoss()\\\n # (self.distance_constraint_mask, \\\n # torch.ones(self.distance_constraint_mask.shape).cpu())\n # depth_loss_weights_sum += depth_loss_weights\n # else:\n # if self.opt.mask_plan == 0:\n # depth_lossing = (depth_loss * self.opt.depth_loss_weight).mean()\n # elif self.opt.mask_plan == 1:\n # depth_lossing = (depth_loss * self.distance_constraint_automask[0]).mean()\n # elif self.opt.mask_plan == 2:\n # depth_lossing = self.opt.depth_loss_weight * (depth_loss * self.distance_constraint_automask[0]).mean()\n # elif self.opt.mask_plan == 3:\n # 
depth_lossing = self.opt.depth_loss_weight * (depth_loss * self.distance_constraint_automask).mean()\n # depth_loss_sum += depth_lossing\n # depth_loss_sum /= 4\n # if depth_loss_sum == 0:\n # print(\"depth_loss is useless\")\n # depth_loss_weights_sum /= 4\n # if self.opt.combined_loss == True:\n # total_loss = (1-self.opt.depth_loss_weight) * total_loss + depth_loss_sum + depth_loss_weights_sum\n # else:\n # total_loss += depth_loss_sum + depth_loss_weights_sum\n losses[\"loss\"] = total_loss\n return losses", "def tf_loss_per_instance(self, states, internals, actions, terminal, reward, update):\n raise NotImplementedError", "def _compute_copy_loss(self, batch, output, target, align, attns):\n scores = self.generator(\n self._bottle(output), self._bottle(attns[\"copy\"]), batch[\"src_map\"]\n )\n loss = self.criterion(scores, align, target).sum()\n\n return loss, scores", "def _CreateWeightLoss(self):\n self.AssertInitialized()\n with self._BlockScope():\n return [tf.nn.l2_loss(v) for v in self._variables]", "def _build_loss_graph(self, labels, logits, weights, feed_dict):\n\n # Exclude labels with value 2 while computing loss.\n # This is needed for SST-2 prediction.\n # idx = tf.where(tf.less(labels, 2))\n # labels_chosen = tf.gather(labels, idx)\n # logits_chosen = tf.gather(logits, idx)\n\n mean_loss = self._mean_cross_entropy_loss(labels, logits, weights)\n logging.info('Mean Cross Entropy Loss: {0}'.format(mean_loss.eval(feed_dict)))\n\n # regularization_loss = self._regularization_loss()\n # logging.info('Regularization Loss: {0}'.format(regularization_loss.eval(feed_dict)))\n\n # Return Total Loss\n # total_loss = tf.add(cross_entropy_loss, regularization_loss)\n return mean_loss", "def compute_total_loss(self, trajectory):\n\n # smoothness loss and gradient\n smoothness_loss, smoothness_grad = self.compute_smooth_loss(\n trajectory.data, trajectory.start, trajectory.end\n )\n smoothness_loss_sum = smoothness_loss.sum()\n\n # obstacle loss and gradient\n (obstacle_loss, obstacle_grad) = self.compute_collision_loss(\n trajectory.data, trajectory.start, trajectory.end\n )\n\n # total cost and vectorial summation of gradient\n # cost = weighted_obs + weighted_smooth\n # grad = weighted_obs_grad + weighted_smooth_grad\n\n # weights\n weighted_smooth = self.cfg.smoothness_weight * smoothness_loss_sum\n weighted_smooth_grad = self.cfg.smoothness_weight * smoothness_grad\n\n # total costs\n cost = weighted_smooth\n grad = weighted_smooth_grad\n\n cost_trajectory = self.cfg.smoothness_weight * smoothness_loss[:-1]\n\n if self.cfg.goal_set_proj:\n # goal_distance = (\n # np.linalg.norm(\n # trajectory.data[-1] - trajectory.goal_set[trajectory.goal_idx]\n # )\n print(\"Not implemented - compute_total_loss\")\n goal_distance = 0\n else:\n goal_distance = 0\n\n # goal_distance = (\n # np.linalg.norm(\n # trajectory.data[-1] - trajectory.goal_set[trajectory.goal_idx]\n # )\n # if self.cfg.goal_set_proj\n # else 0\n # )\n\n # print(goal_distance)\n terminate = (\n self.cfg.pre_terminate\n and (goal_distance < 0.01)\n and smoothness_loss_sum < self.cfg.terminate_smooth_loss\n )\n\n info = {\n \"smooth\": smoothness_loss_sum,\n \"gradient\": grad,\n \"cost\": cost,\n \"grad\": np.linalg.norm(grad),\n \"terminate\": terminate,\n \"goal_distance\": goal_distance,\n \"cost_trajectory\": cost_trajectory,\n }\n\n return cost, grad, info", "def compute_gradients(self,loss):\n\t\tgrads = T.grad(loss, self.rnn.params)\n\t\treturn zip(self.rnn.params, grads)", "def train_one_epoch(self):\n loss, n = 0, 
0\n for x, y in self.train_dl:\n y_hat = self.model.forward(x)\n batch_loss = self.model.criterion.forward(y_hat, y).sum()\n self.model.backward()\n self.optimizer.step()\n loss += batch_loss\n n += len(y)\n return loss / n", "def __deepcopy__(self, memo):\n unique_name = datetime.now().strftime(\"%Y%m%d_%H%M%S_%f\")\n environment_model_copy = MultilayerNNEnvModel('env_copy_'+unique_name,\n self.sess,\n self.observation_space,\n self.action_space,\n self.learning_rate,\n self.env_model_save_path,\n False)\n env_model_weights = []\n for param in self.env_model_params:\n env_model_weights.append(param.eval(session = self.sess))\n environment_model_copy.set_env_model_weights(env_model_weights)\n return environment_model_copy", "def build_computation_graph(self):\n # Defining placeholders for obs/states, actions and advantage values.\n self.sy_ob_no, self.sy_ac_na, self.sy_adv_n = self.define_placeholders()\n # Computing the logits.\n self.policy_parameters = self.policy_forward_pass(self.sy_ob_no)\n\n # Sampling an action according to our policy.\n self.sy_sampled_ac = self.sample_action(self.policy_parameters)\n\n # Computing log_probs of chosen actions.\n self.sy_logprob_n = self.get_log_prob(self.policy_parameters, self.sy_ac_na)\n\n # Defining the loss function.\n # http://rail.eecs.berkeley.edu/deeprlcourse/static/slides/lec-5.pdf\n loss = tf.reduce_mean(self.sy_logprob_n * self.sy_adv_n)\n self.update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(loss)\n\n if self.nn_baseline:\n # Create the value network.\n self.baseline_prediction = tf.squeeze(build_mlp(\n self.sy_ob_no,\n 1,\n \"nn_baseline\",\n n_layers=self.n_layers,\n size=self.size))\n # Placeholder for target values which will be used in the loss function for value network.\n self.sy_target_n = tf.placeholder(dtype=tf.float32,\n shape=[None],\n name='sy_target_n')\n # Define the loss function for value network. Basically MSE loss.\n baseline_loss = tf.reduce_mean((self.baseline_prediction - self.sy_target_n) ** 2)\n self.baseline_update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(baseline_loss)", "def analyze_clone(\n patient, clone_id,\n clones_mutations, estimates,\n delta_lb=0.0, delta_ub=4.0,\n **kwargs\n):\n mutations = clones_mutations[clones_mutations == clone_id].index\n clone = patient.get_mutations_subset(mutations)\n clone_estimates = estimates.loc[mutations]\n optimizer = Random(\n clone, clone_estimates,\n delta_lb=delta_lb, delta_ub=delta_ub\n )\n return Clone(clone_id, *optimizer.optimize(**kwargs))", "def make_copy_with_grads(self):\n ps = [p.clone().detach().requires_grad_() for p in self.Parameters()]\n return Normal(*ps)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute clone losses and gradients for the given list of `Clones`.
def optimize_clones(clones, optimizer,
                    regularization_losses=None,
                    **kwargs):
  grads_and_vars = []
  clones_losses = []
  num_clones = len(clones)
  if regularization_losses is None:
    regularization_losses = tf.compat.v1.get_collection(
        tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES)
  for clone in clones:
    with tf.name_scope(clone.scope):
      clone_loss, clone_grad = _optimize_clone(
          optimizer, clone, num_clones, regularization_losses, **kwargs)
      if clone_loss is not None:
        clones_losses.append(clone_loss)
        grads_and_vars.append(clone_grad)
      # Only use regularization_losses for the first clone
      regularization_losses = None
  # Compute the total_loss summing all the clones_losses.
  total_loss = tf.add_n(clones_losses, name='total_loss')
  # Sum the gradients across clones.
  grads_and_vars = _sum_clones_gradients(grads_and_vars)
  return total_loss, grads_and_vars
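To show how the three functions in these rows fit together, a hedged end-to-end sketch modeled on TF-Slim's model_deploy usage; the `from deployment import model_deploy` path, the DeploymentConfig settings, and the placeholder shapes are assumptions, and the module is also assumed to supply create_clones and the _sum_clones_gradients helper used by optimize_clones (neither is defined in this row).

import tensorflow as tf
from deployment import model_deploy  # assumed path (tensorflow/models, research/slim)

def model_fn(images, labels):
  # Each clone builds the model once; its loss lands in the LOSSES collection.
  w = tf.compat.v1.get_variable('w', [784, 10])
  b = tf.compat.v1.get_variable('b', [10])
  logits = tf.matmul(images, w) + b
  tf.compat.v1.losses.sparse_softmax_cross_entropy(labels, logits)
  return logits

with tf.Graph().as_default():
  config = model_deploy.DeploymentConfig(num_clones=2, clone_on_cpu=True)
  images = tf.compat.v1.placeholder(tf.float32, [None, 784])
  labels = tf.compat.v1.placeholder(tf.int64, [None])
  clones = model_deploy.create_clones(config, model_fn, args=[images, labels])
  optimizer = tf.compat.v1.train.MomentumOptimizer(learning_rate=0.01,
                                                   momentum=0.9)
  total_loss, grads_and_vars = model_deploy.optimize_clones(clones, optimizer)
  train_op = optimizer.apply_gradients(grads_and_vars)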
[ "def _optimize_clone(optimizer, clone, num_clones, regularization_losses,\n **kwargs):\n sum_loss = _gather_clone_loss(clone, num_clones, regularization_losses)\n clone_grad = None\n if sum_loss is not None:\n with tf.device(clone.device):\n clone_grad = optimizer.compute_gradients(sum_loss, **kwargs)\n return sum_loss, clone_grad", "def _gather_clone_loss(clone, num_clones, regularization_losses):\n # The return value.\n sum_loss = None\n # Individual components of the loss that will need summaries.\n clone_loss = None\n regularization_loss = None\n # Compute and aggregate losses on the clone device.\n with tf.device(clone.device):\n all_losses = []\n clone_losses = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.LOSSES,\n clone.scope)\n if clone_losses:\n clone_loss = tf.add_n(clone_losses, name='clone_loss')\n if num_clones > 1:\n clone_loss = tf.div(clone_loss, 1.0 * num_clones,\n name='scaled_clone_loss')\n all_losses.append(clone_loss)\n if regularization_losses:\n regularization_loss = tf.add_n(regularization_losses,\n name='regularization_loss')\n all_losses.append(regularization_loss)\n if all_losses:\n sum_loss = tf.add_n(all_losses)\n # Add the summaries out of the clone device block.\n if clone_loss is not None:\n tf.compat.v1.summary.scalar('/'.join(filter(None,\n ['Losses', clone.scope, 'clone_loss'])),\n clone_loss)\n if regularization_loss is not None:\n tf.compat.v1.summary.scalar('Losses/regularization_loss',\n regularization_loss)\n return sum_loss", "def analyze_clone(\n patient, clone_id,\n clones_mutations, estimates,\n delta_lb=0.0, delta_ub=4.0,\n **kwargs\n):\n mutations = clones_mutations[clones_mutations == clone_id].index\n clone = patient.get_mutations_subset(mutations)\n clone_estimates = estimates.loc[mutations]\n optimizer = Random(\n clone, clone_estimates,\n delta_lb=delta_lb, delta_ub=delta_ub\n )\n return Clone(clone_id, *optimizer.optimize(**kwargs))", "def manually_compute_losses(numpy_inputs, inputs_placeholder, loss, num_workers,\n params):\n batch_size = params.batch_size * params.num_gpus\n assert numpy_inputs.shape[0] % (num_workers * batch_size) == 0\n l2_loss = tf.add_n([tf.nn.l2_loss(x) for x in tf.trainable_variables()])\n total_loss = loss + params.weight_decay * l2_loss\n reported_loss = (loss if params.loss_type_to_report == 'base_loss'\n else total_loss)\n gradient_multiplier = 1\n if params.variable_update in ('replicated', 'distributed_all_reduce'):\n # In certain variable updates, tf_cnn_benchmarks add the gradients of the\n # GPUs instead of taking their mean, making the gradients effectively\n # params.num_gpu times higher.\n # TODO(b/62722498): Make all variable updates consistent.\n gradient_multiplier = params.num_gpus\n\n opt = benchmark_cnn.get_optimizer(params, params.init_learning_rate)\n grad_vars = opt.compute_gradients(\n total_loss, grad_loss=tf.constant(gradient_multiplier, dtype=tf.float32))\n grads = [g for g, _ in grad_vars]\n # We apply gradients from a placeholder. 
That way, we can first compute the\n # gradients from each worker, then afterwards apply them one by one by feeding\n # them into the placeholder.\n placeholder_grad_vars = [(tf.placeholder(g.dtype, g.shape), v)\n for g, v in grad_vars]\n placeholder_grads = [g for g, _ in placeholder_grad_vars]\n apply_grads_op = opt.apply_gradients(placeholder_grad_vars)\n\n batch_iterators = [_worker_batches_in_numpy_array(numpy_inputs, batch_size,\n shift_ratio=i / num_workers)\n for i in range(num_workers)]\n # Set the GPU count to 0, to avoid taking all the GPU memory. Unfortunately,\n # doing so still takes up about ~1GB for some reason.\n config = tf.ConfigProto(device_count={'GPU': 0})\n config.graph_options.rewrite_options.pin_to_host_optimization = (\n rewriter_config_pb2.RewriterConfig.OFF)\n with tf.Session(config=config) as sess:\n sess.run(tf.global_variables_initializer())\n losses = [[] for _ in range(num_workers)]\n for i in range(params.num_batches):\n computed_grads = []\n for j in range(num_workers):\n batch_feed = next(batch_iterators[j])\n batch_feed = batch_feed / 127.5 - 1\n worker_loss, worker_grads = sess.run((reported_loss, grads),\n {inputs_placeholder: batch_feed})\n losses[j].append(worker_loss)\n computed_grads.append(worker_grads)\n for worker_grads in computed_grads:\n # TODO(reedwm): With multiple workers, applying the gradients\n # sequentially per worker is not equivalent to what tf_cnn_benchmarks\n # does when the optmizer is not SGD. Therefore, this currently does not\n # work currently when num_workers > 1 and params.optimizer != 'sgd'.\n feed_dict = dict(zip(placeholder_grads, worker_grads))\n sess.run(apply_grads_op, feed_dict)\n return losses", "def clone(self, **kwargs) -> Tuple[Tensor, Tensor]:\n clone, compas = self.will_clone, self.compas_clone\n self.cum_rewards[clone] = self.cum_rewards[compas][clone]\n self.id_walkers[clone] = self.id_walkers[compas][clone]\n self.virtual_rewards[clone] = self.virtual_rewards[compas][clone]\n return clone, compas", "def _compute_gradients(loss_tensor, list_of_input_tensors):\n\n list_of_gradient_tensors = tensorflow.gradients(\n loss_tensor, list_of_input_tensors)\n\n for i in range(len(list_of_gradient_tensors)):\n if list_of_gradient_tensors[i] is not None:\n continue\n\n list_of_gradient_tensors[i] = tensorflow.zeros_like(\n list_of_input_tensors[i]\n )\n\n return list_of_gradient_tensors", "def create_clones(config, model_fn, args=None, kwargs=None):\n clones = []\n args = args or []\n kwargs = kwargs or {}\n with slim.arg_scope([slim.model_variable, slim.variable],\n device=config.variables_device()):\n # Create clones.\n for i in range(0, config.num_clones):\n with tf.name_scope(config.clone_scope(i)) as clone_scope:\n clone_device = config.clone_device(i)\n with tf.device(clone_device):\n with tf.compat.v1.variable_scope(tf.compat.v1.get_variable_scope(),\n reuse=True if i > 0 else None):\n outputs = model_fn(*args, **kwargs)\n clones.append(Clone(outputs, clone_scope, clone_device))\n return clones", "def sgd_update(trainables, learning_rate=1e-2):\n for node in trainables:\n node.value -= learning_rate * node.gradients[node]", "def clone_inputs_retaining_gradness(example_inputs):\n cloned_inputs = clone_inputs(example_inputs)\n for idx in range(len(example_inputs)):\n if isinstance(cloned_inputs[idx], torch.Tensor):\n cloned_inputs[idx].requires_grad_(example_inputs[idx].requires_grad)\n return cloned_inputs", "def plot_loss(self, loss_list: list):\n\n plt.figure(figsize=(12, 5))\n loss_G = []\n loss_D = []\n\n for 
loss in loss_list:\n if self.device == \"cuda\":\n loss_G.append(loss[\"G\"].cpu().detach().numpy())\n loss_D.append(loss[\"D\"].cpu().detach().numpy())\n else:\n loss_G.append(loss[\"G\"].detach().numpy())\n loss_D.append(loss[\"D\"].detach().numpy())\n\n plt.plot(loss_G, label=\"Generator\")\n plt.plot(loss_D, label=\"Discriminator\")\n\n plt.title(\"Discriminator and generator loss\")\n plt.ylabel(\"loss\")\n plt.xlabel(\"epochs\")\n plt.show()", "def _compute_gradients(self, loss, var_list, grad_loss=None, tape=None):\n # TODO(josh11b): Test that we handle weight decay in a reasonable way.\n if not callable(loss) and tape is None:\n raise ValueError(\"`tape` is required when a `Tensor` loss is passed.\")\n tape = tape if tape is not None else backprop.GradientTape()\n\n if callable(loss):\n with tape:\n if not callable(var_list):\n tape.watch(var_list)\n loss = loss()\n if callable(var_list):\n var_list = var_list()\n\n with tape:\n loss = self._transform_loss(loss)\n\n var_list = nest.flatten(var_list)\n with ops.name_scope_v2(self._name + \"/gradients\"):\n grads_and_vars = self._get_gradients(tape, loss, var_list, grad_loss)\n\n self._assert_valid_dtypes([\n v for g, v in grads_and_vars\n if g is not None and v.dtype != dtypes.resource\n ])\n\n return grads_and_vars", "def concat(var_list:list):\n assert len(var_list)>0, 'Can not concatenate an empty list'\n input_dim = var_list[0].grad.shape[1] #grad shape of the first variable in the list\n concat_val, concat_grad = [], []\n for var in var_list:\n assert var.grad.shape[1] == input_dim, 'trying to concatenate variables from a different input space'\n if isinstance(var.val, float):\n concat_val.append(np.array(var.val).reshape(-1,1))\n else: #We already have an array\n concat_val.append(var.val)\n concat_grad.append(var.grad)\n #print(var.grad.shape)\n #print(len(concat_val))\n out_val = np.concatenate(concat_val)# We have list that must be changed\n out_grad = np.concatenate(concat_grad, axis=0)\n return Variable(val=out_val, grad=out_grad)", "def compute_gradients(self, loss, var_list=None, **kwargs):\n grads_and_vars = self._wrapped_optimizer.compute_gradients(loss,\n var_list=var_list,\n **kwargs)\n if self._outfeed_optimizer_mode == OutfeedOptimizerMode.AFTER_COMPUTE:\n enqueue = self._maybe_enqueue(grads_and_vars)\n if enqueue:\n # The enqueue op must be executed.\n # We cannot return it here so we must use control dependencies.\n # This puts the enqueue op into the control flow by attaching\n # it to an identity op on the first gradient.\n with ops.control_dependencies([enqueue]):\n return [(array_ops.identity(x) if i == 0 else x,\n y)\n for i, (x, y) in enumerate(grads_and_vars)]\n return grads_and_vars", "def _operate_on_datasets(self, ops_list):\n for data_source in self._config_dict['data_sources']:\n for op in ops_list:\n self.logger.info('Computing %s on %s', op, data_source)\n func = getattr(self.data_sources[data_source], op)\n func()\n self.logger.info('ds = %s', self.data_sources[data_source].ds)\n\n # write to cache\n if self.cache_data:\n if op == 'compute_mon_climatology':\n if not (self.data_sources[data_source]._is_mon_climo or self.data_sources[data_source]._is_ann_climo):\n self.data_sources[data_source].cache_dataset(self._cached_locations[data_source],\n self._cached_var_dicts[data_source])", "def forward(self, inputs: List[float]) -> List[float]:\n self.__inputs__ = inputs\n return [neuron.compute_output(self.__inputs__)\n for neuron in self.__neurons__]", "def assign_clones(titles, input_dat, region_data, 
user_input, dat_numbered, REGEX):\r\n\r\n if input_dat.clone_lists.renames != None:\r\n progress = 0\r\n progress_old = 0\r\n progress_total = len(input_dat.clone_lists.renames)\r\n\r\n for key, values in input_dat.clone_lists.renames.items():\r\n # Find the full names of the entries in the clone list\r\n clones = []\r\n\r\n progress += 1\r\n progress_percent = int(progress/progress_total*100)\r\n\r\n if progress_old != progress_percent:\r\n if old_windows() != True:\r\n sys.stdout.write(\"\\033[K\")\r\n print(f'* Assigning clones from clone lists... [{str(progress_percent)}%]', sep='', end='\\r', flush=True)\r\n\r\n # Compensate if the key title is missing, either because it's been\r\n # removed by Retool, or doesn't exist in the dat.\r\n if not get_raw_title(key) in titles.all:\r\n printverbose(\r\n user_input.verbose,\r\n f'{Font.warning}* Title in clone list not found in dat or selected regions: '\r\n f'{Font.warning_bold}{key}{Font.end}')\r\n key = f'|* Missing *|: {key}'\r\n titles.all[get_raw_title(key)] = []\r\n\r\n # Populate groups for the related titles in the clone list\r\n group = []\r\n\r\n for title in titles.all[get_raw_title(key)]:\r\n group.append(title.short_name_lower)\r\n if title.short_name_lower == key.lower():\r\n clones.append((title, 1))\r\n\r\n for value in values:\r\n if len(value) < 2:\r\n printverbose(\r\n user_input.verbose,\r\n f'{Font.warning}* {Font.warning}Problem in clone list: at '\r\n f'least two values are required in the array for the '\r\n f'{Font.warning_bold}{key}{Font.warning} key to assign a '\r\n f'clone. Ignoring the key.{Font.end}')\r\n elif type(value) != list:\r\n printverbose(\r\n user_input.verbose,\r\n f'{Font.warning}* {Font.warning}Problem in clone list: the '\r\n f'following clone is not in a list: '\r\n f'{Font.warning_bold}{key}{Font.warning}. Ignoring.{Font.end}')\r\n else:\r\n clone_title, clone_priority = value[0], value[1]\r\n\r\n if get_raw_title(clone_title) in titles.all:\r\n group = []\r\n\r\n for title in titles.all[get_raw_title(clone_title)]:\r\n group.append(title.short_name_lower)\r\n if title.short_name_lower == clone_title.lower():\r\n clones.append((title, clone_priority))\r\n\r\n if clone_title.lower() not in group:\r\n printverbose(\r\n user_input.verbose,\r\n f'{Font.warning}* Title in clone list not found in dat or selected regions: '\r\n f'{Font.warning_bold}{clone_title}{Font.end}')\r\n else:\r\n printverbose(\r\n user_input.verbose,\r\n f'{Font.warning}* Title in clone list not found in dat or selected regions: '\r\n f'{Font.warning_bold}{clone_title}{Font.end}')\r\n\r\n # Figure out which clone to make a parent, based on region and language\r\n priority_range = range(0, 10)\r\n\r\n parent = ''\r\n\r\n for i in priority_range:\r\n found_parent = False\r\n found_superset = False\r\n\r\n if len(clones) > 1:\r\n for region in user_input.user_region_order:\r\n for clone in sorted(clones, key=operator.itemgetter(1)):\r\n clone_title, clone_priority = clone[0], clone[1]\r\n\r\n # If a title priority is set to 0 or -1 and has the same language as a higher region\r\n # priority, don't overwrite it.\r\n if (\r\n clone_title.cloneof == ''\r\n and clone_priority >= priority_range[0]):\r\n if clone_priority == -1 or clone_priority == 0:\r\n if (\r\n bool(\r\n re.search(region_data.implied_language[region], # Not sure if this is the right thing to do.\r\n clone_title.languages)) == True\r\n and region_data.implied_language[region] != ''):\r\n # Check if title is preproduction or bad. 
If so, check if\r\n # production/good titles are available.\r\n if bool(re.search(REGEX.preproduction_bad, clone_title.full_name_lower)) == True:\r\n for another_clone in sorted(clones, key=operator.itemgetter(1)):\r\n if bool(re.search(REGEX.preproduction_bad, another_clone[0].full_name_lower)) == False:\r\n found_parent = True\r\n found_superset = True\r\n parent = another_clone\r\n break\r\n\r\n # Check for Sega CD 32X vs Mega-CD 32X\r\n if (\r\n bool(re.search(REGEX.sega32x, clone_title.full_name_lower)) == True\r\n and 'USA' in user_input.user_region_order\r\n and 'Europe' in user_input.user_region_order\r\n ):\r\n for another_clone in sorted(clones, key=operator.itemgetter(1)):\r\n if user_input.user_region_order.index('Europe') < user_input.user_region_order.index('USA'):\r\n if 'mega-cd 32x' in another_clone[0].full_name_lower:\r\n found_parent = True\r\n found_superset = True\r\n parent = another_clone\r\n break\r\n\r\n if found_parent == True:\r\n break\r\n else:\r\n found_parent = True\r\n found_superset = True\r\n parent = clone\r\n break\r\n elif region in clone_title.regions:\r\n # Check if title is preproduction or bad. If so, check if production/good\r\n # titles are available.\r\n if bool(re.search(REGEX.preproduction_bad, clone_title.full_name_lower)) == True:\r\n for another_clone in sorted(clones, key=operator.itemgetter(1)):\r\n if bool(re.search(REGEX.preproduction_bad, another_clone[0].full_name_lower)) == False:\r\n found_parent = True\r\n parent = another_clone\r\n break\r\n if found_parent == True:\r\n break\r\n else:\r\n found_parent = True\r\n parent = clone\r\n break\r\n if found_parent == True: break\r\n if found_parent == True: break\r\n\r\n if parent != '':\r\n # Do extra parent selection if a short name in the clone list\r\n # has more than one match, or there's more than one clone per\r\n # region.\r\n for clone in clones:\r\n if (\r\n clone[0].primary_region == parent[0].primary_region\r\n and clone[1] <= parent[1]\r\n and clone[0].full_name_lower != parent[0].full_name_lower\r\n and clone[1] >= priority_range[0]\r\n and len(clones) > 1\r\n and found_superset == False):\r\n if (\r\n 'Dreamcast' in input_dat.name\r\n or 'Saturn' in input_dat.name\r\n or 'Sega CD' in input_dat.name\r\n or 'Panasonic - 3DO' in input_dat.name):\r\n ring_code = True\r\n else:\r\n ring_code = False\r\n\r\n parents = [clone[0], parent[0]]\r\n parents = choose_parent(parents, region_data, user_input, dat_numbered, REGEX, ring_code, True)\r\n\r\n for new_parent in parents:\r\n if new_parent.cloneof == '':\r\n found_parent = True\r\n parent = (new_parent, parent[1])\r\n break\r\n\r\n if found_parent == True: break\r\n\r\n # Parent/clone assignment\r\n for clone in sorted(clones, key=operator.itemgetter(1)):\r\n clone_title, clone_priority = clone[0], clone[1]\r\n\r\n if clone_title.group in titles.all:\r\n for disc_title in titles.all[clone_title.group]:\r\n if (\r\n clone_title.full_name_lower == disc_title.full_name_lower\r\n and clone_title.full_name_lower == parent[0].full_name_lower\r\n and clone_priority >= priority_range[0]):\r\n disc_title.cloneof = ''\r\n disc_title.cloneof_group = ''\r\n elif (\r\n clone_title.full_name_lower == disc_title.full_name_lower\r\n and clone_title.full_name_lower != parent[0].full_name_lower\r\n and clone_priority >= priority_range[0]):\r\n\r\n if dat_numbered == False:\r\n disc_title.cloneof = parent[0].full_name\r\n else:\r\n disc_title.cloneof = parent[0].numbered_name\r\n\r\n disc_title.cloneof_group = parent[0].group\r\n progress_old = 
progress_percent\r\n return titles", "def calculate_gradients(self,inputs,deltas):\r\n part_gradients = dict()\r\n part_gradients[\"Weights\"] = [self.cells[idx].weightcompute(inputs,deltas[idx]) for idx in range(0,len(self.cells))]\r\n part_gradients[\"Baises\"] = [self.cells[idx].baiscompute(inputs,deltas[idx]) for idx in range(0,len(self.cells))]\r\n part_gradients[\"PrevLayer\"] = [self.cells[idx].lastlayercompute(inputs,deltas[idx]) for idx in range(0,len(self.cells))]\r\n return part_gradients", "def compute_gradients(self, *args, **kwargs):\n gradients = self._optimizer.compute_gradients(*args, **kwargs)\n # if os.environ.get('HOROVOD_DEBUG', False):\n # print(f\"==Debug== The model has {len(gradients)} gradient tensors\")\n if size() > 1:\n grads, vars = zip(*gradients)\n avg_grads = self._allreduce_grads(grads)\n return list(zip(avg_grads, vars))\n else:\n return gradients", "def get_loss_gradients(model, loss_blobs, weight):\n loss_gradients = {}\n for b in loss_blobs:\n loss_grad = model.net.ConstantFill(\n b, [b + '_grad'], value=1.0 * weight)\n loss_gradients[str(b)] = str(loss_grad)\n return loss_gradients" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deploys a Slim-constructed model across multiple clones. The deployment options are specified by the config object and support deploying one or several clones on different GPUs and one or several replicas of such clones. The argument `model_fn` is called `config.num_clones` times to create the model clones as `model_fn(*args, **kwargs)`. The optional argument `optimizer` is an `Optimizer` object. If not `None`, the deployed model is configured for training with that optimizer. If `config` specifies deployment on multiple replicas then the default tensorflow device is set appropriately for each call to `model_fn` and for the
def deploy(config, model_fn, args=None, kwargs=None, optimizer=None,
           summarize_gradients=False):
  # Gather initial summaries.
  summaries = set(tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.SUMMARIES))

  # Create Clones.
  clones = create_clones(config, model_fn, args, kwargs)
  first_clone = clones[0]

  # Gather update_ops from the first clone. These contain, for example,
  # the updates for the batch_norm variables created by model_fn.
  update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS,
                                           first_clone.scope)

  train_op = None
  total_loss = None
  with tf.device(config.optimizer_device()):
    if optimizer:
      # Place the global step on the device storing the variables.
      with tf.device(config.variables_device()):
        global_step = tf.compat.v1.train.get_or_create_global_step()

      # Compute the gradients for the clones.
      total_loss, clones_gradients = optimize_clones(clones, optimizer)

      if clones_gradients:
        if summarize_gradients:
          # Add summaries to the gradients.
          summaries |= set(_add_gradients_summaries(clones_gradients))

        # Create gradient updates.
        grad_updates = optimizer.apply_gradients(clones_gradients,
                                                 global_step=global_step)
        update_ops.append(grad_updates)

        update_op = tf.group(*update_ops)
        with tf.control_dependencies([update_op]):
          train_op = tf.identity(total_loss, name='train_op')
    else:
      clones_losses = []
      regularization_losses = tf.get_collection(
          tf.GraphKeys.REGULARIZATION_LOSSES)
      for clone in clones:
        with tf.name_scope(clone.scope):
          clone_loss = _gather_clone_loss(clone, len(clones),
                                          regularization_losses)
          if clone_loss is not None:
            clones_losses.append(clone_loss)
          # Only use regularization_losses for the first clone
          regularization_losses = None
      if clones_losses:
        total_loss = tf.add_n(clones_losses, name='total_loss')

    # Add the summaries from the first clone. These contain the summaries
    # created by model_fn and either optimize_clones() or _gather_clone_loss().
    summaries |= set(tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.SUMMARIES,
                                                 first_clone.scope))

    if total_loss is not None:
      # Add total_loss to summary.
      summaries.add(tf.compat.v1.summary.scalar('total_loss', total_loss))

    if summaries:
      # Merge all summaries together.
      summary_op = tf.compat.v1.summary.merge(list(summaries),
                                              name='summary_op')
    else:
      summary_op = None

  return DeployedModel(train_op, summary_op, total_loss, clones)
[ "def create_clones(config, model_fn, args=None, kwargs=None):\n clones = []\n args = args or []\n kwargs = kwargs or {}\n with slim.arg_scope([slim.model_variable, slim.variable],\n device=config.variables_device()):\n # Create clones.\n for i in range(0, config.num_clones):\n with tf.name_scope(config.clone_scope(i)) as clone_scope:\n clone_device = config.clone_device(i)\n with tf.device(clone_device):\n with tf.compat.v1.variable_scope(tf.compat.v1.get_variable_scope(),\n reuse=True if i > 0 else None):\n outputs = model_fn(*args, **kwargs)\n clones.append(Clone(outputs, clone_scope, clone_device))\n return clones", "def create_model_deployment(\n self,\n user,\n name,\n model,\n version,\n description=None,\n stage_or_deploy=\"stage\",\n min_replicas=0,\n max_concurrent_requests=0,\n wait_for_completion=True,\n ):\n\n assert stage_or_deploy in [\n \"stage\",\n \"deploy\",\n ], \"Invalid value for stage_or_deploy parameter.\"\n\n run = DkubeServing(user, name=name, description=description)\n run.update_serving_model(model, version=version)\n run.update_autoscaling_config(min_replicas, max_concurrent_requests)\n\n dkubever = self.dkubeinfo[\"version\"]\n if pversion.parse(dkubever) < pversion.parse(\"2.3.0.0\"):\n mcitem = self.get_modelcatalog_item(\n user, modelcatalog=model, version=version\n )\n run.update_serving_image(\n image_url=mcitem[\"serving\"][\"images\"][\"serving\"][\"image\"][\"path\"]\n )\n else:\n mcitem = super().get_model_catalog(user, model)\n versions = mcitem[\"versions\"]\n for v in versions:\n if v[\"version\"] == version:\n serving_image = v[\"serving\"][\"images\"][\"serving\"][\"image\"][\"path\"]\n run.update_serving_image(image_url=serving_image)\n\n if stage_or_deploy == \"stage\":\n super().stage_model(run)\n if stage_or_deploy == \"deploy\":\n super().deploy_model(run)\n\n while wait_for_completion:\n status = super().get_run(\"inference\", run.user, run.name, fields=\"status\")\n state, reason = status[\"state\"], status[\"reason\"]\n if state.lower() in [\"complete\", \"failed\", \"error\", \"running\"]:\n print(\n \"run {} - completed with state {} and reason {}\".format(\n run.name, state, reason\n )\n )\n break\n else:\n print(\n \"run {} - waiting for completion, current state {}\".format(\n run.name, state\n )\n )\n time.sleep(self.wait_interval)", "def build_fed_avg_client_work(\n model_fn: Callable[[], model_lib.Model],\n optimizer_fn: Union[optimizer_base.Optimizer,\n Callable[[], tf.keras.optimizers.Optimizer]],\n use_experimental_simulation_loop: bool = False\n) -> client_works.ClientWorkProcess:\n with tf.Graph().as_default():\n # Wrap model construction in a graph to avoid polluting the global context\n # with variables created for this model.\n model = model_fn()\n data_type = computation_types.SequenceType(model.input_spec)\n weights_type = model_utils.weights_type_from_model(model)\n\n if isinstance(optimizer_fn, optimizer_base.Optimizer):\n\n @computations.tf_computation(weights_type, data_type)\n def client_update_computation(initial_model_weights, dataset):\n client_update = build_client_update_with_tff_optimizer(\n model_fn, use_experimental_simulation_loop)\n return client_update(optimizer_fn, initial_model_weights, dataset)\n\n else:\n\n @computations.tf_computation(weights_type, data_type)\n def client_update_computation(initial_model_weights, dataset):\n optimizer = optimizer_fn()\n client_update = build_client_update_with_keras_optimizer(\n model_fn, use_experimental_simulation_loop)\n return client_update(optimizer, 
initial_model_weights, dataset)\n\n @computations.federated_computation\n def init_fn():\n return intrinsics.federated_value((), placements.SERVER)\n\n @computations.federated_computation(\n init_fn.type_signature.result, computation_types.at_clients(weights_type),\n computation_types.at_clients(data_type))\n def next_fn(state, weights, client_data):\n client_result, model_outputs, stat_output = intrinsics.federated_map(\n client_update_computation, (weights, client_data))\n train_metrics = model.federated_output_computation(model_outputs)\n stat_metrics = intrinsics.federated_sum(stat_output)\n measurements = intrinsics.federated_zip(\n collections.OrderedDict(train=train_metrics, stat=stat_metrics))\n return measured_process.MeasuredProcessOutput(state, client_result,\n measurements)\n\n return client_works.ClientWorkProcess(init_fn, next_fn)", "def iterative_process_builder(\n model_fn: Callable[[], tff.learning.Model],\n ) -> tff.templates.IterativeProcess:\n\n logging.info('Trainable weights:')\n for weight in model_fn().weights.trainable:\n logging.info('name: %s shape: %s', weight.name, weight.shape)\n\n if FLAGS.uniform_weighting:\n client_weighting = tff.learning.ClientWeighting.UNIFORM\n elif FLAGS.task == 'shakespeare' or FLAGS.task == 'stackoverflow_nwp':\n\n def client_weighting(local_outputs):\n return tf.cast(tf.squeeze(local_outputs['num_tokens']), tf.float32)\n else:\n client_weighting = None\n\n if FLAGS.noise_multiplier is None:\n if FLAGS.uniform_weighting:\n aggregation_factory = tff.aggregators.UnweightedMeanFactory()\n else:\n aggregation_factory = tff.aggregators.MeanFactory()\n if FLAGS.clip is not None:\n if FLAGS.clip <= 0:\n raise ValueError('clip must be positive if clipping is enabled.')\n if FLAGS.adaptive_clip_learning_rate is None:\n clip = FLAGS.clip\n else:\n if FLAGS.adaptive_clip_learning_rate <= 0:\n raise ValueError('adaptive_clip_learning_rate must be positive if '\n 'adaptive clipping is enabled.')\n clip = tff.aggregators.PrivateQuantileEstimationProcess.no_noise(\n initial_estimate=FLAGS.clip,\n target_quantile=FLAGS.target_unclipped_quantile,\n learning_rate=FLAGS.adaptive_clip_learning_rate)\n aggregation_factory = tff.aggregators.clipping_factory(\n clip, aggregation_factory)\n else:\n if not FLAGS.uniform_weighting:\n raise ValueError(\n 'Differential privacy is only implemented for uniform weighting.')\n if FLAGS.noise_multiplier <= 0:\n raise ValueError('noise_multiplier must be positive if DP is enabled.')\n if FLAGS.clip is None or FLAGS.clip <= 0:\n raise ValueError('clip must be positive if DP is enabled.')\n if FLAGS.adaptive_clip_learning_rate is None:\n aggregation_factory = tff.aggregators.DifferentiallyPrivateFactory.gaussian_fixed(\n noise_multiplier=FLAGS.noise_multiplier,\n clients_per_round=FLAGS.clients_per_round,\n clip=FLAGS.clip)\n else:\n if FLAGS.adaptive_clip_learning_rate <= 0:\n raise ValueError('adaptive_clip_learning_rate must be positive if '\n 'adaptive clipping is enabled.')\n aggregation_factory = tff.aggregators.DifferentiallyPrivateFactory.gaussian_adaptive(\n noise_multiplier=FLAGS.noise_multiplier,\n clients_per_round=FLAGS.clients_per_round,\n initial_l2_norm_clip=FLAGS.clip,\n target_unclipped_quantile=FLAGS.target_unclipped_quantile,\n learning_rate=FLAGS.adaptive_clip_learning_rate)\n\n #add stuff for compression\n if FLAGS.use_compression:\n # We create a `MeasuredProcess` for broadcast process and a\n # `MeasuredProcess` for aggregate process by providing the\n # `_broadcast_encoder_fn` and 
`_mean_encoder_fn` to corresponding utilities.\n # The fns are called once for each of the model weights created by\n # tff_model_fn, and return instances of appropriate encoders.\n encoded_broadcast_process = (\n tff.learning.framework.build_encoded_broadcast_process_from_model(\n tff_model_fn, example_broadcast_encoder_fn))\n encoded_mean_process = (\n tff.learning.framework.build_encoded_mean_process_from_model(\n tff_model_fn, example_mean_encoder_fn))\n else:\n encoded_broadcast_process = None\n encoded_mean_process = None\n\n return tff.learning.build_federated_averaging_process(\n model_fn=model_fn,\n server_optimizer_fn=server_optimizer_fn,\n client_weighting=client_weighting,\n client_optimizer_fn=client_optimizer_fn,\n model_update_aggregation_factory=aggregation_factory,\n aggregation_process=encoded_mean_process,\n broadcast_process=encoded_broadcast_proces)", "def build_fed_recon_eval(\n model_fn: fed_recon.ModelFn,\n *, # Callers pass below args by name.\n loss_fn: fed_recon.LossFn,\n metrics_fn: Optional[fed_recon.MetricsFn] = None,\n reconstruction_optimizer_fn: fed_recon.OptimizerFn = functools.partial(\n tf.keras.optimizers.SGD, learning_rate=0.1\n ),\n dataset_split_fn: Optional[\n reconstruction_model.ReconstructionDatasetSplitFn\n ] = None,\n model_distributor: Optional[distributors.DistributionProcess] = None,\n metrics_aggregation_process: Optional[\n aggregation_process.AggregationProcess\n ] = None,\n) -> learning_process_lib.LearningProcess:\n batch_type = None\n\n @tensorflow_computation.tf_computation\n def build_initial_model_weights():\n model = model_fn()\n if not isinstance(model, reconstruction_model.ReconstructionModel):\n raise TypeError(\n '`model_fn` must return an instance of '\n f'`tff.learning.models.ReconstructionModel`. Got a: {type(model)}'\n )\n nonlocal batch_type\n batch_type = model.input_spec\n return reconstruction_model.ReconstructionModel.get_global_variables(model)\n\n if dataset_split_fn is None:\n dataset_split_fn = (\n reconstruction_model.ReconstructionModel.build_dataset_split_fn(\n split_dataset=True\n )\n )\n\n model_weights_type = build_initial_model_weights.type_signature.result\n dataset_type = computation_types.SequenceType(batch_type)\n\n if model_distributor is None:\n model_distributor = distributors.build_broadcast_process(model_weights_type)\n\n # Metric finalizer functions that will be populated while tracing\n # `client_update` and used later in the federated computation.\n metric_finalizers: collections.OrderedDict[\n str, metrics_finalizers_lib.KerasMetricFinalizer\n ] = collections.OrderedDict()\n\n @tensorflow_computation.tf_computation(model_weights_type, dataset_type)\n def client_computation(\n incoming_model_weights: Any,\n client_dataset: tf.data.Dataset,\n ):\n \"\"\"Reconstructs and evaluates with `incoming_model_weights`.\"\"\"\n client_model = model_fn()\n client_global_weights = (\n reconstruction_model.ReconstructionModel.get_global_variables(\n client_model\n )\n )\n client_local_weights = (\n reconstruction_model.ReconstructionModel.get_local_variables(\n client_model\n )\n )\n loss_metric = tf.keras.metrics.MeanMetricWrapper(loss_fn(), name='loss')\n if metrics_fn is None:\n metrics = [loss_metric]\n else:\n metrics = metrics_fn() + [loss_metric]\n nonlocal metric_finalizers\n for metric in metrics:\n if metric.name in metric_finalizers:\n raise ValueError(\n f'Duplicate metric name detected: {metric.name}. 
'\n f'Already saw metrics {list(metric_finalizers.keys())}'\n )\n metric_finalizers[metric.name] = (\n metrics_finalizers_lib.create_keras_metric_finalizer(metric)\n )\n # To be used to calculate batch loss for model updates.\n client_loss = loss_fn()\n\n reconstruction_optimizer = keras_optimizer.build_or_verify_tff_optimizer(\n reconstruction_optimizer_fn,\n client_local_weights.trainable,\n disjoint_init_and_next=False,\n )\n\n @tf.function\n def reconstruction_reduce_fn(state, batch):\n \"\"\"Runs reconstruction training on local client batch.\"\"\"\n num_examples_sum, optimizer_state = state\n with tf.GradientTape() as tape:\n output = client_model.forward_pass(batch, training=True)\n batch_loss = client_loss(\n y_true=output.labels, y_pred=output.predictions\n )\n\n gradients = tape.gradient(batch_loss, client_local_weights.trainable)\n updated_optimizer_state, updated_weights = reconstruction_optimizer.next(\n optimizer_state,\n tuple(client_local_weights.trainable),\n tuple(gradients),\n )\n if not isinstance(\n reconstruction_optimizer, keras_optimizer.KerasOptimizer\n ):\n # TFF optimizers require assigning the updated tensors back into the\n # model variables. (With Keras optimizers we don't need to do this,\n # because Keras optimizers mutate the model variables within the `next`\n # step.)\n tf.nest.map_structure(\n lambda a, b: a.assign(b),\n client_local_weights.trainable,\n list(updated_weights),\n )\n\n return num_examples_sum + output.num_examples, updated_optimizer_state\n\n @tf.function\n def evaluation_reduce_fn(num_examples_sum, batch):\n \"\"\"Runs evaluation on client batch without training.\"\"\"\n output = client_model.forward_pass(batch, training=False)\n # Update each metric.\n for metric in metrics:\n metric.update_state(y_true=output.labels, y_pred=output.predictions)\n return num_examples_sum + output.num_examples\n\n @tf.function\n def tf_client_computation(incoming_model_weights, client_dataset):\n \"\"\"Reconstructs and evaluates with `incoming_model_weights`.\"\"\"\n recon_dataset, eval_dataset = dataset_split_fn(client_dataset)\n\n # Assign incoming global weights to `client_model` before reconstruction.\n tf.nest.map_structure(\n lambda v, t: v.assign(t),\n client_global_weights,\n incoming_model_weights,\n )\n\n # If needed, do reconstruction, training the local variables while keeping\n # the global ones frozen.\n if client_local_weights.trainable:\n # Ignore output number of examples used in reconstruction, since this\n # isn't included in `client_weight`.\n def initial_state_reconstruction_reduce():\n trainable_tensor_specs = tf.nest.map_structure(\n lambda v: tf.TensorSpec(v.shape, v.dtype),\n client_local_weights.trainable,\n )\n # We convert the trainable specs to tuple, as the data iteration\n # pattern might try to stack the tensors in a list.\n initial_num_examples = tf.constant(0)\n return initial_num_examples, reconstruction_optimizer.initialize(\n tuple(trainable_tensor_specs)\n )\n\n recon_dataset.reduce(\n initial_state=initial_state_reconstruction_reduce(),\n reduce_func=reconstruction_reduce_fn,\n )\n\n eval_dataset.reduce(tf.constant(0), evaluation_reduce_fn)\n\n eval_local_outputs = (\n reconstruction_model.ReconstructionModel.read_metric_variables(\n metrics\n )\n )\n return eval_local_outputs\n\n return tf_client_computation(incoming_model_weights, client_dataset)\n\n if metrics_aggregation_process is None:\n metrics_aggregation_process = aggregation_factory.SumThenFinalizeFactory(\n metric_finalizers\n 
).create(client_computation.type_signature.result)\n else:\n py_typecheck.check_type(\n metrics_aggregation_process,\n _AggregationProcess,\n 'metrics_aggregation_process',\n )\n\n @federated_computation.federated_computation\n def client_initialize():\n return metrics_aggregation_process.initialize()\n\n @federated_computation.federated_computation(\n client_initialize.type_signature.result,\n computation_types.at_clients(model_weights_type),\n computation_types.at_clients(dataset_type),\n )\n def client_work(state, model_weights, client_dataset):\n unfinalized_metrics = intrinsics.federated_map(\n client_computation, (model_weights, client_dataset)\n )\n metrics_output = metrics_aggregation_process.next(\n state, unfinalized_metrics\n )\n current_round_metrics, total_rounds_metrics = metrics_output.result\n measurements = intrinsics.federated_zip(\n collections.OrderedDict(\n eval=collections.OrderedDict(\n current_round_metrics=current_round_metrics,\n total_rounds_metrics=total_rounds_metrics,\n )\n )\n )\n # Return empty result as no model update will be performed for evaluation.\n empty_client_result = intrinsics.federated_value(\n client_works.ClientResult(update=(), update_weight=()),\n placements.CLIENTS,\n )\n return measured_process_lib.MeasuredProcessOutput(\n metrics_output.state,\n empty_client_result,\n measurements,\n )\n\n client_work = client_works.ClientWorkProcess(\n initialize_fn=client_initialize, next_fn=client_work\n )\n\n # The evaluation will *not* send model updates back, only metrics; so the type\n # is simply an empty tuple.\n empty_client_work_result_type = computation_types.at_clients(\n client_works.ClientResult(update=(), update_weight=())\n )\n empty_model_update_type = empty_client_work_result_type.member.update # pytype: disable=attribute-error\n empty_model_update_weight_type = (\n empty_client_work_result_type.member.update_weight # pytype: disable=attribute-error\n )\n empty_model_aggregator = mean.MeanFactory().create(\n empty_model_update_type, empty_model_update_weight_type\n )\n\n # Identity finalizer does not update the server model state.\n identity_finalizer = finalizers.build_identity_finalizer(\n model_weights_type, update_type=empty_model_update_weight_type\n )\n\n return composers.compose_learning_process(\n build_initial_model_weights,\n model_distributor,\n client_work,\n empty_model_aggregator,\n identity_finalizer,\n )", "def deploy_applications(model, model_context, aliases, wlst_mode=WlstModes.OFFLINE):\n applications_deployer = ApplicationsDeployer(model, model_context, aliases, wlst_mode=wlst_mode)\n applications_deployer.deploy()", "def create(model_size=\"b0\", variant=\"std\", resolution=None):\n\n if variant not in (\"std\", \"aa\", \"adv-prop\", \"noisy-student\"):\n raise ValueError(f\"EfficientNet variant not supported: {variant}\")\n\n # Note that for the standard EfficientNet variant only B0-B5 architectures are\n # supported, B0-B7 for all other variants. 
Noisy-Student also supports L2\n # and L2_475 (with a resolution of 475).\n valid = (variant == \"std\" and model_size in {f\"b{i}\" for i in range(6)}) or \\\n (variant != \"std\" and model_size in {f\"b{i}\" for i in range(8)}) or \\\n (variant == \"noisy-student\" and model_size in (\"l2\", \"l2_475\"))\n if not valid:\n raise ValueError(\n f\"Invalid `model_size` {model_size!r} for EfficientNet `variant` \"\n f\"{variant!r}!\")\n\n if model_size.startswith(\"l2\"):\n noisy_student = hub.KerasLayer(MODEL_PATHS[variant + \"-l2\"].format(\n model_size))\n else:\n noisy_student = hub.KerasLayer(MODEL_PATHS[variant].format(model_size))\n\n @tf.function\n def model(features):\n images = features[\"image\"]\n return tf.nn.softmax(noisy_student(images), axis=-1)\n\n def preprocess_fn(features):\n # EfficientNet preprocessing with model-dependent input resolution.\n # Preprocessing mimicks that of the public EfficientNet code from\n # https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/preprocessing.py\n # (both `_resize_image` and `_decode_and_center_crop` taken from that code)\n\n def _resize_image(image, image_size, method=None):\n if method is not None:\n return tf1.image.resize([image], [image_size, image_size], method)[0]\n return tf1.image.resize_bicubic([image], [image_size, image_size])[0]\n\n def _decode_and_center_crop(image, image_size, resize_method=None):\n \"\"\"Crops to center of image with padding then scales image_size.\"\"\"\n shape = tf1.shape(image)\n image_height = shape[0]\n image_width = shape[1]\n\n padded_center_crop_size = tf1.cast(\n ((image_size / (image_size + CROP_PADDING)) *\n tf.cast(tf.minimum(image_height, image_width), tf.float32)),\n tf.int32)\n\n offset_height = ((image_height - padded_center_crop_size) + 1) // 2\n offset_width = ((image_width - padded_center_crop_size) + 1) // 2\n image = tf1.image.crop_to_bounding_box(image, offset_height, offset_width,\n padded_center_crop_size,\n padded_center_crop_size)\n image = _resize_image(image, image_size, resize_method)\n return image\n\n features[\"image\"] = _decode_and_center_crop(\n features[\"image\"], EFFICIENTNET_RESOLUTIONS[model_size])\n features[\"image\"] = tf1.cast(features[\"image\"], tf1.float32)\n # We assume the modules expect pixels in [-1, 1].\n features[\"image\"] = features[\"image\"] / 127.5 - 1.0\n\n return features\n\n if resolution is not None:\n preprocess_config_fmt = \"resize_small({})|central_crop({})|value_range(-1,1)\"\n preprocess_config = preprocess_config_fmt.format(\n int(1.15 * resolution), resolution)\n preprocess_fn = pipeline_builder.get_preprocess_fn(\n preprocess_config, remove_tpu_dtypes=False)\n\n return model, preprocess_fn", "def make_model_fn(arch_fn):\n def model_fn(features, labels, mode, params):\n learning_rate = params['learning_rate']\n is_batch_norm = params.get('is_batch_norm', False)\n\n loss, pred = arch_fn(features, labels, mode, params)\n\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n\n if is_batch_norm:\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())\n else:\n train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())\n\n predictions = {'prob': pred}\n eval_metric_ops = {\n 'accuracy': tf.metrics.accuracy(labels, tf.math.greater_equal(pred, 0.5)),\n 'auc': tf.metrics.auc(labels, pred),\n }\n\n return tf.estimator.EstimatorSpec(\n mode=mode,\n 
predictions=predictions,\n loss=loss,\n train_op=train_op,\n eval_metric_ops=eval_metric_ops)\n\n return model_fn", "def build_server_init_fn(model_fn, client_optimizer_fn, server_optimizer_fn):\n\n @tff.tf_computation\n def server_init_tf():\n client_optimizer = client_optimizer_fn()\n server_optimizer = server_optimizer_fn()\n model = model_fn()\n _initialize_optimizer_vars(model, client_optimizer)\n _initialize_optimizer_vars(model, server_optimizer)\n return ServerState(\n model=_get_weights(model),\n client_optimizer_state=_get_optimizer_state(client_optimizer),\n server_optimizer_state=_get_optimizer_state(server_optimizer),\n round_num=0.0)\n\n return server_init_tf", "def build_iterative_process(\n model_fn: ModelBuilder,\n client_optimizer_fn: OptimizerBuilder,\n client_lr: Union[float, LRScheduleFn] = 0.1,\n server_optimizer_fn: OptimizerBuilder = tf.keras.optimizers.SGD,\n server_lr: Union[float, LRScheduleFn] = 1.0,\n optimizer_aggregation: AggregationType = 'mean',\n client_model_weight_fn: Optional[ClientWeightFn] = None,\n client_opt_weight_fn: Optional[ClientWeightFn] = None,\n) -> tff.templates.IterativeProcess: # pytype: disable=annotation-type-mismatch\n client_lr_schedule = client_lr\n if not callable(client_lr_schedule):\n client_lr_schedule = lambda round_num: client_lr\n\n server_lr_schedule = server_lr\n if not callable(server_lr_schedule):\n server_lr_schedule = lambda round_num: server_lr\n\n optimizer_aggregator = build_aggregator(optimizer_aggregation)\n\n placeholder_model = model_fn()\n\n server_init_tf = build_server_init_fn(\n model_fn,\n # Initialize with the learning rate for round zero.\n lambda: client_optimizer_fn(client_lr_schedule(0)),\n lambda: server_optimizer_fn(server_lr_schedule(0)))\n server_state_type = server_init_tf.type_signature.result\n model_weights_type = server_state_type.model\n client_optimizer_state_type = server_state_type.client_optimizer_state\n round_num_type = server_state_type.round_num\n\n tf_dataset_type = tff.SequenceType(placeholder_model.input_spec)\n\n @tff.tf_computation(tf_dataset_type, model_weights_type,\n client_optimizer_state_type, round_num_type)\n def client_update_fn(tf_dataset, initial_model_weights,\n initial_optimizer_state, round_num):\n \"\"\"Performs a client update.\"\"\"\n model = model_fn()\n client_lr = client_lr_schedule(round_num)\n client_optimizer = client_optimizer_fn(client_lr)\n # We initialize the client optimizer variables to avoid creating them\n # within the scope of the tf.function client_update.\n _initialize_optimizer_vars(model, client_optimizer)\n\n client_update = create_client_update_fn()\n return client_update(model, tf_dataset, initial_model_weights,\n initial_optimizer_state, client_optimizer,\n client_model_weight_fn, client_opt_weight_fn)\n\n @tff.tf_computation(server_state_type, model_weights_type.trainable,\n client_optimizer_state_type)\n def server_update_fn(server_state, model_delta, optimizer_delta):\n model = model_fn()\n server_lr = server_lr_schedule(server_state.round_num)\n server_optimizer = server_optimizer_fn(server_lr)\n # We initialize the server optimizer variables to avoid creating them\n # within the scope of the tf.function server_update.\n _initialize_optimizer_vars(model, server_optimizer)\n return server_update(model, server_optimizer, server_state, model_delta,\n optimizer_delta)\n\n @tff.tf_computation(client_optimizer_state_type)\n def _convert_opt_state_to_float(optimizer_state):\n return tf.nest.map_structure(lambda x: tf.cast(x, tf.float32),\n 
optimizer_state)\n\n @tff.tf_computation(_convert_opt_state_to_float.type_signature.result)\n def _convert_opt_state_to_int(optimizer_state):\n iterations_as_int = tf.cast(optimizer_state.iterations, tf.int64)\n return OptimizerState(\n iterations=iterations_as_int, weights=optimizer_state.weights)\n\n @tff.federated_computation(\n tff.type_at_server(server_state_type),\n tff.type_at_clients(tf_dataset_type))\n def run_one_round(server_state, federated_dataset):\n \"\"\"Orchestration logic for one round of computation.\n\n Args:\n server_state: A `ServerState`.\n federated_dataset: A federated `tf.Dataset` with placement `tff.CLIENTS`.\n\n Returns:\n A tuple of updated `ServerState` and the result of\n `tff.learning.Model.federated_output_computation`.\n \"\"\"\n client_model = tff.federated_broadcast(server_state.model)\n client_optimizer_state = tff.federated_broadcast(\n server_state.client_optimizer_state)\n client_round_num = tff.federated_broadcast(server_state.round_num)\n client_outputs = tff.federated_map(\n client_update_fn, (federated_dataset, client_model,\n client_optimizer_state, client_round_num))\n\n client_model_weight = client_outputs.client_weight.model_weight\n client_opt_weight = client_outputs.client_weight.optimizer_weight\n\n model_delta = tff.federated_mean(\n client_outputs.weights_delta, weight=client_model_weight)\n\n # We convert the optimizer state to a float type so that it can be used\n # with thing such as `tff.federated_mean`. This is only necessary because\n # `tf.keras.Optimizer` objects have a state with an integer indicating\n # the number of times it has been applied.\n client_optimizer_state_delta = tff.federated_map(\n _convert_opt_state_to_float, client_outputs.optimizer_state_delta)\n client_optimizer_state_delta = optimizer_aggregator(\n client_optimizer_state_delta, weight=client_opt_weight)\n # We conver the optimizer state back into one with an integer round number\n client_optimizer_state_delta = tff.federated_map(\n _convert_opt_state_to_int, client_optimizer_state_delta)\n\n server_state = tff.federated_map(\n server_update_fn,\n (server_state, model_delta, client_optimizer_state_delta))\n\n aggregated_outputs = placeholder_model.federated_output_computation(\n client_outputs.model_output)\n if aggregated_outputs.type_signature.is_struct():\n aggregated_outputs = tff.federated_zip(aggregated_outputs)\n\n return server_state, aggregated_outputs\n\n @tff.federated_computation\n def initialize_fn():\n return tff.federated_value(server_init_tf(), tff.SERVER)\n\n iterative_process = tff.templates.IterativeProcess(\n initialize_fn=initialize_fn, next_fn=run_one_round)\n\n @tff.tf_computation(server_state_type)\n def get_model_weights(server_state):\n return server_state.model\n\n iterative_process.get_model_weights = get_model_weights\n return iterative_process", "def build_example_weighted_federated_averaging_process(\n model_fn: Callable[[], model_lib.Model],\n client_optimizer_fn: Union[optimizer_base.Optimizer,\n Callable[[], tf.keras.optimizers.Optimizer]],\n server_optimizer_fn: Union[optimizer_base.Optimizer, Callable[\n [], tf.keras.optimizers.Optimizer]] = DEFAULT_SERVER_OPTIMIZER_FN,\n distributor: Optional[distributors.DistributionProcess] = None,\n model_update_aggregation_factory: Optional[\n factory.WeightedAggregationFactory] = None,\n use_experimental_simulation_loop: bool = False\n) -> learning_process.LearningProcess:\n py_typecheck.check_callable(model_fn)\n\n @computations.tf_computation()\n def initial_model_weights_fn():\n 
return model_utils.ModelWeights.from_model(model_fn())\n\n model_weights_type = initial_model_weights_fn.type_signature.result\n\n if distributor is None:\n distributor = distributors.build_broadcast_process(model_weights_type)\n\n if model_update_aggregation_factory is None:\n model_update_aggregation_factory = mean.MeanFactory()\n py_typecheck.check_type(model_update_aggregation_factory,\n factory.WeightedAggregationFactory)\n aggregator = model_update_aggregation_factory.create(\n model_weights_type.trainable, computation_types.TensorType(tf.float32))\n process_signature = aggregator.next.type_signature\n input_client_value_type = process_signature.parameter[1]\n result_server_value_type = process_signature.result[1]\n if input_client_value_type.member != result_server_value_type.member:\n raise TypeError('`model_update_aggregation_factory` does not produce a '\n 'compatible `AggregationProcess`. The processes must '\n 'retain the type structure of the inputs on the '\n f'server, but got {input_client_value_type.member} != '\n f'{result_server_value_type.member}.')\n\n client_work = build_fed_avg_client_work(model_fn, client_optimizer_fn,\n use_experimental_simulation_loop)\n finalizer = finalizers.build_apply_optimizer_finalizer(\n server_optimizer_fn, model_weights_type)\n return composers.compose_learning_process(initial_model_weights_fn,\n distributor, client_work,\n aggregator, finalizer)", "def build_client_update_with_keras_optimizer(\n model_fn, use_experimental_simulation_loop: bool = False):\n model = model_fn()\n dataset_reduce_fn = dataset_reduce.build_dataset_reduce_fn(\n use_experimental_simulation_loop)\n\n @tf.function\n def client_update(optimizer, initial_weights, data):\n model_weights = model_utils.ModelWeights.from_model(model)\n tf.nest.map_structure(lambda a, b: a.assign(b), model_weights,\n initial_weights)\n\n def reduce_fn(num_examples_sum, batch):\n \"\"\"Trains a `tff.learning.Model` on a batch of data.\"\"\"\n with tf.GradientTape() as tape:\n output = model.forward_pass(batch, training=True)\n\n gradients = tape.gradient(output.loss, model_weights.trainable)\n grads_and_vars = zip(gradients, model_weights.trainable)\n optimizer.apply_gradients(grads_and_vars)\n\n # TODO(b/199782787): Add a unit test for a model that does not compute\n # `num_examples` in its forward pass.\n if output.num_examples is None:\n num_examples_sum += tf.shape(output.predictions, out_type=tf.int64)[0]\n else:\n num_examples_sum += tf.cast(output.num_examples, tf.int64)\n\n return num_examples_sum\n\n def initial_state_for_reduce_fn():\n return tf.zeros(shape=[], dtype=tf.int64)\n\n num_examples = dataset_reduce_fn(\n reduce_fn, data, initial_state_fn=initial_state_for_reduce_fn)\n client_update = tf.nest.map_structure(tf.subtract,\n initial_weights.trainable,\n model_weights.trainable)\n model_output = model.report_local_outputs()\n stat_output = collections.OrderedDict(num_examples=num_examples)\n\n # TODO(b/122071074): Consider moving this functionality into\n # tff.federated_mean?\n client_update, has_non_finite_delta = (\n tensor_utils.zero_all_if_any_non_finite(client_update))\n # Zero out the weight if there are any non-finite values.\n if has_non_finite_delta > 0:\n client_weight = tf.constant(0.0)\n else:\n client_weight = tf.cast(num_examples, tf.float32)\n\n return client_works.ClientResult(\n update=client_update,\n update_weight=client_weight), model_output, stat_output\n\n return client_update", "def _serve():\n model_config_path = os.path.join(MODEL_PATH, 
\"MLmodel\")\n m = Model.load(model_config_path)\n\n if DEPLOYMENT_CONFIG_KEY_FLAVOR_NAME in os.environ:\n serving_flavor = os.environ[DEPLOYMENT_CONFIG_KEY_FLAVOR_NAME]\n else:\n # Older versions of mlflow may not specify a deployment configuration\n serving_flavor = pyfunc.FLAVOR_NAME\n\n if serving_flavor == mleap.FLAVOR_NAME:\n _serve_mleap()\n elif pyfunc.FLAVOR_NAME in m.flavors:\n _serve_pyfunc(m)\n else:\n raise Exception(\"This container only supports models with the MLeap or PyFunc flavors.\")", "def compress_and_finetune(config: argparse.Namespace):\n\n # 1. Instantiates Data Pipeline for evaluation and training\n data_pipeline = ImageNetDataPipeline(config)\n\n # 2. Loads the pretrained resnet50 keras model\n input_shape = (image_net_config.dataset['image_width'],\n image_net_config.dataset['image_height'],\n image_net_config.dataset['image_channels'])\n\n tf.keras.backend.clear_session()\n tf_config = tf.ConfigProto()\n tf_config.gpu_options.allow_growth = True\n tf.keras.backend.set_session(tf.Session(config=tf_config))\n model = ResNet50(weights='imagenet', input_shape=input_shape)\n update_ops_name = [op.name for op in model.updates]\n model = update_keras_bn_ops_trainable_flag(model, trainable=False, load_save_path=config.logdir)\n sess = tf.keras.backend.get_session()\n add_image_net_computational_nodes_in_graph(sess, model.output.name, image_net_config.dataset['images_classes'])\n\n # 3. Calculates floating point accuracy\n accuracy = data_pipeline.evaluate(sess)\n logger.info(\"Original Model Top-1 accuracy = %.2f\", accuracy)\n\n # 4. Compression\n logger.info(\"Starting Model Compression\")\n\n # 4.1. Compresses the model using AIMET Spatial SVD\n compressed_sess, stats = aimet_spatial_svd(sess=sess,\n input_op_names=['input_1'],\n output_op_names=[model.output.name.split(\":\")[0]],\n evaluator=data_pipeline.evaluate, working_dir=config.logdir)\n\n # 4.2. Logs the statistics\n logger.info(stats)\n with open(os.path.join(config.logdir, 'log.txt'), \"w\") as outfile:\n outfile.write(\"%s\\n\\n\" % stats)\n\n # 4.3. Saves the compressed model\n save_model_to_meta(compressed_sess, meta_path=os.path.join(config.logdir, 'ssvd_compressed_model'))\n\n # 4.4. Calculates and logs the accuracy of compressed model\n accuracy = data_pipeline.evaluate(compressed_sess)\n logger.info(\"Compressed Model Top-1 accuracy = %.2f\", accuracy)\n\n\n # 4.5. Compresses the model using AIMET Channel Pruning\n # Here 'labels' has been added into input_op_names as the data_loader.data_set gives\n # a tuple of (images, labels) and aimet channel pruning API checks the length of\n # input_op_names against the length of data_set output. The 'labels' value will be\n # fed but not utilized though.\n compressed_sess, stats = aimet_channel_pruning(sess=compressed_sess,\n input_op_names=['input_1', 'labels'],\n output_op_names=[model.output.name.split(\":\")[0]],\n data_loader=data_pipeline.data_loader(),\n evaluator=data_pipeline.evaluate, working_dir=config.logdir)\n\n # 4.6. Logs the statistics\n logger.info(stats)\n with open(os.path.join(config.logdir, 'log.txt'), \"w\") as outfile:\n outfile.write(\"%s\\n\\n\" % stats)\n\n # 4.7. Saves the compressed model\n save_model_to_meta(compressed_sess, meta_path=os.path.join(config.logdir, 'ssvd_cp_compressed_model'))\n\n # 4.8. Calculates and logs the accuracy of compressed model\n accuracy = data_pipeline.evaluate(compressed_sess)\n logger.info(\"Compressed Model Top-1 accuracy = %.2f\", accuracy)\n logger.info(\"Model Compression Complete\")\n\n # 5. 
Finetuning\n logger.info(\"Starting Model Finetuning\")\n\n # 5.1. Finetunes the compressed model\n # Since Channel Pruning replaces few BNs by different BNs with 'reduced_' added in their original name,\n # update_ops_name list should be updated accordingly\n compr_graph_all_ops_name = [op.name for op in compressed_sess.graph.get_operations()]\n update_ops_name_after_CP = []\n for op_name in update_ops_name:\n if 'reduced_'+op_name in compr_graph_all_ops_name:\n update_ops_name_after_CP.append('reduced_'+op_name)\n else:\n update_ops_name_after_CP.append(op_name)\n data_pipeline.finetune(compressed_sess, update_ops_name=update_ops_name_after_CP)\n\n # 5.2. Calculates and logs the accuracy of compressed-finetuned model\n accuracy = data_pipeline.evaluate(compressed_sess)\n logger.info(\"Finetuned Compressed Model Top-1 accuracy = %.2f\", accuracy)\n logger.info(\"Model Finetuning Complete\")", "def build_client_update_with_tff_optimizer(\n model_fn, use_experimental_simulation_loop: bool = False):\n model = model_fn()\n dataset_reduce_fn = dataset_reduce.build_dataset_reduce_fn(\n use_experimental_simulation_loop)\n\n @tf.function\n def client_update(optimizer, initial_weights, data):\n model_weights = model_utils.ModelWeights.from_model(model)\n tf.nest.map_structure(lambda a, b: a.assign(b), model_weights,\n initial_weights)\n\n def reduce_fn(state, batch):\n \"\"\"Trains a `tff.learning.Model` on a batch of data.\"\"\"\n num_examples_sum, optimizer_state = state\n with tf.GradientTape() as tape:\n output = model.forward_pass(batch, training=True)\n\n gradients = tape.gradient(output.loss, model_weights.trainable)\n optimizer_state, updated_weights = optimizer.next(optimizer_state,\n model_weights.trainable,\n gradients)\n tf.nest.map_structure(lambda a, b: a.assign(b), model_weights.trainable,\n updated_weights)\n\n if output.num_examples is None:\n num_examples_sum += tf.shape(output.predictions, out_type=tf.int64)[0]\n else:\n num_examples_sum += tf.cast(output.num_examples, tf.int64)\n\n return num_examples_sum, optimizer_state\n\n def initial_state_for_reduce_fn():\n trainable_tensor_specs = tf.nest.map_structure(\n lambda v: tf.TensorSpec(v.shape, v.dtype), model_weights.trainable)\n return tf.zeros(\n shape=[],\n dtype=tf.int64), optimizer.initialize(trainable_tensor_specs)\n\n num_examples, _ = dataset_reduce_fn(\n reduce_fn, data, initial_state_fn=initial_state_for_reduce_fn)\n client_update = tf.nest.map_structure(tf.subtract,\n initial_weights.trainable,\n model_weights.trainable)\n model_output = model.report_local_outputs()\n stat_output = collections.OrderedDict(num_examples=num_examples)\n\n # TODO(b/122071074): Consider moving this functionality into\n # tff.federated_mean?\n client_update, has_non_finite_delta = (\n tensor_utils.zero_all_if_any_non_finite(client_update))\n # Zero out the weight if there are any non-finite values.\n if has_non_finite_delta > 0:\n client_weight = tf.constant(0.0)\n else:\n client_weight = tf.cast(num_examples, tf.float32)\n\n return client_works.ClientResult(\n update=client_update,\n update_weight=client_weight), model_output, stat_output\n\n return client_update", "def build_estimator(model_fn):\n tpu_cluster_resolver = None\n if FLAGS.use_tpu and FLAGS.tpu_name:\n tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(\n FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)\n\n is_per_host = tf.estimator.tpu.InputPipelineConfig.PER_HOST_V2\n run_config = tf.estimator.tpu.RunConfig(\n 
cluster=tpu_cluster_resolver,\n master=FLAGS.master,\n model_dir=FLAGS.model_dir,\n tf_random_seed=FLAGS.tf_random_seed,\n save_checkpoints_steps=FLAGS.save_checkpoints_steps,\n keep_checkpoint_max=FLAGS.keep_checkpoint_max,\n keep_checkpoint_every_n_hours=FLAGS.keep_checkpoint_every_n_hours,\n tpu_config=tf.estimator.tpu.TPUConfig(\n iterations_per_loop=FLAGS.tpu_iterations_per_loop,\n num_shards=FLAGS.num_tpu_cores,\n per_host_input_for_training=is_per_host))\n\n # If TPU is not available, this will fall back to normal Estimator on CPU\n # or GPU.\n return tf.estimator.tpu.TPUEstimator(\n params={\"gradient_accumulation_steps\": FLAGS.gradient_accumulation_steps},\n use_tpu=FLAGS.use_tpu,\n model_fn=model_fn,\n config=run_config,\n train_batch_size=FLAGS.train_batch_size // \\\n FLAGS.gradient_accumulation_steps,\n eval_batch_size=FLAGS.eval_batch_size,\n predict_batch_size=FLAGS.predict_batch_size)", "def deploy(\n self,\n initial_instance_count=None,\n instance_type=None,\n serializer=None,\n deserializer=None,\n accelerator_type=None,\n endpoint_name=None,\n tags=None,\n kms_key=None,\n wait=True,\n data_capture_config=None,\n async_inference_config=None,\n serverless_inference_config=None,\n volume_size=None,\n model_data_download_timeout=None,\n container_startup_health_check_timeout=None,\n inference_recommendation_id=None,\n explainer_config=None,\n **kwargs,\n ):\n removed_kwargs(\"update_endpoint\", kwargs)\n\n self._init_sagemaker_session_if_does_not_exist(instance_type)\n # Depending on the instance type, a local session (or) a session is initialized.\n self.role = resolve_value_from_config(\n self.role,\n MODEL_EXECUTION_ROLE_ARN_PATH,\n sagemaker_session=self.sagemaker_session,\n )\n self.vpc_config = resolve_value_from_config(\n self.vpc_config,\n MODEL_VPC_CONFIG_PATH,\n sagemaker_session=self.sagemaker_session,\n )\n self._enable_network_isolation = resolve_value_from_config(\n self._enable_network_isolation,\n MODEL_ENABLE_NETWORK_ISOLATION_PATH,\n sagemaker_session=self.sagemaker_session,\n )\n\n tags = add_jumpstart_tags(\n tags=tags,\n inference_model_uri=self.model_data if isinstance(self.model_data, str) else None,\n inference_script_uri=self.source_dir,\n )\n\n if self.role is None:\n raise ValueError(\"Role can not be null for deploying a model\")\n\n if (\n inference_recommendation_id is not None\n or self.inference_recommender_job_results is not None\n ):\n instance_type, initial_instance_count = self._update_params(\n instance_type=instance_type,\n initial_instance_count=initial_instance_count,\n accelerator_type=accelerator_type,\n async_inference_config=async_inference_config,\n serverless_inference_config=serverless_inference_config,\n explainer_config=explainer_config,\n inference_recommendation_id=inference_recommendation_id,\n inference_recommender_job_results=self.inference_recommender_job_results,\n )\n\n is_async = async_inference_config is not None\n if is_async and not isinstance(async_inference_config, AsyncInferenceConfig):\n raise ValueError(\"async_inference_config needs to be a AsyncInferenceConfig object\")\n\n is_explainer_enabled = explainer_config is not None\n if is_explainer_enabled and not isinstance(explainer_config, ExplainerConfig):\n raise ValueError(\"explainer_config needs to be a ExplainerConfig object\")\n\n is_serverless = serverless_inference_config is not None\n if not is_serverless and not (instance_type and initial_instance_count):\n raise ValueError(\n \"Must specify instance type and instance count unless using serverless 
inference\"\n )\n\n if is_serverless and not isinstance(serverless_inference_config, ServerlessInferenceConfig):\n raise ValueError(\n \"serverless_inference_config needs to be a ServerlessInferenceConfig object\"\n )\n\n if instance_type and instance_type.startswith(\"ml.inf\") and not self._is_compiled_model:\n LOGGER.warning(\n \"Your model is not compiled. Please compile your model before using Inferentia.\"\n )\n\n compiled_model_suffix = None if is_serverless else \"-\".join(instance_type.split(\".\")[:-1])\n if self._is_compiled_model and not is_serverless:\n self._ensure_base_name_if_needed(\n image_uri=self.image_uri,\n script_uri=self.source_dir,\n model_uri=self.model_data,\n )\n if self._base_name is not None:\n self._base_name = \"-\".join((self._base_name, compiled_model_suffix))\n\n self._create_sagemaker_model(\n instance_type=instance_type,\n accelerator_type=accelerator_type,\n tags=tags,\n serverless_inference_config=serverless_inference_config,\n )\n\n serverless_inference_config_dict = (\n serverless_inference_config._to_request_dict() if is_serverless else None\n )\n production_variant = sagemaker.production_variant(\n self.name,\n instance_type,\n initial_instance_count,\n accelerator_type=accelerator_type,\n serverless_inference_config=serverless_inference_config_dict,\n volume_size=volume_size,\n model_data_download_timeout=model_data_download_timeout,\n container_startup_health_check_timeout=container_startup_health_check_timeout,\n )\n if endpoint_name:\n self.endpoint_name = endpoint_name\n else:\n base_endpoint_name = self._base_name or utils.base_from_name(self.name)\n if self._is_compiled_model and not is_serverless:\n if not base_endpoint_name.endswith(compiled_model_suffix):\n base_endpoint_name = \"-\".join((base_endpoint_name, compiled_model_suffix))\n self.endpoint_name = utils.name_from_base(base_endpoint_name)\n\n data_capture_config_dict = None\n if data_capture_config is not None:\n data_capture_config_dict = data_capture_config._to_request_dict()\n\n async_inference_config_dict = None\n if is_async:\n if (\n async_inference_config.output_path is None\n or async_inference_config.failure_path is None\n ):\n async_inference_config = self._build_default_async_inference_config(\n async_inference_config\n )\n async_inference_config.kms_key_id = resolve_value_from_config(\n async_inference_config.kms_key_id,\n ENDPOINT_CONFIG_ASYNC_KMS_KEY_ID_PATH,\n sagemaker_session=self.sagemaker_session,\n )\n async_inference_config_dict = async_inference_config._to_request_dict()\n\n explainer_config_dict = None\n if is_explainer_enabled:\n explainer_config_dict = explainer_config._to_request_dict()\n\n self.sagemaker_session.endpoint_from_production_variants(\n name=self.endpoint_name,\n production_variants=[production_variant],\n tags=tags,\n kms_key=kms_key,\n wait=wait,\n data_capture_config_dict=data_capture_config_dict,\n explainer_config_dict=explainer_config_dict,\n async_inference_config_dict=async_inference_config_dict,\n )\n\n if self.predictor_cls:\n predictor = self.predictor_cls(self.endpoint_name, self.sagemaker_session)\n if serializer:\n predictor.serializer = serializer\n if deserializer:\n predictor.deserializer = deserializer\n if is_async:\n return AsyncPredictor(predictor, self.name)\n return predictor\n return None", "def create_savedmodel(\n models_dir,\n model_version=1,\n dims=16,\n model_name=\"sig_tag\",\n tag_name=\"testTag\",\n signature_def_name=\"testSigDef\",\n different_io=False,\n):\n model_version_dir = models_dir + \"/\" + 
model_name + \"/\" + str(model_version)\n\n try:\n os.makedirs(model_version_dir)\n except OSError as ex:\n pass # ignore existing dir\n\n with tf.Session() as sess:\n input_tensor = tf.placeholder(tf.float32, [dims], \"TENSOR_INPUT\")\n\n # tag:\"serve\", signature_def:\"serving_default\"\n multiplier_0 = tf.constant(1.0, name=\"multiplier_0\")\n # tag:\"serve\", signature_def:signature_def_name\n multiplier_1 = tf.constant(2.0, name=\"multiplier_1\")\n # tag:tag_name, signature_def:\"serving_default\"\n multiplier_2 = tf.constant(3.0, name=\"multiplier_2\")\n # tag:tag_name, signature_def:signature_def_name\n multiplier_3 = tf.constant(4.0, name=\"multiplier_3\")\n\n output_tensor_0 = tf.multiply(multiplier_0, input_tensor, name=\"TENSOR_OUTPUT\")\n output_tensor_1 = tf.multiply(multiplier_1, input_tensor, name=\"TENSOR_OUTPUT\")\n output_tensor_2 = tf.multiply(multiplier_2, input_tensor, name=\"TENSOR_OUTPUT\")\n output_tensor_3 = tf.multiply(multiplier_3, input_tensor, name=\"TENSOR_OUTPUT\")\n\n # build_tensor_info_op could be used if build_tensor_info is deprecated\n input_tensor_info = tf.saved_model.utils.build_tensor_info(input_tensor)\n output_tensor_info_0 = tf.saved_model.utils.build_tensor_info(output_tensor_0)\n output_tensor_info_1 = tf.saved_model.utils.build_tensor_info(output_tensor_1)\n output_tensor_info_2 = tf.saved_model.utils.build_tensor_info(output_tensor_2)\n output_tensor_info_3 = tf.saved_model.utils.build_tensor_info(output_tensor_3)\n\n # Using predict method name because simple save uses it\n # tag:\"serve\", signature_def:\"serving_default\"\n signature_0 = tf.saved_model.signature_def_utils.build_signature_def(\n inputs={\"INPUT\": input_tensor_info},\n outputs={\"OUTPUT\": output_tensor_info_0},\n method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME,\n )\n # tag:\"serve\", signature_def:signature_def_name\n signature_1 = tf.saved_model.signature_def_utils.build_signature_def(\n inputs={\"INPUT\": input_tensor_info},\n outputs={\"OUTPUT\": output_tensor_info_1},\n method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME,\n )\n # tag:tag_name, signature_def:\"serving_default\"\n signature_2 = tf.saved_model.signature_def_utils.build_signature_def(\n inputs={\"INPUT\": input_tensor_info},\n outputs={\"OUTPUT\": output_tensor_info_2},\n method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME,\n )\n # tag:tag_name, signature_def:signature_def_name\n signature_3 = tf.saved_model.signature_def_utils.build_signature_def(\n inputs={\"INPUT\": input_tensor_info},\n outputs={\"OUTPUT\": output_tensor_info_3},\n method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME,\n )\n # tag:tag_name, signature_def:signature_def_name, two inputs/outputs\n signature_4 = tf.saved_model.signature_def_utils.build_signature_def(\n inputs={\"INPUT\": input_tensor_info, \"INPUT1\": input_tensor_info},\n outputs={\"OUTPUT\": output_tensor_info_0, \"OUTPUT1\": output_tensor_info_1},\n method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME,\n )\n\n b = builder.SavedModelBuilder(model_version_dir + \"/model.savedmodel\")\n\n if different_io:\n b.add_meta_graph_and_variables(\n sess,\n tags=[tag_name],\n signature_def_map={signature_def_name: signature_0},\n assets_collection=ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS),\n clear_devices=True,\n )\n b.add_meta_graph(\n tags=[tag_constants.SERVING],\n signature_def_map={\n signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature_4\n },\n 
assets_collection=ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS),\n clear_devices=True,\n )\n else:\n signature_def_map_0 = {\n signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature_0,\n signature_def_name: signature_1,\n }\n signature_def_map_1 = {\n signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature_2,\n signature_def_name: signature_3,\n }\n\n b.add_meta_graph_and_variables(\n sess,\n tags=[tag_constants.SERVING],\n signature_def_map=signature_def_map_0,\n assets_collection=ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS),\n clear_devices=True,\n )\n b.add_meta_graph(\n tags=[tag_name],\n signature_def_map=signature_def_map_1,\n assets_collection=ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS),\n clear_devices=True,\n )\n\n b.save()", "def __init__(self,\n model_path,\n predictor_mode=\"Analysis\",\n config_type=\"cpu\",\n batch_size=1,\n min_subgraph_size=1,\n trt_dynamic_shape_info=None):\n configs = DeployConfig(\n model_path=model_path,\n batch_size=batch_size,\n min_subgraph_size=min_subgraph_size,\n trt_dynamic_shape_info=trt_dynamic_shape_info)\n analysis_predictor_config = configs.analysis_config(config_type)\n\n logger.debug(\"analysis_predictor_config : {}\".format(\n analysis_predictor_config))\n configs.summary_config(analysis_predictor_config) # summary configs\n\n if predictor_mode == \"Analysis\":\n logger.info(\"current config is Analysis config\")\n predictor0 = fluid.core.create_paddle_predictor(\n analysis_predictor_config)\n # clone main predictor to test predictor.clone api\n self.predictor = predictor0.clone()\n logger.info(\"analysis predictor create and clone successful\")\n elif predictor_mode == \"Native\":\n native_predictor_config = DeployConfig(model_path).native_config(\n config_type)\n logger.info(native_predictor_config)\n logger.info(\"current config is Native config\")\n # use analysis predictor to retrive number of inputs\n analysis_predictor_config.disable_glog_info()\n self.analysis_predictor = fluid.core.create_paddle_predictor(\n analysis_predictor_config)\n # use native predictor to predict\n self.native_predictor = fluid.core.create_paddle_predictor(\n native_predictor_config)\n logger.info(\"native predictor create successful\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the sum gradient for each shared variable across all clones. This function assumes that the clone_grads has been scaled appropriately by 1 / num_clones.
def _sum_clones_gradients(clone_grads):
  sum_grads = []
  for grad_and_vars in zip(*clone_grads):
    # Note that each grad_and_vars looks like the following:
    # ((grad_var0_clone0, var0), ... (grad_varN_cloneN, varN))
    grads = []
    var = grad_and_vars[0][1]
    for g, v in grad_and_vars:
      assert v == var
      if g is not None:
        grads.append(g)
    if grads:
      if len(grads) > 1:
        sum_grad = tf.add_n(grads, name=var.op.name + '/sum_grads')
      else:
        sum_grad = grads[0]
      sum_grads.append((sum_grad, var))
  return sum_grads
[ "def _optimize_clone(optimizer, clone, num_clones, regularization_losses,\n **kwargs):\n sum_loss = _gather_clone_loss(clone, num_clones, regularization_losses)\n clone_grad = None\n if sum_loss is not None:\n with tf.device(clone.device):\n clone_grad = optimizer.compute_gradients(sum_loss, **kwargs)\n return sum_loss, clone_grad", "def optimize_clones(clones, optimizer,\n regularization_losses=None,\n **kwargs):\n grads_and_vars = []\n clones_losses = []\n num_clones = len(clones)\n if regularization_losses is None:\n regularization_losses = tf.compat.v1.get_collection(\n tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES)\n for clone in clones:\n with tf.name_scope(clone.scope):\n clone_loss, clone_grad = _optimize_clone(\n optimizer, clone, num_clones, regularization_losses, **kwargs)\n if clone_loss is not None:\n clones_losses.append(clone_loss)\n grads_and_vars.append(clone_grad)\n # Only use regularization_losses for the first clone\n regularization_losses = None\n # Compute the total_loss summing all the clones_losses.\n total_loss = tf.add_n(clones_losses, name='total_loss')\n # Sum the gradients across clones.\n grads_and_vars = _sum_clones_gradients(grads_and_vars)\n return total_loss, grads_and_vars", "def average_gradients_D(self, num_nodes):\n for param in self.dA.parameters():\n #print(param.grad.data)\n tensor0 = param.grad.data.cpu()\n dist.all_reduce(tensor0, op=dist.reduce_op.SUM)\n tensor0 /= float(num_nodes)\n param.grad.data = tensor0.cuda()\n\n for param in self.dB.parameters():\n #print(param.grad.data)\n tensor1 = param.grad.data.cpu()\n dist.all_reduce(tensor1, op=dist.reduce_op.SUM)\n tensor1 /= float(num_nodes)\n param.grad.data = tensor1.cuda()", "def average_gradients_G(self, num_nodes):\n\n for param in self.gA.parameters():\n #print(param.grad.data)\n tensor2 = param.grad.data.cpu()\n dist.all_reduce(tensor2, op=dist.reduce_op.SUM)\n tensor2 /= float(num_nodes)\n param.grad.data = tensor2.cuda()\n\n for param in self.gB.parameters():\n #print(param.grad.data)\n tensor3 = param.grad.data.cpu()\n dist.all_reduce(tensor3, op=dist.reduce_op.SUM)\n tensor3 /= float(num_nodes)\n param.grad.data = tensor3.cuda()", "def _gather_clone_loss(clone, num_clones, regularization_losses):\n # The return value.\n sum_loss = None\n # Individual components of the loss that will need summaries.\n clone_loss = None\n regularization_loss = None\n # Compute and aggregate losses on the clone device.\n with tf.device(clone.device):\n all_losses = []\n clone_losses = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.LOSSES,\n clone.scope)\n if clone_losses:\n clone_loss = tf.add_n(clone_losses, name='clone_loss')\n if num_clones > 1:\n clone_loss = tf.div(clone_loss, 1.0 * num_clones,\n name='scaled_clone_loss')\n all_losses.append(clone_loss)\n if regularization_losses:\n regularization_loss = tf.add_n(regularization_losses,\n name='regularization_loss')\n all_losses.append(regularization_loss)\n if all_losses:\n sum_loss = tf.add_n(all_losses)\n # Add the summaries out of the clone device block.\n if clone_loss is not None:\n tf.compat.v1.summary.scalar('/'.join(filter(None,\n ['Losses', clone.scope, 'clone_loss'])),\n clone_loss)\n if regularization_loss is not None:\n tf.compat.v1.summary.scalar('Losses/regularization_loss',\n regularization_loss)\n return sum_loss", "def _aggregate_gradients(self, grads_and_vars):\n return self.gradient_aggregator(grads_and_vars)", "def grad_add(grads_batch):\n y = grads_batch[0].deepcopy()\n for xx in grads_batch:\n for item1,item2 in 
zip(xx,y):\n item2 += item1\n return y # return the parameters with a different gradient", "def accumulate_gradients(self):\n for k in range(self.last_layer, -1, -1):\n self.g[k] = self.g[k] + np.matmul(self.d[k].T, self.a[k])", "def compute_gradients(self, *args, **kwargs):\n gradients = self._optimizer.compute_gradients(*args, **kwargs)\n grads, vars = zip(*gradients)\n if self._agg_helper:\n avg_grads = self._agg_helper.compute_gradients(grads)\n else:\n avg_grads = self._allreduce_grads(grads)\n return list(zip(avg_grads, vars))", "def grad_norm(grads):\n a=0\n for item in grads:\n a += nd.sum(item ** 2).asscalar()\n return a ** 0.5", "def _apply_sparse_duplicate_indices(self, grad, var):\n summed_values, unique_indices = _deduplicate_indexed_slices(\n values=grad.values, indices=grad.indices)\n gradient_no_duplicate_indices = indexed_slices.IndexedSlices(\n indices=unique_indices,\n values=summed_values,\n dense_shape=grad.dense_shape)\n return self._apply_sparse(gradient_no_duplicate_indices, var)", "def update_gradient(self):\n \n #Seed robots need no further update.\n if self.seed:\n return\n \n #Gradient distance\n G = 3*self.radius\n \n #Only consider neighbors closer than G\n grad_vals = [s[2] for s in self.world.scan(self.ID) if s[0]<G]\n \n if grad_vals:\n self.grad_val = min(grad_vals)+1+self.rand_nonce", "def sgd_update(trainables, learning_rate=1e-2):\n for node in trainables:\n node.value -= learning_rate * node.gradients[node]", "def apply_gradients(self, grads_and_vars, stddev): # pylint: disable=arguments-differ\n\n with tf.device(_gpu_device_name(self.replica_id)):\n ops_ = [tf.assign(\n var,\n (var\n - self.learning_rate*grad\n + tf.random_normal(var.shape, stddev=stddev)))\n for grad, var in grads_and_vars]\n train_op = tf.group(ops_)\n self._grads = [g for g, v in grads_and_vars]\n return train_op", "def compute_value_gradient(grads, rewards):\n\n assert len(grads)>0\n assert len(rewards)>0\n\n baseline_b = 0\n for sample_index in range(0,len(rewards)):\n total_reward_for_one_trajectory = 0\n current_reward_trajectory = rewards[sample_index]\n\n for time_index in range(0,len(current_reward_trajectory)):\n total_reward_for_one_trajectory += current_reward_trajectory[time_index]\n baseline_b += total_reward_for_one_trajectory\n\n baseline_b = baseline_b/len(rewards)\n\n total_grad = np.zeros((grads[0][0].shape))\n\n for sample_index in range(0,len(grads)):\n total_grad_sum_one_trajectory = np.zeros((grads[0][0].shape))\n current_grad_trajectory = grads[sample_index]\n current_reward_trajectory = rewards[sample_index]\n \n\n for time_index_h in range(0,len(current_grad_trajectory)):\n \n\n current_reward = 0\n for time_index_t in range(time_index_h,len(current_reward_trajectory)):\n current_reward += current_reward_trajectory[time_index_t]\n adjusted_reward = current_reward-baseline_b\n\n total_grad_sum_one_trajectory += current_grad_trajectory[time_index_h]*adjusted_reward\n\n total_grad += total_grad_sum_one_trajectory/len(current_grad_trajectory)\n\n return total_grad/len(grads)", "def clone_inputs_retaining_gradness(example_inputs):\n cloned_inputs = clone_inputs(example_inputs)\n for idx in range(len(example_inputs)):\n if isinstance(cloned_inputs[idx], torch.Tensor):\n cloned_inputs[idx].requires_grad_(example_inputs[idx].requires_grad)\n return cloned_inputs", "def make_copy_with_grads(self):\n ps = [p.clone().detach().requires_grad_() for p in self.Parameters()]\n return Normal(*ps)", "def calculate_gradients(self,inputs,deltas):\r\n part_gradients = dict()\r\n 
part_gradients[\"Weights\"] = [self.cells[idx].weightcompute(inputs,deltas[idx]) for idx in range(0,len(self.cells))]\r\n part_gradients[\"Baises\"] = [self.cells[idx].baiscompute(inputs,deltas[idx]) for idx in range(0,len(self.cells))]\r\n part_gradients[\"PrevLayer\"] = [self.cells[idx].lastlayercompute(inputs,deltas[idx]) for idx in range(0,len(self.cells))]\r\n return part_gradients", "def _compute_grad_weights(self, grads):\r\n if self.input_dim == 2:\r\n return F.adaptive_avg_pool2d(grads, 1)\r\n else:\r\n return F.adaptive_avg_pool3d(grads, 1)", "def compute_gradients(self, loss, var_list=None, **kwargs):\n grads_and_vars = self._wrapped_optimizer.compute_gradients(loss,\n var_list=var_list,\n **kwargs)\n if self._outfeed_optimizer_mode == OutfeedOptimizerMode.AFTER_COMPUTE:\n enqueue = self._maybe_enqueue(grads_and_vars)\n if enqueue:\n # The enqueue op must be executed.\n # We cannot return it here so we must use control dependencies.\n # This puts the enqueue op into the control flow by attaching\n # it to an identity op on the first gradient.\n with ops.control_dependencies([enqueue]):\n return [(array_ops.identity(x) if i == 0 else x,\n y)\n for i, (x, y) in enumerate(grads_and_vars)]\n return grads_and_vars" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add histogram summaries to gradients.
def _add_gradients_summaries(grads_and_vars):
  summaries = []
  for grad, var in grads_and_vars:
    if grad is not None:
      if isinstance(grad, tf.IndexedSlices):
        grad_values = grad.values
      else:
        grad_values = grad
      summaries.append(tf.summary.histogram(var.op.name + ':gradient',
                                            grad_values))
      summaries.append(tf.summary.histogram(var.op.name + ':gradient_norm',
                                            tf.global_norm([grad_values])))
    else:
      tf.logging.info('Var %s has no gradient', var.op.name)
  return summaries
[ "def update_hist_list(self):\n for layer in self.layer_list:\n self.histogram_weight_history[layer]['weight'].append(np.histogram(layer.weight.detach().cpu().numpy().reshape(-1), bins=self.bins))\n self.histogram_weight_history[layer]['bias'].append(np.histogram(layer.bias.detach().cpu().numpy().reshape(-1), bins=self.bins))\n\n if self.epoch == 0: # Model is untrained; no gradients exist yet\n self.histogram_gradient_history[layer]['weight'].append(None)\n self.histogram_gradient_history[layer]['bias'].append(None)\n else:\n self.histogram_gradient_history[layer]['weight'].append(np.histogram(layer.weight.grad.detach().cpu().numpy().reshape(-1), bins=self.bins))\n self.histogram_gradient_history[layer]['bias'].append(np.histogram(layer.bias.grad.detach().cpu().numpy().reshape(-1), bins=self.bins))", "def add_histograms(self, histo_config: DictConfig) -> None:\n self.histograms = Histograms(histo_config)", "def make_histogram(self): # connected to make histogram (btn_histogram)\n print(\"make hist\")\n# self.calculate_images()\n self.intensitys = np.linspace(0,10,10)\n self.intensitys2 = self.intensitys\n try:\n self.intensitys2 = np.concatenate((self.intensitys,\n self.intensitys2))\n except:\n self.intensitys2 = self.intensitys\n self.doit()\n\n self.histo_data = True", "def addSeriesHistogram(self, dataSrc, fieldValue, countBin = None):\n self.graphSeries.append(_SeriesHistogram(dataSrc, fieldValue, countBin))", "def summarize_hist(self, tensor_list):\n # Seems to be a bug in Tensorflow 1.4.1\n # related to summary variable names.\n # Added replace ':'->'_' to avoid warning\n # messages.\n for tensor in tensor_list:\n tf.summary.histogram(tensor.name.replace(\n ':','_'), tensor)", "def grad_hook(self, layer, input, output):\n self.gradients.append(output[0])", "def _apply_gradient(self, loss, var_list, grad_dict):\n raise NotImplementedError(\"Please use subclass with specific algorithms, like boomdiff.optimize.GD\")", "def _draw_histogram(self):\n self.range = npy.arange(0, 100)\n all_data = [item for sublist in self.data for item in sublist]\n plt.hist(all_data)\n plt.show()", "def __add__(self, other):\n raise NotImplementedError(\"SummaryHistogram objects can't be used, use either the continuous or discrete implementation\")", "def _histogram(self):\n\n title = f\"k Nearest Neighbors\\nK values chosen by 10-fold CV\" \n \n sns.set_palette(self.palette, self.n_colors)\n sns.set_style(self.style)\n\n fig, axs = plt.subplots(figsize=(12,5)) \n \n sns.histplot(x=self.k_values, ax=axs).set_title(title, weight=\"bold\") \n fig.tight_layout()", "def _aggregate_gradients(self, grads_and_vars):\n return self.gradient_aggregator(grads_and_vars)", "def update_fitness_histogram(self, fitness_values_herbivores,\n fitness_values_carnivores, fitness_hist_specs):\n self.fitness_hist_ax.clear()\n self.fitness_hist_ax.set_title(\"Fitness\", fontstyle='italic')\n bins = get_bins(fitness_hist_specs)\n self.fitness_hist_ax.hist((fitness_values_herbivores, fitness_values_carnivores), bins,\n (0, fitness_hist_specs['max']), histtype='step', linewidth=1,\n label=('Herbivores', 'Carnivores'), color=('b', 'r'))\n self.fitness_hist_ax.patch.set_facecolor('gainsboro')", "def gradient_plot(self):\n ave_grads = []\n max_grads= []\n layers = []\n for n, p in self.model.named_parameters():\n if(p.requires_grad) and (\"bias\" not in n):\n if p.grad is None:\n print(\"Error n={} has None grads\".format(n))\n layers.append(n)\n ave_grads.append(p.grad.abs().mean())\n max_grads.append(p.grad.abs().max())\n fig = 
plt.figure()\n plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color=\"c\")\n plt.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color=\"b\")\n plt.hlines(0, 0, len(ave_grads)+1, lw=2, color=\"k\" )\n plt.xticks(range(0,len(ave_grads), 1), layers, rotation=\"vertical\")\n plt.xlim(left=0, right=len(ave_grads))\n plt.ylim(bottom = -0.001, top=0.02) # zoom in on the lower gradient regions\n plt.xlabel(\"Layers\")\n plt.ylabel(\"average gradient\")\n plt.title(\"Gradient flow\")\n plt.grid(True)\n plt.legend([Line2D([0], [0], color=\"c\", lw=4),\n Line2D([0], [0], color=\"b\", lw=4),\n Line2D([0], [0], color=\"k\", lw=4)], ['max-gradient', 'mean-gradient', 'zero-gradient'])\n plt.savefig(self.params.graph_dir+\"gradflow_%d.png\" % (self.epoch),bbox_inches='tight')\n self.writer.add_figure(\"grad_flow\",fig,self.epoch)\n plt.clf()", "def histogram(self, dataset):\n pass", "def log_loss_values(self, writer: SummaryWriter, loss_values: np.ndarray, epoch: int) -> None:\n writer.add_histogram('loss/all', loss_values, epoch)", "def update_gradients_and_hessians(self, gradients, hessians, y_true,\n raw_predictions, sample_weight):", "def add_histogram_panel(self):\n\n self.ax_hist = plt.axes(cfg.position_histogram_t1_mri)\n self.ax_hist.set_xticks(cfg.xticks_histogram_t1_mri)\n self.ax_hist.set_yticks([])\n self.ax_hist.set_autoscaley_on(True)\n self.ax_hist.set_prop_cycle('color', cfg.color_histogram_t1_mri)\n self.ax_hist.set_title(cfg.title_histogram_t1_mri, fontsize='small')", "def aggregate_hist (self, hist, shard_hist):\n for key, val in shard_hist:\n if key not in hist:\n hist[key] = val\n else:\n hist[key] += val", "def plotFeederHist(dflist, binrange=(0.5,100.5), labels=('', ''), cols=('nb_feeders', 'nb_feeders'), \\\n title=\"Number of Feeders Histogram\", alpha=(.5,.5)):\n feed1, feed2 = dflist\n nbins = int(binrange[1] - binrange[0])\n ax = sns.histplot(data=feed1, x=cols[0], hue=None, binwidth=1, binrange=(binrange[0],binrange[0]+nbins), color='red', alpha=alpha[0], label=labels[0])\n sns.histplot(data=feed2, x=cols[1], hue=None, binwidth=1, binrange=(binrange[0],binrange[0]+nbins), color='blue', alpha=alpha[1], label=labels[1])\n ax.set_xlim(-.5,binrange[1])\n plt.title(title, fontsize=20)\n plt.legend()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the device to use for caching variables. Variables are cached on the worker CPU when using replicas.
def caching_device(self):
  if self._num_ps_tasks > 0:
    return lambda op: op.device
  else:
    return None
[ "def get_device():\n return torch.device('cuda' if torch.cuda.is_available() else 'cpu')", "def variables_device(self):\n device = ''\n if self._num_ps_tasks > 0:\n device += self._ps_device\n\n device += _get_device(self._clone_on_cpu).name\n\n class _PSDeviceChooser(object):\n \"\"\"Slim device chooser for variables when using PS.\"\"\"\n\n def __init__(self, device, tasks):\n self._device = device\n self._tasks = tasks\n self._task = 0\n\n def choose(self, op):\n if op.device:\n return op.device\n node_def = op if isinstance(op, tf.NodeDef) else op.node_def\n if node_def.op.startswith('Variable'):\n t = self._task\n self._task = (self._task + 1) % self._tasks\n d = '%s/task:%d' % (self._device, t)\n return d\n else:\n return op.device\n\n if not self._num_ps_tasks:\n return device\n else:\n chooser = _PSDeviceChooser(device, self._num_ps_tasks)\n return chooser.choose", "def get_device() -> torch.device:\n return torch.device('cuda' if torch.cuda.is_available() else 'cpu')", "def device(use_gpu=False):\n if use_gpu:\n return ops.device(\"/gpu:0\")\n return ops.device(\"/cpu:0\")", "def optimizer_device(self):\n if self._num_ps_tasks > 0 or self._num_clones > 0:\n device = self._worker_device\n device += _get_device(self._clone_on_cpu).name\n return device\n else:\n return ''", "def get_device(gpus=None):\n if not gpus:\n parallel = False\n device = torch.device(\"cpu\")\n return parallel, device\n if len(gpus) > 1:\n parallel = True\n device = torch.device(\"cpu\")\n else:\n parallel = False\n device = torch.device(gpus[0])\n return parallel, device", "def default_device():\n global DEFAULT_DEVICE\n if not ivy.exists(DEFAULT_DEVICE):\n DEFAULT_DEVICE = 'gpu:0' if ivy.gpu_is_available() else 'cpu'\n return DEFAULT_DEVICE", "def inputs_device(self):\n device = ''\n if self._num_ps_tasks > 0:\n device += self._worker_device\n device += '/device:CPU:0'\n return device", "def get_device_from_fit_dictionary(X: Dict[str, Any]) -> torch.device:\n if not torch.cuda.is_available():\n return torch.device(\"cpu\")\n\n return torch.device(X.get(\"device\", \"cpu\"))", "def device(self):\n return str(self.dummy_param.device)", "def get_tf_device(MAX_LOAD):\n DEVICE = None\n MAX_LOAD = 5 # hardcode this to actually spread computation\n # Set memory growth and list logical devices\n if int(tf.__version__[0]) < 2:\n physical_gpu = tf.config.experimental.list_physical_devices('GPU')\n for gpu in physical_gpu:\n tf.config.experimental.set_memory_growth(gpu, True)\n gpu_devices = tf.config.experimental.list_logical_devices('GPU')\n cpu_devices = tf.config.experimental.list_logical_devices('CPU')\n else:\n physical_gpu = tf.config.list_physical_devices('GPU')\n for gpu in physical_gpu:\n tf.config.experimental.set_memory_growth(gpu, True)\n gpu_devices = tf.config.list_logical_devices('GPU')\n cpu_devices = tf.config.list_logical_devices('CPU')\n\n # Select suitable GPU or default to CPU\n if gpu_devices:\n nvidia_command = [\n \"nvidia-smi\",\n \"--query-gpu=index,utilization.gpu\",\n \"--format=csv\"]\n nvidia_output = subprocess.run(\n nvidia_command, text=True, capture_output=True).stdout\n gpu_loads = np.array(re.findall(r\"(\\d+), (\\d+) %\", nvidia_output),\n dtype=np.int) # tuple (id, load%)\n eligible_gpu = np.where(gpu_loads[:, 1] < MAX_LOAD)\n if len(eligible_gpu[0]) == 0:\n print(\"No GPUs with less than 20% load. 
Check nvidia-smi.\")\n exit(0)\n else:\n # Choose the highest id eligible GPU\n # Assuming a lot of people use default allocation which is\n # lowest id.\n gpu_id = np.amax(gpu_loads[eligible_gpu, 0])\n DEVICE = gpu_devices[gpu_id].name\n print(\"CHOSEN GPU IS:\", DEVICE)\n else:\n # Default to CPU\n DEVICE = cpu_devices[0].name\n print(\"NO GPU FOUND, DEFAULTING TO CPU.\")\n\n return DEVICE", "def resolve_device(device = None) -> torch.device:\n if device is None or device == 'gpu':\n device = 'cuda'\n if isinstance(device, str):\n device = torch.device(device)\n if not torch.cuda.is_available() and device.type == 'cuda':\n device = torch.device('cpu')\n print('No cuda devices were available. The model runs on CPU')\n return device", "def get_device(tensor_or_module, default=None):\n if hasattr(tensor_or_module, \"device\"):\n return tensor_or_module.device\n elif hasattr(tensor_or_module, \"parameters\"):\n return next(tensor_or_module.parameters()).device\n elif default is None:\n raise TypeError(f\"Don't know how to get device of {type(tensor_or_module)} object\")\n else:\n return torch.device(default)", "def get_device_id():\n cpuinfo = open('/proc/cpuinfo', 'r')\n for line in cpuinfo:\n if line[0:6] == 'Serial':\n cpuserial = line[10:26]\n cpuinfo.close()\n return cpuserial", "def default_cpu_device():\n import bempp.api\n import os\n\n # pylint: disable=W0603\n global _DEFAULT_CPU_DEVICE\n global _DEFAULT_CPU_CONTEXT\n\n if \"BEMPP_CPU_DRIVER\" in os.environ:\n name = os.environ[\"BEMPP_CPU_DRIVER\"]\n else:\n name = None\n\n if _DEFAULT_CPU_DEVICE is None:\n try:\n ctx, device = find_cpu_driver(name)\n except:\n raise RuntimeError(\"Could not find suitable OpenCL CPU driver.\")\n _DEFAULT_CPU_CONTEXT = ctx\n _DEFAULT_CPU_DEVICE = device\n bempp.api.log(f\"OpenCL CPU Device set to: {_DEFAULT_CPU_DEVICE.name}\")\n return _DEFAULT_CPU_DEVICE", "def device(self):\n return self.broker.device(**{\"IfAddrID\": self.IfAddrID})", "def get_model_device(model):\n return model.parameters().__next__().device", "def get_current_device_index() -> int:\r\n if torch.cuda.device_count() > 0:\r\n return torch.cuda._current_device()\r\n return -1", "def get_default_output_device():\n read_access = wr.KEY_READ | wr.KEY_WOW64_64KEY if is_os_64bit() else wr.KEY_READ\n audio_path = r'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\MMDevices\\Audio\\Render'\n audio_key = wr.OpenKeyEx(wr.HKEY_LOCAL_MACHINE, audio_path, 0, read_access)\n num_devices = wr.QueryInfoKey(audio_key)[0]\n active_last_used, active_device_name = -1, None\n for i in range(num_devices):\n device_key_path = f'{audio_path}\\\\{wr.EnumKey(audio_key, i)}'\n device_key = wr.OpenKeyEx(wr.HKEY_LOCAL_MACHINE, device_key_path, 0, read_access)\n if wr.QueryValueEx(device_key, 'DeviceState')[0] == 1: # if enabled\n properties_path = f'{device_key_path}\\\\Properties'\n properties = wr.OpenKeyEx(wr.HKEY_LOCAL_MACHINE, properties_path, 0, read_access)\n device_name = wr.QueryValueEx(properties, '{b3f8fa53-0004-438e-9003-51a46e139bfc},6')[0]\n device_type = wr.QueryValueEx(properties, '{a45c254e-df1c-4efd-8020-67d146a850e0},2')[0]\n pa_name = f'{device_type} ({device_name})' # name shown in PyAudio\n last_used = wr.QueryValueEx(device_key, 'Level:0')[0]\n if last_used > active_last_used: # the bigger the number, the more recent it was used\n active_last_used = last_used\n active_device_name = pa_name\n return active_device_name" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Device used to create the clone and all the ops inside the clone.
def clone_device(self, clone_index):
  if clone_index >= self._num_clones:
    raise ValueError('clone_index must be less than num_clones')
  device = ''
  if self._num_ps_tasks > 0:
    device += self._worker_device
  device_type = _get_device(self._clone_on_cpu).device_type
  device += '/device:%s:%d' % (device_type, clone_index)
  return device
[ "def clonedev(self):\n try:\n try:\n self.device_hidrep\n except:\n self.showMessage(\"Claim the interfaces before trying to clone the device. We need some info\",color='red')\n return \"Cloning Failed\"\n try:\n self.devcfg.bmAttributes\n except:\n self.showMessage(\"Claim the interfaces before trying to clone the device. We need some info\",color='red')\n return \"Cloning Failed\"\n try:\n self.devcfg.bMaxPower\n except:\n self.showMessage(\"Claim the interfaces before trying to clone the device. We need some info\",color='red')\n return \"Cloning Failed\"\n cloner = open(\"clones/%s\" % self.SelectedDevice, 'w')\n print(\"setting up: %s\" % self.manufacturer)\n print(\"Creating backup of device\\n\")\n self.devJson = json.dumps({\"idVen\":'0x{:04X}'.format(self.device.idVendor),\\\n \"idProd\":'0x{:04X}'.format(self.device.idProduct),\\\n \"manufacturer\":self.manufacturer,\\\n \"bcdDev\":'0x{:04X}'.format(self.device.bcdDevice),\\\n \"bcdUSB\":'0x{:04X}'.format(self.device.bcdUSB),\\\n \"serial\":self.device.serial_number,\\\n \"bDevClass\":'0x{:02X}'.format(self.device.bDeviceClass),\\\n \"bDevSubClass\":'0x{:02X}'.format(self.device.bDeviceSubClass),\\\n \"protocol\":'0x{:02X}'.format(self.device.bDeviceProtocol),\\\n \"MaxPacketSize\":'0x{:02X}'.format(self.device.bMaxPacketSize0),\\\n \"hidreport\":','.join([i.decode('utf-8') for i in self.device_hidrep]),\\\n \"bmAttributes\":'0x{:02X}'.format(self.devcfg.bmAttributes),\\\n \"MaxPower\":'0x{:02X}'.format(self.devcfg.bMaxPower),\n \"product\":self.device.product})\n cloner.write(self.devJson)\n cloner.write('\\n++++++\\n')\n cloner.write(str(self.device)+\"\\n\\n\")\n print(\"- Done: Device settings copied to file.\\n\")\n cloner.close()\n except Exception as e:\n self.showMessage(\"Cannot clone the device!\\n\", color='red',blink='y')", "def start_device(self) -> None:\n if self.connection is None:\n self.connection = self.node.build_ssh_client()\n\n self.startup_tasks()\n self.generate_id()\n\n self.connection.exec_command(\"{} -c {}\".format(\n path.join(self.node.build_path, self.device_type), \n self.config_path), get_pty=True)[0]", "def _CrossDeviceCopy(name=None, attr=None, out=None, **kwargs):\n return (0,)", "def make_device():\n\n def device(name=\"device1\", site_name=\"site1\", **kwargs):\n \"\"\"Provide an instance of a Device model.\"\"\"\n return Device(name=name, site_name=site_name, **kwargs)\n\n return device", "def _DeviceAssignment(self):\n mesh_shape = [8, 8, 1, 2]\n device_coordinates = np.zeros([16, 8, 4], dtype=np.int32)\n for i in range(np.prod(mesh_shape)):\n x = i // 16\n y = i % 16 // 2\n core = i % 2\n task = x // 2 * 4 + y // 2\n device = x % 2 * 4 + y % 2 * 2 + core\n device_coordinates[task, device] = [x, y, 0, core]\n topology = tf.tpu.experimental.Topology(\n mesh_shape=mesh_shape, device_coordinates=device_coordinates)\n assignment = device_assignment.device_assignment(\n topology, computation_shape=[1, 1, 1, 1], num_replicas=128)\n py_utils.SetTpuDeviceAssignment(assignment)\n try:\n yield\n finally:\n py_utils.SetTpuDeviceAssignment(None)", "def __init__(self, platform=\"generic\", device=\"generic\", rootfs=\"\"):\n self.platform = platform\n self.device = device\n self.rootfs = rootfs", "def create_device_action(self, netsim, device):\n self.log.info('Creating new netsim network with device ', device)\n response = None\n while True:\n # Create the network\n create_response = netsim.create_device(device)\n response = create_response\n if create_response.error:\n break\n # Init netsim device 
configuration\n init_response = netsim.init_config(device)\n if init_response.error:\n response = init_response\n break\n # Load init configuration to cdb\n load_response = netsim.load_config()\n if load_response.error:\n response = load_response\n break\n # all operations finished\n break\n\n return response", "def new_device(mac, ip, name):\n return Device(mac, ip, name)", "def create_device(device):\n return FoobotDevice(auth_header=self.auth_header,\n user_id=device['userId'],\n uuid=device['uuid'],\n name=device['name'],\n mac=device['mac'], base_url=self.BASE_URL)", "def _open_device(self):\n pass", "def test_create_asset_managed_device(self):\n pass", "def _create_device(device, host, username, password):\n module_name = 'acts.controllers.pdu_lib.' + device\n module = importlib.import_module(module_name)\n return module.PduDevice(host, username, password)", "def snap_cow_device(self):\n\n if not self.snap_cow_name:\n return None\n if not self.exists():\n return None\n\n dev = DeviceMapperDevice(\n dm_name=self.snap_cow_name,\n appname=self.appname,\n verbose=self.verbose,\n base_dir=self.base_dir,\n use_stderr=self.use_stderr,\n simulate=self.simulate,\n sudo=self.sudo,\n quiet=self.quiet,\n )\n return dev", "def create_testbed_device_instance(self, dev_name_info, hint):\n testbed_dev = None\n if hint == \"AP\":\n testbed_dev = TestBedAP(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"AP\"\n if hint == \"STA\":\n testbed_dev = TestBedSTA(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"STA\"\n if hint == \"DUT\":\n testbed_dev = DUT(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"DUT\"\n if hint == \"SNIFFER\":\n testbed_dev = Sniffer(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"SNIFFER\"\n if hint == \"PCENDPOINT\":\n testbed_dev = PCEndpoint(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"PCENDPOINT\"\n if hint == \"APCONFIG\":\n testbed_dev = APConfig(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"APCONFIG\"\n if hint == \"RADIUSSERVER\":\n testbed_dev = RadiusServer(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"RADIUSSERVER\"\n if hint == \"OSUSERVER\":\n testbed_dev = OSUServer(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"OSUSERVER\"\n if hint == \"ATTENUATOR\":\n testbed_dev = Attenuator(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"ATTENUATOR\"\n if hint == \"POWERSWITCH\":\n testbed_dev = PowerSwitch(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"POWERSWITCH\"\n if hint == \"WFAEMT\":\n testbed_dev = WFAEMT(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"WFAEMT\"\n return testbed_dev", "def device(use_gpu=False):\n if use_gpu:\n return ops.device(\"/gpu:0\")\n return ops.device(\"/cpu:0\")", "def generate_context_build_program(self, device):\n\n print(\"DEVICE_NAME: {0}\".format(device.name))\n self.device_name = device.name\n \"\"\"print device info\"\"\"\n if self.verbose:\n print(\"DEVICE_GLOBAL_MEM_SIZE: {0}\".format(device.global_mem_size//1024//1024), 'MB')\n print(\"MAX_WORK_GROUP_SIZE: \", device.max_work_group_size)\n print(\"MAX_WORK_ITEM_SIZES: \", device.max_work_item_sizes)\n\n self.work_group_size = device.max_work_group_size\n if self.verbose:\n print(\"Work Group Size: \", 
self.work_group_size)\n\n self.context = cl.Context([device])\n \"\"\"create context\"\"\"\n\n if self.verbose:\n print(\"Building kernel from source code: \")\n print(\"***********************************\")\n print(srcKernel)\n\n self.program = cl.Program(self.context, srcKernel)\n \"\"\"associates kernel source code to program\"\"\"\n\n try:\n self.program.build([\"-cl-no-signed-zeros\"])\n \"\"\"build program with the specific flag \"\"\"\n except:\n print(\"Build log:\")\n print(self.program.get_build_info(device, cl.program_build_info.LOG))\n \"\"\"\"in case of failure prints error to stdout\"\"\"\n raise\n else:\n if self.printonce:\n print(\"Program has been built!\")\n\n self.queue = cl.CommandQueue(self.context)\n \"\"\"associates context to queue\"\"\"\n self.kernel = cl.Kernel(self.program, \"knapsack\")\n \"\"\"creates kernel from built program\"\"\"", "def store(self):\r\n store_devices(self)", "def Add(cls,tree,name,add_source=False):\n if cls is Device: raise Exception(\"Cannot add super class Device.\")\n parent = tree\n if isinstance(tree, TreeNode): tree = tree.tree\n _TreeShr._TreeStartConglomerate(tree.ctx,len(cls.parts)+1)\n if isinstance(name,_scr.Ident):\n name=name.data()\n head=parent.addNode(name,'DEVICE')\n if add_source is None:\n add_source = _mds.getenv(\"PYDEVICE_ADD_SOURCE\",\"no\").lower()\n add_source = not (add_source == \"no\" or add_source == \"0\" or add_source == \"off\")\n if add_source:\n try: cls.__add_source(head,cls.__name__,cls)\n except Exception as e:\n _sys.stderr.write(\"Could not find source code for %s: %s\\n\"%(cls.__name__,str(e)))\n head.record=_cmp.Conglom('__python__',cls.__name__,None,cls.getImportString())\n else:\n head.record=_cmp.Conglom('__python__',cls.__name__,None,cls.getImportString())\n head=TreeNode(head)\n head.write_once=True\n glob = _mimport('__init__').load_package({})\n glob['tree'] = tree\n glob['path'] = head.path\n glob['head'] = head\n for elt in cls.parts: # first add all nodes\n node=head.addNode(elt['path'],elt['type'])\n for elt in cls.parts: # then you can reference them in valueExpr\n try:\n node=head.getNode(elt['path'])\n if 'value' in elt:\n if Device.debug: print(node,node.usage,elt['value'])\n node.record = elt['value']\n elif 'valueExpr' in elt:\n glob['node'] = node\n if Device.debug: print(node,node.usage,elt['valueExpr'])\n node.record = eval(elt['valueExpr'], glob)\n if 'options' in elt:\n for option in elt['options']:\n node.__setattr__(option,True)\n except:\n _sys.stderr.write('ERROR: %s\\n'%str(elt))\n raise\n _TreeShr._TreeEndConglomerate(tree.ctx)\n return head", "def mknod_ops(self):\n return self._mknod_ops" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Name scope to create the clone.
def clone_scope(self, clone_index):
  if clone_index >= self._num_clones:
    raise ValueError('clone_index must be less than num_clones')
  scope = ''
  if self._num_clones > 1:
    scope = 'clone_%d' % clone_index
  return scope
[ "def new_scope(self):\n self.append(Scope(self.peek()))\n return", "def scope_name(self):\n return self.fun.scope_name", "def instance(origin, copy, identifier):\n newInstance = ObName()\n newInstance.origin = origin\n newInstance.copy = copy\n newInstance.identifier = identifier\n return newInstance", "def clone(self, name='', datastore_id=-1):\n self.client.call(self.METHODS['clone'], self.id, name, datastore_id)", "def copy(self, new_name):\n return self.__deepcopy(name=new_name, memo={})", "def create_copy(self):\n raise NotImplementedError(\n \"\"\"method self.new_copy() not implemented\n for symbol {!s} of type {}\"\"\".format(\n self, type(self)\n )\n )", "def sub_scope(self, kind, name, node, lineno):\n generator = kind(self.space, name, node, lineno, self.symbols,\n self.compile_info)\n return generator.assemble()", "def makeTab(self, name):\n\t\tself.divies.append(JPLDivies('makeTab',name=name))", "def create_placeholder(cls, name):\n pass", "def scope_name(self) -> str:\n return self._scope_name", "def duplicate(self, new_name):\n\t\treturn Node(self.node_type, new_name)", "def create(self):\n name, term = self.new_named_terminal()\n return self._finish_create(name, term)", "def clone_pool(self, pool_name, cloned_for, avi_config, userprefix=None):\n pools = [pool for pool in avi_config['Pool'] if\n pool['name'] == pool_name]\n if pools:\n pool_obj = copy.deepcopy(pools[0])\n pname = pool_obj['name']\n pool_name = re.sub('[:]', '-', '%s-%s' % (pname, cloned_for))\n pool_obj['name'] = pool_name\n avi_config['Pool'].append(pool_obj)\n LOG.info(\n \"Same pool reference to other object. Clone Pool %s for %s\" %\n (pool_name, cloned_for))\n return pool_obj['name']\n return None", "def clone(self, **kwargs):\r\n return DecoratedKey(self, **kwargs)", "def duplicateCurve(local=bool, range=bool, name=\"string\", constructionHistory=bool, object=bool):\n pass", "def clone(self, name, **attr):\n\t\tobj = copy.deepcopy(self._objects.get(name))\n\t\tobj.__dict__.update(attr)\n\t\treturn obj", "def __repr__(self) -> str:\n return \"Scope({})\".format(self.__dict__)", "def filter_name(scope_generator):\r\n def handle_for_loops(loop):\r\n # Take the first statement (for has always only\r\n # one, remember `in`). And follow it.\r\n if not loop.inputs:\r\n return []\r\n result = get_iterator_types(follow_statement(loop.inputs[0]))\r\n if len(loop.set_vars) > 1:\r\n commands = loop.set_stmt.get_commands()\r\n # loops with loop.set_vars > 0 only have one command\r\n result = assign_tuples(commands[0], result, name_str)\r\n return result\r\n\r\n def process(name):\r\n \"\"\"\r\n Returns the parent of a name, which means the element which stands\r\n behind a name.\r\n \"\"\"\r\n result = []\r\n no_break_scope = False\r\n par = name.parent\r\n exc = pr.Class, pr.Function\r\n until = lambda: par.parent.parent.get_parent_until(exc)\r\n is_array_assignment = False\r\n\r\n if par is None:\r\n pass\r\n elif par.isinstance(pr.Flow):\r\n if par.command == 'for':\r\n result += handle_for_loops(par)\r\n else:\r\n debug.warning('Flow: Why are you here? %s' % par.command)\r\n elif par.isinstance(pr.Param) \\\r\n and par.parent is not None \\\r\n and isinstance(until(), pr.Class) \\\r\n and par.position_nr == 0:\r\n # This is where self gets added - this happens at another\r\n # place, if the var_args are clear. But sometimes the class is\r\n # not known. Therefore add a new instance for self. 
Otherwise\r\n # take the existing.\r\n if isinstance(scope, er.InstanceElement):\r\n inst = scope.instance\r\n else:\r\n inst = er.Instance(er.Class(until()))\r\n inst.is_generated = True\r\n result.append(inst)\r\n elif par.isinstance(pr.Statement):\r\n def is_execution(calls):\r\n for c in calls:\r\n if isinstance(c, (unicode, str)):\r\n continue\r\n if c.isinstance(pr.Array):\r\n if is_execution(c):\r\n return True\r\n elif c.isinstance(pr.Call):\r\n # Compare start_pos, because names may be different\r\n # because of executions.\r\n if c.name.start_pos == name.start_pos \\\r\n and c.execution:\r\n return True\r\n return False\r\n\r\n is_exe = False\r\n for assignee, op in par.assignment_details:\r\n is_exe |= is_execution(assignee)\r\n\r\n if is_exe:\r\n # filter array[3] = ...\r\n # TODO check executions for dict contents\r\n is_array_assignment = True\r\n else:\r\n details = par.assignment_details\r\n if details and details[0][1] != '=':\r\n no_break_scope = True\r\n\r\n # TODO this makes self variables non-breakable. wanted?\r\n if isinstance(name, er.InstanceElement) \\\r\n and not name.is_class_var:\r\n no_break_scope = True\r\n\r\n result.append(par)\r\n else:\r\n # TODO multi-level import non-breakable\r\n if isinstance(par, pr.Import) and len(par.namespace) > 1:\r\n no_break_scope = True\r\n result.append(par)\r\n return result, no_break_scope, is_array_assignment\r\n\r\n flow_scope = scope\r\n result = []\r\n # compare func uses the tuple of line/indent = line/column\r\n comparison_func = lambda name: (name.start_pos)\r\n\r\n for nscope, name_list in scope_generator:\r\n break_scopes = []\r\n # here is the position stuff happening (sorting of variables)\r\n for name in sorted(name_list, key=comparison_func, reverse=True):\r\n p = name.parent.parent if name.parent else None\r\n if isinstance(p, er.InstanceElement) \\\r\n and isinstance(p.var, pr.Class):\r\n p = p.var\r\n if name_str == name.get_code() and p not in break_scopes:\r\n r, no_break_scope, is_array_assignment = process(name)\r\n if is_goto:\r\n if not is_array_assignment: # shouldn't goto arr[1] =\r\n result.append(name)\r\n else:\r\n result += r\r\n # for comparison we need the raw class\r\n s = nscope.base if isinstance(nscope, er.Class) else nscope\r\n # this means that a definition was found and is not e.g.\r\n # in if/else.\r\n if result and not no_break_scope:\r\n if not name.parent or p == s:\r\n break\r\n break_scopes.append(p)\r\n\r\n while flow_scope:\r\n # TODO check if result is in scope -> no evaluation necessary\r\n n = dynamic.check_flow_information(flow_scope, name_str,\r\n position)\r\n if n:\r\n result = n\r\n break\r\n\r\n if result:\r\n break\r\n if flow_scope == nscope:\r\n break\r\n flow_scope = flow_scope.parent\r\n flow_scope = nscope\r\n if result:\r\n break\r\n\r\n if not result and isinstance(nscope, er.Instance):\r\n # __getattr__ / __getattribute__\r\n result += check_getattr(nscope, name_str)\r\n debug.dbg('sfn filter \"%s\" in (%s-%s): %s@%s' % (name_str, scope,\r\n nscope, u(result), position))\r\n return result", "def clone(self, name: str = None) -> \"Network\":\n # pylint: disable=protected-access\n net = object.__new__(Network)\n net._init_fields()\n net.name = name if name is not None else self.name\n net.static_kwargs = util.EasyDict(self.static_kwargs)\n net._build_module_src = self._build_module_src\n net._build_func_name = self._build_func_name\n net._build_func = self._build_func\n net._init_graph()\n net.copy_vars_from(self)\n return net" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Device to use with the optimizer.
def optimizer_device(self):
  if self._num_ps_tasks > 0 or self._num_clones > 0:
    device = self._worker_device
    device += _get_device(self._clone_on_cpu).name
    return device
  else:
    return ''
[ "def device(use_gpu=False):\n if use_gpu:\n return ops.device(\"/gpu:0\")\n return ops.device(\"/cpu:0\")", "def get_device() -> torch.device:\n return torch.device('cuda' if torch.cuda.is_available() else 'cpu')", "def _init_device(self, cuda_device: torch.device = torch.device('cpu')):\n if self.config.cuda_enabled() and torch.cuda.is_available():\n return torch.device(cuda_device)\n else:\n # Force usage of CPU\n torch.cuda.is_available = lambda: False\n return torch.device(\"cpu\")", "def get_device():\n return torch.device('cuda' if torch.cuda.is_available() else 'cpu')", "def inputs_device(self):\n device = ''\n if self._num_ps_tasks > 0:\n device += self._worker_device\n device += '/device:CPU:0'\n return device", "def get_module_device(module: nn.Module) -> torch.device:\n return next(module.parameters()).device", "def set_device(self, device):\n self.device = device\n self.model = self.model.to(device)", "def device_op(self):\n return self.args[0].device_op", "def device(self):\n return str(self.dummy_param.device)", "def get_model_device(model):\n return model.parameters().__next__().device", "def InitDevice(self):\n # No need to init the device.\n pass", "def cuda(self: T, device: Optional[int] = None) -> T:\n return self.to(torch.device(f\"cuda:{device}\" if device is not None else \"cuda\"))", "def hook_device_target(self, x):\n self.device_target = x", "def get_device(gpus=None):\n if not gpus:\n parallel = False\n device = torch.device(\"cpu\")\n return parallel, device\n if len(gpus) > 1:\n parallel = True\n device = torch.device(\"cpu\")\n else:\n parallel = False\n device = torch.device(gpus[0])\n return parallel, device", "def test_device_method(self, mocker):\n with JacobianTape() as tape:\n qml.RX(0.543, wires=[0])\n qml.RY(-0.654, wires=[0])\n qml.expval(qml.PauliY(0))\n\n dev = qml.device(\"default.qubit\", wires=1)\n\n dev.jacobian = mocker.Mock()\n tape.device_pd(dev)\n dev.jacobian.assert_called_once()\n\n dev.jacobian = mocker.Mock()\n tape.jacobian(dev, method=\"device\")\n dev.jacobian.assert_called_once()", "def setup_device(\n model: nn.Module, target_devices: List[int]\n) -> Tuple[torch.device, List[int]]:\n available_devices = list(range(torch.cuda.device_count()))\n\n if not available_devices:\n log.warning(\n \"There's no GPU available on this machine. Training will be performed on CPU.\"\n )\n device = torch.device(\"cpu\")\n model = model.to(device)\n return model, device\n\n if not target_devices:\n log.info(\"No GPU selected. Training will be performed on CPU.\")\n device = torch.device(\"cpu\")\n model = model.to(device)\n return model, device\n\n max_target_gpu = max(target_devices)\n max_available_gpu = max(available_devices)\n\n if max_target_gpu > max_available_gpu:\n msg = (\n f\"Configuration requests GPU #{max_target_gpu} but only {max_available_gpu} \"\n \"available. 
Check the configuration and try again.\"\n )\n log.critical(msg)\n raise Exception(msg)\n\n log.info(f\"Using devices {target_devices} of available devices {available_devices}\")\n device = torch.device(f\"cuda:{target_devices[0]}\")\n if len(target_devices) > 1:\n model = nn.DataParallel(model, device_ids=target_devices)\n else:\n model = model.to(device)\n return model, device", "def default_device():\n global DEFAULT_DEVICE\n if not ivy.exists(DEFAULT_DEVICE):\n DEFAULT_DEVICE = 'gpu:0' if ivy.gpu_is_available() else 'cpu'\n return DEFAULT_DEVICE", "def to(self, device: str) -> None:\r\n \r\n if device == 'cpu':\r\n self.weights0 = cp.asnumpy(self.weights0)\r\n self.weights1 = cp.asnumpy(self.weights1)\r\n elif device == 'gpu':\r\n self.weights0 = cp.asarray(self.weights0)\r\n self.weights1 = cp.asarray(self.weights1)\r\n else:\r\n raise ValueError(\"`device` must be either 'cpu' or 'gpu'\")\r\n self.xp = cp.get_array_module(self.weights0)", "def resolve_device(device = None) -> torch.device:\n if device is None or device == 'gpu':\n device = 'cuda'\n if isinstance(device, str):\n device = torch.device(device)\n if not torch.cuda.is_available() and device.type == 'cuda':\n device = torch.device('cpu')\n print('No cuda devices were available. The model runs on CPU')\n return device" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Device to use to build the inputs.
def inputs_device(self):
  device = ''
  if self._num_ps_tasks > 0:
    device += self._worker_device
  device += '/device:CPU:0'
  return device
[ "def listInputDevices():\n pass", "def assignInputDevice(multiple=bool, clutch=\"string\", continuous=bool, immediate=bool, device=\"string\"):\n pass", "def InitDevice(self):\n # No need to init the device.\n pass", "def convert_device(self):\n\n from spira.yevon.geometry.ports import Port\n\n for n in self.g.nodes():\n convert = False\n \n P = self.g.node[n]['process_polygon']\n\n if P.layer.purpose.symbol == 'DEVICE_METAL':\n for i in self.g.neighbors(n):\n if 'device_reference' in self.g.node[i]:\n D = self.g.node[i]['device_reference']\n # print(D)\n if D.purpose.symbol == 'P':\n convert = True\n \n if convert is True:\n port = Port(\n name='Djj{}'.format(n),\n midpoint=P.center,\n process=P.layer.process,\n )\n self.g.node[n]['device_reference'] = port\n return self", "def __device(self, name_ids):\n if self.current_symbol.id == self.names.query('SWITCH'):\n self.current_symbol = self.scanner.get_symbol()\n if self.current_symbol.id == self.names.query('0'):\n switch_state = self.devices.LOW\n elif self.current_symbol.id == self.names.query('1'):\n switch_state = self.devices.HIGH\n else:\n self.scanner.display_error(\"Expected 0 or 1 for switch state\")\n return False\n self.current_symbol = self.scanner.get_symbol()\n if self.current_symbol.id != self.names.query('state'):\n self.scanner.display_error(\"Expected keyword 'state'\")\n return False\n for name_id in name_ids:\n self.devices.make_device(name_id, self.devices.SWITCH, switch_state)\n # errors all covered by syntax, no need to separately check semantics\n\n elif self.current_symbol.id in [self.names.query('NAND'), self.names.query('AND'),\n self.names.query('OR'), self.names.query('NOR')]:\n gate_id = self.current_symbol.id\n self.current_symbol = self.scanner.get_symbol()\n try:\n num_inputs = int(self.names.get_name_string(self.current_symbol.id))\n except Exception:\n self.scanner.display_error(\"Expected integer number of inputs.\")\n return False\n self.current_symbol = self.scanner.get_symbol()\n if self.current_symbol.id != self.names.query('inputs'):\n self.scanner.display_error(\"Expected keyword 'inputs'\")\n return False\n for name_id in name_ids:\n error_type = self.devices.make_device(name_id, gate_id, num_inputs)\n if error_type == self.devices.INVALID_QUALIFIER:\n self.scanner.display_error(\"Number of inputs must be integer in range(1, 17)\")\n return False\n\n elif self.current_symbol.id == self.names.query('CLOCK'):\n self.current_symbol = self.scanner.get_symbol()\n if self.current_symbol.id != self.names.query('period'):\n self.scanner.display_error(\"Expected keyword 'period'\")\n return False\n self.current_symbol = self.scanner.get_symbol()\n try:\n # zeros at start will be truncated\n clock_period = int(self.names.get_name_string(self.current_symbol.id))\n except ValueError:\n self.scanner.display_error(\"Expected integer period.\")\n return False\n for name_id in name_ids:\n error_type = self.devices.make_device(name_id, self.devices.CLOCK, clock_period//2)\n if error_type == self.devices.INVALID_QUALIFIER: # check semantic error\n self.scanner.display_error(\"Expected half period >= 1 simulation cycle\")\n return False\n\n elif self.current_symbol.id == self.names.query('DTYPE'):\n for name_id in name_ids:\n self.devices.make_device(name_id, self.devices.D_TYPE)\n\n elif self.current_symbol.id == self.names.query('XOR'):\n for name_id in name_ids:\n self.devices.make_device(name_id, self.devices.XOR)\n\n elif self.current_symbol.id == self.names.query('SIGGEN'):\n definition_complete = False\n 
waveform = []\n self.current_symbol = self.scanner.get_symbol()\n while self.current_symbol.id != self.names.query('waveform'):\n if (self.current_symbol.id != self.names.query('0')\n and self.current_symbol.id != self.names.query('1')):\n self.scanner.display_error(\"Expected 0 or 1 or keyword 'waveform'\")\n return False\n level = int(self.names.get_name_string(self.current_symbol.id))\n self.current_symbol = self.scanner.get_symbol()\n if self.current_symbol.id != self.names.query('for'):\n self.scanner.display_error(\"Expected keyword 'for'\")\n return False\n self.current_symbol = self.scanner.get_symbol()\n try: # check semantic error, not zero\n multiple = int(self.names.get_name_string(self.current_symbol.id))\n except Exception:\n self.scanner.display_error(\"Expected integer number of cycles\")\n return False\n if multiple <= 0:\n self.scanner.display_error(\"Number of cycles must be greater than 0\")\n return False\n self.current_symbol = self.scanner.get_symbol()\n if self.current_symbol.id != self.names.query('cycles'):\n self.scanner.display_error(\"Expected keyword 'cycles'\")\n return False\n waveform = waveform + [level]*multiple\n definition_complete = True # at least one iteration defined so can build waveform\n self.current_symbol = self.scanner.get_symbol()\n if not definition_complete: # 'waveform' appears prematurely\n self.scanner.display_error(\"Require waveform definition before keyword 'waveform'\")\n return False\n if waveform == []:\n self.scanner.display_error(\"Blank waveform received\")\n return False\n for name_id in name_ids:\n self.devices.make_device(name_id, self.devices.SIGGEN, waveform)\n\n else:\n self.scanner.display_error('Expected device type')\n return False\n\n return True", "def set_device(self, device):\n self.device = device\n self.model = self.model.to(device)", "def initiate_hardware():\n\tglobal devices # use global device list\n\tprint \"init input devices\"\n\tfor dev in devices[\"input\"]:\n\t\ttry:\n\t\t\tdev.initialize()\n\t\texcept:\n\t\t\tprint \"ERROR:\", dev.ID, \"is not responding! Rescan of hardware might solve the problem..\"\n\tprint \"init axis devices\"\n\tfor dev in devices[\"axis\"]:\n\t\ttry:\n\t\t\tdev.initialize()\n\t\texcept:\n\t\t\tprint \"ERROR:\", dev.ID, \"is not responding! Rescan of hardware might solve the problem..\"", "def _open_device(self):\n pass", "def device(use_gpu=False):\n if use_gpu:\n return ops.device(\"/gpu:0\")\n return ops.device(\"/cpu:0\")", "def setup_device(\n model: nn.Module, target_devices: List[int]\n) -> Tuple[torch.device, List[int]]:\n available_devices = list(range(torch.cuda.device_count()))\n\n if not available_devices:\n log.warning(\n \"There's no GPU available on this machine. Training will be performed on CPU.\"\n )\n device = torch.device(\"cpu\")\n model = model.to(device)\n return model, device\n\n if not target_devices:\n log.info(\"No GPU selected. Training will be performed on CPU.\")\n device = torch.device(\"cpu\")\n model = model.to(device)\n return model, device\n\n max_target_gpu = max(target_devices)\n max_available_gpu = max(available_devices)\n\n if max_target_gpu > max_available_gpu:\n msg = (\n f\"Configuration requests GPU #{max_target_gpu} but only {max_available_gpu} \"\n \"available. 
Check the configuration and try again.\"\n )\n log.critical(msg)\n raise Exception(msg)\n\n log.info(f\"Using devices {target_devices} of available devices {available_devices}\")\n device = torch.device(f\"cuda:{target_devices[0]}\")\n if len(target_devices) > 1:\n model = nn.DataParallel(model, device_ids=target_devices)\n else:\n model = model.to(device)\n return model, device", "def to(self, device: str) -> None:\r\n \r\n if device == 'cpu':\r\n self.weights0 = cp.asnumpy(self.weights0)\r\n self.weights1 = cp.asnumpy(self.weights1)\r\n elif device == 'gpu':\r\n self.weights0 = cp.asarray(self.weights0)\r\n self.weights1 = cp.asarray(self.weights1)\r\n else:\r\n raise ValueError(\"`device` must be either 'cpu' or 'gpu'\")\r\n self.xp = cp.get_array_module(self.weights0)", "def deviceManager(axisName=bool, numDevices=bool, axisIndex=int, deviceNameFromIndex=int, axisOffset=bool, deviceIndex=int, axisCoordChanges=bool, axisScale=bool, numAxis=bool, attachment=bool):\n pass", "def build_kernel(self):\n ...", "def init(driver_configs, context_manager):\n drivers = []\n for driver_config in driver_configs:\n driver_name = driver_config[\"driver\"]\n driver_module = importlib.import_module(\"input.drivers.\"+driver_name)\n args = driver_config.get(\"args\", [])\n kwargs = driver_config.get(\"kwargs\", {})\n driver = driver_module.InputDevice(*args, **kwargs)\n drivers.append(driver)\n i = InputProcessor(drivers, context_manager)\n dm = DeviceManager(i)\n return i, dm", "def device(self):\n return str(self.dummy_param.device)", "def devicePanel():\n pass", "def _generate_device_configuration(self):\n cfg = gplay.DeviceConfigurationProto()\n cfg.touchScreen = 3\n cfg.keyboard = 1\n cfg.navigation = 1\n cfg.screenLayout = 2\n cfg.hasHardKeyboard = False\n cfg.screenDensity = 400\n cfg.hasFiveWayNavigation = False\n cfg.glEsVersion = int(\n format(self.higherOpenGLVersion, \"04x\")\n + format(self.lowerOpenGLVersion,\"04x\"), 16)\n cfg.nativePlatform.extend([\"armeabi-v7l\", \"armeabi\"])\n cfg.systemSharedLibrary.extend([\n \"android.test.runner\", \"com.android.future.usb.accessory\",\n \"com.android.location.provider\", \"com.android.nfc_extras\",\n \"com.google.android.maps\", \"com.google.android.media.effects\",\n \"com.google.widevine.software.drm\", \"javax.obex\"])\n cfg.systemAvailableFeature.extend([\n \"android.hardware.bluetooth\", \"android.hardware.camera\",\n \"android.hardware.camera.autofocus\",\n \"android.hardware.camera.flash\", \"android.hardware.camera.front\",\n \"android.hardware.faketouch\", \"android.hardware.location\",\n \"android.hardware.location.gps\",\n \"android.hardware.location.network\", \"android.hardware.microphone\",\n \"android.hardware.nfc\", \"android.hardware.screen.landscape\",\n \"android.hardware.screen.portrait\",\n \"android.hardware.sensor.accelerometer\",\n \"android.hardware.sensor.barometer\",\n \"android.hardware.sensor.compass\",\n \"android.hardware.sensor.gyroscope\",\n \"android.hardware.sensor.light\",\n \"android.hardware.sensor.proximity\", \"android.hardware.telephony\",\n \"android.hardware.telephony.gsm\", \"android.hardware.touchscreen\",\n \"android.hardware.touchscreen.multitouch\",\n \"android.hardware.touchscreen.multitouch.distinct\",\n \"android.hardware.touchscreen.multitouch.jazzhand\",\n \"android.hardware.usb.accessory\", \"android.hardware.usb.host\",\n \"android.hardware.wifi\", \"android.hardware.wifi.direct\",\n \"android.software.live_wallpaper\", \"android.software.sip\",\n \"android.software.sip.voip\", 
\"com.cyanogenmod.android\",\n \"com.cyanogenmod.nfc.enhanced\",\n \"com.google.android.feature.GOOGLE_BUILD\", \"com.nxp.mifare\",\n \"com.tmobile.software.themes\"])\n #cfg.screenWidth = 1080\n #cfg.screenHeight = 1920\n return cfg", "def example_to_device(self, example, device=None, memo=None):\n return example_to_device(example, device, memo)", "def start_device(self) -> None:\n if self.connection is None:\n self.connection = self.node.build_ssh_client()\n\n self.startup_tasks()\n self.generate_id()\n\n self.connection.exec_command(\"{} -c {}\".format(\n path.join(self.node.build_path, self.device_type), \n self.config_path), get_pty=True)[0]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the device to use for variables created inside the clone.
def variables_device(self):
  device = ''
  if self._num_ps_tasks > 0:
    device += self._ps_device
  device += _get_device(self._clone_on_cpu).name

  class _PSDeviceChooser(object):
    """Slim device chooser for variables when using PS."""

    def __init__(self, device, tasks):
      self._device = device
      self._tasks = tasks
      self._task = 0

    def choose(self, op):
      if op.device:
        return op.device
      node_def = op if isinstance(op, tf.NodeDef) else op.node_def
      if node_def.op.startswith('Variable'):
        t = self._task
        self._task = (self._task + 1) % self._tasks
        d = '%s/task:%d' % (self._device, t)
        return d
      else:
        return op.device

  if not self._num_ps_tasks:
    return device
  else:
    chooser = _PSDeviceChooser(device, self._num_ps_tasks)
    return chooser.choose
[ "def device(self):\n return str(self.dummy_param.device)", "def get_device():\n return torch.device('cuda' if torch.cuda.is_available() else 'cpu')", "def get_device() -> torch.device:\n return torch.device('cuda' if torch.cuda.is_available() else 'cpu')", "def device(use_gpu=False):\n if use_gpu:\n return ops.device(\"/gpu:0\")\n return ops.device(\"/cpu:0\")", "def get_model_device(model):\n return model.parameters().__next__().device", "def device(self) -> JvcProjector:\n return self.coordinator.device", "def get_module_device(module: nn.Module) -> torch.device:\n return next(module.parameters()).device", "def device(self):\n return self.broker.device(**{\"IfAddrID\": self.IfAddrID})", "def read_kit_device(self):\n self._is_hid_tool_not_connected_raise()\n\n dap_info = read_tool_info(self.housekeeper)\n\n device_name = dap_info['device_name'].lower()\n\n if device_name == '':\n device_name = None\n\n return device_name", "def default_device():\n global DEFAULT_DEVICE\n if not ivy.exists(DEFAULT_DEVICE):\n DEFAULT_DEVICE = 'gpu:0' if ivy.gpu_is_available() else 'cpu'\n return DEFAULT_DEVICE", "def optimizer_device(self):\n if self._num_ps_tasks > 0 or self._num_clones > 0:\n device = self._worker_device\n device += _get_device(self._clone_on_cpu).name\n return device\n else:\n return ''", "def inputs_device(self):\n device = ''\n if self._num_ps_tasks > 0:\n device += self._worker_device\n device += '/device:CPU:0'\n return device", "def get_device(tensor_or_module, default=None):\n if hasattr(tensor_or_module, \"device\"):\n return tensor_or_module.device\n elif hasattr(tensor_or_module, \"parameters\"):\n return next(tensor_or_module.parameters()).device\n elif default is None:\n raise TypeError(f\"Don't know how to get device of {type(tensor_or_module)} object\")\n else:\n return torch.device(default)", "def _get_device_diffuse():\n device = Mock(spec=DysonPureHotCoolLink)\n load_mock_device(device)\n device.state.focus_mode = FocusMode.FOCUS_OFF.value\n return device", "def device(self):\n ret = self._get_attr(\"device\")\n return IUSBDevice(ret)", "def _next_device(self):\n if self.model_config.num_gpus == 0:\n return ''\n dev = '/gpu:%d' % self._cur_gpu\n if self.model_config.num_gpus > 1:\n self._cur_gpu = (self._cur_gpu + 1) % (self.model_config.num_gpus - 1)\n return dev", "def device_number(self):\n ret = self._get_attr(\"deviceNumber\")\n return ret", "def get_device(self, device_name: NodeName) -> Device:\n try:\n dev_data = self.graph.nodes[device_name]['md']\n except KeyError:\n logger.error(f'requested device ({device_name}) not in facility')\n return\n\n if dev_data.dev is not None:\n return dev_data.dev\n elif dev_data.res is not None:\n # not instantiated yet, create and fill\n try:\n dev = dev_data.res.get()\n self.graph.nodes[device_name]['md'].dev = dev\n return dev\n except Exception:\n logger.error(f'Device {device_name} failed to load, '\n 'attempting to make a mock device')\n dev = make_mock_device(dev_data.res)\n self.graph.nodes[device_name]['md'].dev = dev\n return dev", "def clone_device(self, clone_index):\n if clone_index >= self._num_clones:\n raise ValueError('clone_index must be less than num_clones')\n device = ''\n if self._num_ps_tasks > 0:\n device += self._worker_device\n\n device_type = _get_device(self._clone_on_cpu).device_type\n device += '/device:%s:%d' % (device_type, clone_index)\n\n return device" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Collects and stores any xBlock and modulestore data into the block_structure that's necessary for later execution of the transformer's transform method. Transformers should store such
def collect(cls, block_structure):
    pass  # lint-amnesty, pylint: disable=unnecessary-pass
[ "def transform(self, usage_info, block_structure):\n raise NotImplementedError", "def save_to_block(self, data):\n self.refresh()\n\n if self.frame_start_time is None:\n # Get min frame time out of all blocks\n self.frame_start_time = time.time()\n for _, b in data.items():\n if b['timestamps']:\n self.frame_start_time = min(self.frame_start_time, b['timestamps'][0])\n\n self.log.debug('data passed to block: {d}', d=data)\n verified = self._verify_provider_data(data)\n\n if not verified:\n self.log.info('rebuilding data containing invalid field name')\n data = self._rebuild_invalid_data(data)\n self.log.debug('data after rebuild: {d}', d=data)\n\n for key, block in data.items():\n try:\n b = self.blocks[key]\n except KeyError:\n self.blocks[key] = Block(\n key, block['data'].keys(),\n )\n b = self.blocks[key]\n\n b.extend(block)\n self.last_block_received = key", "def __init__(self, obj):\n\t\tif not obj.type == 'block':\n\t\t\traise TypeError, \"Wrong type %s for block object!\" %obj.type\n\n\t\tself.type = obj.type\n\t\tself.name = obj.name\n\t\tself.data = obj.data[:]\n\n\t\t# required data\n\t\tself.flags = getit(obj, 70, 0)\n\t\tself.anonim = self.flags & 1 #anonymous block generated by hatching, associative dimensioning, other\n\t\tself.atrib = self.flags & 2 # has attribute definitions\n\t\tself.xref = self.flags & 4 # is an external reference (xref)\n\t\tself.xref_lay = self.flags & 8 # is an xref overlay \n\t\tself.dep_ext = self.flags & 16 # is externally dependent\n\t\tself.dep_res = self.flags & 32 # resolved external reference\n\t\tself.xref_ext = self.flags & 64 # is a referenced external reference xref\n\t\t#--todo--- if self.flag > 4: self.xref = True\n\n\t\t# optional data (with defaults)\n\t\tself.path = getit(obj, 1, '') # Xref path name\n\t\tself.discription = getit(obj, 4, '')\n\n\t\tself.entities = dxfObject('block_contents') #creates empty entities_container for this block\n\t\tself.entities.data = objectify([ent for ent in obj.data if type(ent) != list])\n\n\t\tself.layer = getit(obj, 8, None)\n\t\tself.loc = self.get_loc(obj)\n\n\t\t#print 'deb:Block %s data:\\n%s' %(self.name, self.data) #------------\n\t\t#print 'deb:Block %s self.entities.data:\\n%s' %(self.name, self.entities.data) #------------", "def transform(self, usage_info, block_structure):\n for block_key in block_structure.topological_traversal():\n parents = block_structure.get_parents(block_key)\n if parents:\n block_depth = min(\n self.get_block_depth(block_structure, parent_key)\n for parent_key in parents\n ) + 1\n else:\n block_depth = 0\n block_structure.set_transformer_block_field(\n block_key,\n self,\n self.BLOCK_DEPTH,\n block_depth\n )\n\n if self.requested_depth is not None:\n block_structure.remove_block_traversal(\n lambda block_key: self.get_block_depth(block_structure, block_key) > self.requested_depth\n )", "def _load_blocks(self, filename):\n contents = unpack(filename)\n blocks = contents['BlockTypesData']\n for block in blocks:\n if block is None:\n continue\n block_id = block['id']\n items = self._session.query(Item).filter_by(id=block_id).all()\n if len(items) == 0:\n print('no item matches block id \"{}\"'.format(block_id))\n try:\n item = items[0]\n if self._args.verbose:\n print('processing block \"{}\"'.format(item.name()))\n item.prestige = block['prestige']\n item.build_xp = block['buildXP']\n item.mine_xp = block['mineXP']\n except IndexError:\n if self._args.verbose:\n print('no item for block id={}'.format(block_id))\n\n self._session.commit()", "def 
transform_block_filters(self, usage_info, block_structure):\n raise NotImplementedError", "def _traverse_blocks_in_course(self, course, access_all_block_fields):\n all_blocks = []\n stack = [course]\n while stack:\n curr_block = stack.pop()\n all_blocks.append(curr_block)\n if curr_block.has_children:\n for block in reversed(curr_block.get_children()):\n stack.append(block)\n\n if access_all_block_fields:\n # Read the fields on each block in order to ensure each block and its definition is loaded.\n for xblock in all_blocks:\n for __, field in xblock.fields.items():\n if field.is_set_on(xblock):\n __ = field.read_from(xblock)", "def add_data(self, Blocks) :\n\n if not hasattr(Blocks, '__iter__') :\n self._add_single_block(Blocks)\n else :\n for Block in Blocks :\n self._add_single_block(Block)", "def __place(self, coords, block):\r\n \r\n self.__local_ids[coords], self.__local_data[coords] = self.__block2pair(block)", "def extend(self, block):\n if block['data'].keys() != self.data.keys():\n raise Exception(\"Block structure does not match: {}\".format(self.name))\n\n self.timestamps.extend(block['timestamps'])\n for k in self.data:\n self.data[k].extend(block['data'][k])", "def finalize_block(self, block_header):\n summary = block_header.hex()\n LOGGER.debug('FINALIZE BLOCK CANDIDATE: block_id=%s summary=%s',_short_id(self._block_id),_short_id(summary))\n if isinstance(block_header, bytes):\n \"\"\"\n At this point _block_id is previous and summary for current block\n save state with block_header key\n \"\"\"\n \"\"\"\n state = ConsensusState.consensus_state_for_block_id(\n block_id=summary,\n block_cache=self._block_cache,\n state_view_factory=self._state_view_factory,\n consensus_state_store=self._consensus_state_store,\n force=True\n )\n\n\n LOGGER.debug('FINALIZE BLOCK CANDIDATE: state=%s',state)\n # save in store\n state.set_consensus_state_for_block_id(summary,self._consensus_state_store)\n \"\"\"\n \"\"\"\n state_view = BlockWrapper.state_view_for_block(\n block_wrapper=self._block_cache.block_store.chain_head,\n state_view_factory=self._state_view_factory)\n \"\"\"\n # We need to create a wait certificate for the block and\n # then serialize that into the block header consensus field.\n if _VREG_:\n active_key = self._pbft_key_state_store.active_key\n pbft_key_state = self._pbft_key_state_store[active_key]\n sealed_signup_data = pbft_key_state.sealed_signup_data\n consensus = b'pbft' \n LOGGER.debug('PbftBlockPublisher::finalize_block isinstance DONE')\n return consensus\n\n # To compute the block hash, we are going to perform a hash using the\n # previous block ID and the batch IDs contained in the block\n hasher = hashlib.sha256(block_header.previous_block_id.encode())\n for batch_id in block_header.batch_ids:\n hasher.update(batch_id.encode())\n block_hash = hasher.hexdigest()\n\n # Using the current chain head, we need to create a state view so we\n # can create a PBFT enclave.\n state_view = BlockWrapper.state_view_for_block(\n block_wrapper=self._block_cache.block_store.chain_head,\n state_view_factory=self._state_view_factory)\n if _VREG_:\n # We need to create a wait certificate for the block and then serialize\n # that into the block header consensus field.\n active_key = self._pbft_key_state_store.active_key\n pbft_key_state = self._pbft_key_state_store[active_key]\n sealed_signup_data = pbft_key_state.sealed_signup_data\n\n #block_header.consensus = b'pbft' \n LOGGER.debug('PbftBlockPublisher::finalize_block: DONE')\n\n return True", "def transform(self):\n 
self._prepare()\n self._cluster()\n self._store()\n self.logger.debug('Transformation is done...')", "def initialize_block(self, block_header):\n LOGGER.debug('PbftBlockPublisher::initialize_block previous_block_id=%s (%s)',_short_id(block_header.previous_block_id),block_header)\n # If the previous block ID matches our cached one, that means that we\n # have already determined that even if we initialize the requested\n # block we would not be able to claim it. So, instead of wasting time\n # doing all of the checking again, simply short-circuit the failure so\n # that the validator can go do something more useful.\n if block_header.previous_block_id == PbftBlockPublisher._previous_block_id:\n LOGGER.debug(\"PbftBlockPublisher::initialize_block block_header.previous_block_id == PbftBlockPublisher._previous_block_id TRUE\")\n return False\n PbftBlockPublisher._previous_block_id = block_header.previous_block_id\n # Using the current chain head, we need to create a state view so we\n # can create a PBFT enclave.\n if False:\n state_view = BlockWrapper.state_view_for_block(\n block_wrapper=self._block_cache.block_store.chain_head,\n state_view_factory=self._state_view_factory)\n\n pbft_settings_view = PbftSettingsView(state_view)\n LOGGER.debug(\"PbftBlockPublisher::pbft_settings_view node=%s\",pbft_settings_view.pbft_node)\n #self._node = pbft_settings_view.pbft_node\n \n consensus_state = ConsensusState.consensus_state_for_block_id(\n block_id=block_header.previous_block_id,\n block_cache=self._block_cache,\n state_view_factory=self._state_view_factory,\n consensus_state_store=self._consensus_state_store,\n node=self._node\n )\n # shift into PrePrepare state\n consensus_state.next_step()\n #consensus_state.mark_as_own()\n consensus_state.set_consensus_state_for_block_id(block_header.previous_block_id,self._consensus_state_store)\n self._block_id = block_header.previous_block_id\n #consensus_state.set_node(self._node)\n LOGGER.debug(\"PbftBlockPublisher::initialize_block GET CONSENSUS_STATE=%s for block_id=%s \",consensus_state,_short_id(block_header.previous_block_id))\n # start \n # Get our validator registry entry to see what PBFT public key\n # other validators think we are using.\n\n if _VREG_:\n validator_registry_view = ValidatorRegistryView(state_view)\n validator_info = None\n\n try:\n validator_id = block_header.signer_public_key\n validator_info = validator_registry_view.get_validator_info(validator_id=validator_id)\n except KeyError:\n pass\n\n # If we don't have a validator registry entry, then check the active\n # key. If we don't have one, then we need to sign up. 
If we do have\n # one, then our validator registry entry has not percolated through the\n # system, so nothing to to but wait.\n active_pbft_public_key = self._pbft_key_state_store.active_key\n if validator_info is None:\n if active_pbft_public_key is None:\n LOGGER.debug('PbftBlockPublisher::initialize_block No public key found, so going to register new signup information')\n self._register_signup_information(block_header=block_header)\n\n else: # Check if we need to give up on this registration attempt\n try:\n nonce = self._pbft_key_state_store[active_pbft_public_key].signup_nonce\n except (ValueError, AttributeError):\n self._pbft_key_state_store.active_key = None\n LOGGER.warning('PbftBlockPublisher::initialize_block Pbft Key State Store had inaccessible or '\n 'corrupt active key [%s] clearing '\n 'key.', active_pbft_public_key)\n return False\n LOGGER.debug('PbftBlockPublisher::initialize_block Check if we need to give up on this registration attempt')\n self._handle_registration_timeout(\n block_header=block_header,\n pbft_enclave_module=None,#pbft_enclave_module,\n state_view=state_view,\n signup_nonce=nonce,\n pbft_public_key=active_pbft_public_key\n )\n LOGGER.debug(\"PbftBlockPublisher::initialize_block validator_info NONE\")\n return True #False\n\n # Retrieve the key state corresponding to the PBFT public key in our\n # validator registry entry.\n pbft_key_state = None\n try:\n pbft_key_state = self._pbft_key_state_store[validator_info.signup_info.pbft_public_key]\n except (ValueError, KeyError):\n pass\n\n # If there is no key state associated with the PBFT public key that\n # other validators think we should be using, then we need to create\n # new signup information as we have no way whatsoever to publish\n # blocks that other validators will accept.\n LOGGER.debug(\"PbftBlockPublisher::check pbft_key_state=%s\",pbft_key_state)\n if pbft_key_state is None:\n LOGGER.debug('PbftBlockPublisher::initialize_block PBFT public key %s...%s in validator registry not found in key state store. Sign up again',\n validator_info.signup_info.pbft_public_key[:8],\n validator_info.signup_info.pbft_public_key[-8:])\n self._register_signup_information(block_header=block_header)\n\n # We need to put fake information in the key state store for the\n # PBFT public key the other validators think we are using so that\n # we don't try to keep signing up. However, we are going to mark\n # that key state store entry as being refreshed so that we will\n # never actually try to use it.\n dummy_data = b64encode(b'No sealed signup data').decode('utf-8')\n self._pbft_key_state_store[validator_info.signup_info.pbft_public_key] = PbftKeyState(\n sealed_signup_data=dummy_data,\n has_been_refreshed=True,\n signup_nonce='unknown')\n\n return False\n\n # Check the key state. If it is marked as being refreshed, then we are\n # waiting until our PBFT public key is updated in the validator\n # registry and therefore we cannot publish any blocks.\n if _VREG_ and pbft_key_state.has_been_refreshed:\n LOGGER.debug(\n 'PBFT public key %s...%s has been refreshed. 
Wait for new '\n 'key to show up in validator registry.',\n validator_info.signup_info.pbft_public_key[:8],\n validator_info.signup_info.pbft_public_key[-8:])\n\n # Check if we need to give up on this registration attempt\n self._handle_registration_timeout(\n block_header=block_header,\n pbft_enclave_module=pbft_enclave_module,\n state_view=state_view,\n signup_nonce=pbft_key_state.signup_nonce,\n pbft_public_key=active_pbft_public_key\n )\n return False\n\n # If the PBFT public key in the validator registry is not the active\n # one, then we need to switch the active key in the key state store.\n if _VREG_:\n if validator_info.signup_info.pbft_public_key != active_pbft_public_key:\n active_pbft_public_key = validator_info.signup_info.pbft_public_key\n self._pbft_key_state_store.active_key = active_pbft_public_key\n\n # Ensure that the enclave is using the appropriate keys\n try:\n signup_data = json2dict(base64.b64decode(pbft_key_state.sealed_signup_data.encode()).decode())\n unsealed_pbft_public_key = signup_data.get('pbft_public_key')\n except SystemError:\n # Signup data is unuseable\n LOGGER.error(\n 'Could not unseal signup data associated with PPK: %s..%s',\n active_pbft_public_key[:8],\n active_pbft_public_key[-8:])\n self._pbft_key_state_store.active_key = None\n return False\n LOGGER.debug(\"PbftBlockPublisher::unsealed_pbft_public_key=%s ~ %s signup_data=%s\",unsealed_pbft_public_key,active_pbft_public_key,signup_data)\n assert active_pbft_public_key == unsealed_pbft_public_key\n\n LOGGER.debug('Using PBFT public key: %s...%s',active_pbft_public_key[:8],active_pbft_public_key[-8:])\n LOGGER.debug('Unseal signup data: %s...%s',pbft_key_state.sealed_signup_data[:8],pbft_key_state.sealed_signup_data[-8:])\n \"\"\"\n LOGGER.debug(\"PbftBlockPublisher::initialize_block ADD CONSENSUS_STATE for block_id=%s\",block_header.previous_block_id)\n consensus_state = ConsensusState.consensus_state_for_block_id(\n block_id=block_header.previous_block_id,\n block_cache=self._block_cache,\n state_view_factory=self._state_view_factory,\n consensus_state_store=self._consensus_state_store,\n pbft_enclave_module=None,\n )\n \"\"\"\n #pbft_settings_view = PbftSettingsView(state_view)\n #LOGGER.debug(\"PbftBlockPublisher::pbft_settings_view node=%s\",pbft_settings_view.pbft_node)\n\n # If our signup information does not pass the freshness test, then we\n # know that other validators will reject any blocks we try to claim so\n # we need to try to sign up again.\n\n # Using the consensus state for the block upon which we want to\n # build, check to see how many blocks we have claimed on this chain\n # with this PBFT key. 
If we have hit the key block claim limit, then\n # we need to check if the key has been refreshed.\n # We need to create a wait timer for the block...this is what we\n # will check when we are asked if it is time to publish the block\n pbft_key_state = self._pbft_key_state_store[active_pbft_public_key]\n sealed_signup_data = pbft_key_state.sealed_signup_data\n\n # At this point, we know that if we are able to claim the block we are\n # initializing, we will not be prevented from doing so because of PBFT\n # policies.\n\n self._wait_timer = 20\n self._wait_timer = 20\n PbftBlockPublisher._previous_block_id = None\n block_header.consensus = b\"pbft\"\n LOGGER.debug('PbftBlockPublisher::initialize_block DONE _wait_timer=%s',self._wait_timer)\n self._block_header = block_header\n return True", "def __init__(self, root_block):\n self.root_block = root_block\n self.blocks = {'@': root_block}\n self.block_names = {\"default\":[]}\n #registering blocks by id\n self.register_blocks(root_block.ch_blocks)\n self.register_block_names()", "def _save_block_data(self, block, path='/data/data'):\n # Serialize the block\n data = Block.serialize(block)\n\n # Track the data file to be written\n self._index = self._count // self._threshold\n\n # Create a new file for incoming data if we have too many block in this data file\n if self._count != 0 and self._count % self._threshold == 0:\n self._data_file.close()\n self._data_file = open(f'{self.base_dir}/data-{self.index}', 'w+')\n\n self._count += 1\n self._data_file.write(data + '\\n')\n self._save_metadata()", "def _initialize(self) -> None:\n # use random indices for blocks\n indices = self.random_state.choice(\n self._dimension, self.block_dimension * self.num_blocks, replace=False # type: ignore\n ).tolist()\n indices.sort() # keep the indices sorted sorted so that blocks do not overlap\n # Caution this is also important for split, so that splitted arrays end un in the same block\n for transform_inds in tools.grouper(indices, n=self.block_dimension):\n self._transforms.append(\n utils.Transform(\n transform_inds,\n translation_factor=self.translation_factor,\n rotation=self.rotation,\n random_state=self.random_state,\n expo=self.expo,\n )\n )", "def _process_block(block, visited, inferred_types, backend):\n for op in block.instructions:\n log_debug('handle op {}'.format(op))\n if isinstance(op, instructions.PrimOp):\n if not all(_is_determined(inferred_types[var]) for var in op.vars_in):\n continue\n types_in = [inferred_types[var] for var in op.vars_in]\n # Offer type hints for cases where we need to type non-Tensor literals.\n preferred_types_out = instructions.pattern_map(\n lambda var: inferred_types[var], op.vars_out)\n with _type_inferring():\n objs_out = backend.run_on_dummies(\n op.function, _add_incompatible_batch_dim(types_in))\n types_out = _strip_batch_dim(instructions.pattern_map2(\n lambda tp, val: type_of_pattern(val, backend, preferred_type=tp),\n preferred_types_out, objs_out, leaf_type=instructions.Type))\n _merge_vars(op.vars_out, types_out, inferred_types, backend,\n log_message='update PrimOp vars_out')\n elif isinstance(op, instructions.FunctionCallOp):\n if not all(_is_determined(inferred_types[var]) for var in op.vars_in):\n continue\n # First, bind op.vars_in to op.function.vars_in.\n types_in = [inferred_types[var].tensors for var in op.vars_in]\n _merge_vars(op.function.vars_in, types_in, inferred_types, backend,\n log_message='init function vars_in')\n # Execute type inference.\n types_out = 
op.function.type_inference(types_in)\n for leaf in instructions.pattern_traverse(\n types_out, leaf_type=instructions.TensorType):\n if not isinstance(leaf, instructions.TensorType):\n msg = ('Expected function output type to be '\n 'a nested list or tuple of TensorType, found {}.').format(leaf)\n raise TypeError(msg)\n # To help with typing recursive base-case return literals, we seed\n # return_vars types before stepping into the function.\n _merge_vars(op.function.vars_out, types_out, inferred_types, backend,\n log_message='update function vars_out')\n # Finally, update op.vars_out with the results of type inference.\n _merge_vars(op.vars_out, types_out, inferred_types, backend,\n log_message='update FunctionCall vars_out')\n # Step into function. Note: it will only be visited once, if recursive.\n _process_graph(op.function.graph, visited, inferred_types, backend)\n # No need to process block.terminator, because all the information\n # that carries about types is already carried by the variable names\n # being the same across blocks", "def genBlocksList(self, blocksJson):\n # script is an object containing objects indexed by a unique identifier for \n # each block, and each block object contains links to parent (previous) and next\n # identifier. E.g.:\n # \n # {\n # \"h2blUU?#$l!dd*n}-Q1Y\": {\n # \"opcode\": \"event_whenflagclicked\",\n # \"next\": \"%?R0lmqrvySH00}u~j,l\",\n # \"parent\": null,\n # \"inputs\": {},\n # \"fields\": {},\n # \"topLevel\": true,\n # \"shadow\": false,\n # \"x\": 53,\n # \"y\": 56\n # },\n # \"%?R0lmqrvySH00}u~j,l\": {\n # \"opcode\": \"motion_movesteps\",\n # \"next\": \"T:Al*H@POT=8dOCzpm0(\",\n # \"parent\": \"h2blUU?#$l!dd*n}-Q1Y\",\n # \"inputs\": {\n # \"STEPS\": [\n # 1,\n # [\n # 4,\n # \"10\"\n # ]\n # ]\n # },\n # ... 
etc ...\n\n allBlocks = {} # Map of blockId to Block object.\n\n # Create all the block objects first\n for blockId in blocksJson:\n vals = blocksJson[blockId]\n block = Block(blockId, vals['opcode'])\n allBlocks[blockId] = block\n # print('adding block with id to collection', blockId, vals['opcode'])\n if vals['inputs']:\n block.setInputs(vals['inputs'])\n if vals['fields']:\n block.setFields(vals['fields'])\n if vals['topLevel']:\n block.setTopLevel(vals['topLevel'])\n if 'mutation' in vals:\n if 'proccode' in vals['mutation']:\n block.setProcCode(vals['mutation']['proccode'])\n if 'argumentids' in vals['mutation']:\n block.setProcCallArgIds(vals['mutation']['argumentids'])\n if 'argumentnames' in vals['mutation']:\n block.setProcDefnParamNames(vals['mutation']['argumentnames'])\n\n # Link the blocks together.\n for blockId in blocksJson:\n blockJson = blocksJson[blockId]\n block = allBlocks[blockId]\n if blockJson['next'] != None:\n nextBlock = allBlocks[blockJson['next']]\n print('setting next block of %s to be %s' % (str(block), str(nextBlock)))\n block.setNext(nextBlock)\n inputs = blockJson['inputs']\n for inputKey in inputs:\n # inputs is like this:\n # \"OPERAND1\": [\n # 3, \n # \"#70%(-M,b|(xTdgz(p@p\", <-- here is the child at index 1\n # [\n # 10,\n # \"\"\n # ]\n # ],\n # \"OPERAND2\": [\n # 1,\n # [\n # 10,\n # \"50\"\n # ]\n # ]\n if isinstance(inputs[inputKey][1], str) and inputs[inputKey][1] in allBlocks:\n block.setChild(inputKey, allBlocks[inputs[inputKey][1]])\n print('setting child block of %s with key %s to %s' %\n (str(block), inputKey, str(allBlocks[inputs[inputKey][1]])))\n\n listOfTopLevelBlocks = [block for block in allBlocks.values() if block.isTopLevel()]\n return listOfTopLevelBlocks", "def _build_formulation(self):\n add_formulation_to_block(\n block=self.block,\n model_definition=self.network_definition,\n input_vars=self.block.inputs_list,\n output_vars=self.block.outputs_list,\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Transforms the given block_structure for the given usage_info, assuming the block_structure contains cached data from a prior call to the collect method of the latest version of the Transformer. No access to the modulestore nor instantiation of xBlocks should be performed during the execution of this method. However, accesses to user-specific data (outside of the modulestore and not via xBlocks) are permitted in order to apply the transform for the given usage_info.
def transform(self, usage_info, block_structure):
    raise NotImplementedError
[ "def transform(self, usage_info, block_structure):\n for block_key in block_structure.topological_traversal():\n parents = block_structure.get_parents(block_key)\n if parents:\n block_depth = min(\n self.get_block_depth(block_structure, parent_key)\n for parent_key in parents\n ) + 1\n else:\n block_depth = 0\n block_structure.set_transformer_block_field(\n block_key,\n self,\n self.BLOCK_DEPTH,\n block_depth\n )\n\n if self.requested_depth is not None:\n block_structure.remove_block_traversal(\n lambda block_key: self.get_block_depth(block_structure, block_key) > self.requested_depth\n )", "def transform_block_filters(self, usage_info, block_structure):\n raise NotImplementedError", "def _transform_with_filters(self, block_structure):\n if not self._transformers['supports_filter']:\n return\n\n filters = []\n for transformer in self._transformers['supports_filter']:\n filters.extend(transformer.transform_block_filters(self.usage_info, block_structure))\n\n combined_filters = combine_filters(block_structure, filters)\n block_structure.filter_topological_traversal(combined_filters)", "def fit_from_transformed(self, transformed_blocks: Blocks):\n blocks = self.combine.transform(transformed_blocks)\n blocks = self.adapter.transform(blocks)\n return self.model.fit(**blocks)", "def replace_transformer_layers(model,\n NewLayer,\n blocks=range(12),\n block_parts=['attention.self.query',\n 'attention.self.key',\n 'attention.self.value',\n 'attention.output.dense',\n 'intermediate.dense',\n 'output.dense'],\n *args, **kwargs\n *args, **kwargs):\n\n for transformer_layer_ind in tqdm(blocks):\n block = model.bert.encoder.layer[transformer_layer_ind]\n for layer in block_parts:\n recursive_setattr(block,\n layer,\n NewLayer(recursive_getattr(block, layer),\n *args, **kwargs))", "def collect(cls, block_structure):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def _apply_transformation_on_transformed_link(self, item, transformation):\n if getattr(item, \"current_transformation\"):\n relative_transformation = transformation * item.current_transformation.inverse()\n else:\n relative_transformation = transformation\n for native_geometry in item.native_geometry or []:\n self.transform(native_geometry, relative_transformation)\n item.current_transformation = transformation", "def updateTransforms(self, node):\n transforms = []\n for _, transform in self.instrument.positioning_stack.model():\n transforms.append(transform)\n\n for detector in self.instrument.detectors.values():\n for _, transform in detector.model():\n transforms.append(transform)\n\n for _, transform in self.instrument.jaws.model():\n transforms.append(transform)\n\n node.per_object_transform[:len(transforms)] = transforms", "def performTransformation(self, granularity=0):\r\n # Prepare for a lookup table to contain the order of randomized BBs\r\n # TODO - Later, this has to be used for de-randomization\r\n self._generateRandTable(granularity)\r\n\r\n # [Step 1] Set the first basic block after randomization\r\n beginOffset = self.EI.getBasicBlocks()[0].offsetFromBase\r\n firstRandBBL = self.randomizedBBContainer[0]\r\n base = firstRandBBL.VA - firstRandBBL.offsetFromBase\r\n firstRandBBL.newOffsetFromSection = beginOffset\r\n firstRandBBL.newVA = base + firstRandBBL.newOffsetFromSection\r\n firstRandBBL.adjustedBytes = firstRandBBL.newVA - firstRandBBL.VA\r\n\r\n # [Step 2] Set the following basic blocks accordingly: newVA, adjustedBytes\r\n for idx, bbi in enumerate(self.randLayout):\r\n if idx > 0:\r\n prevBBL = 
self.EI.getBasicBlock(self.randLayout[idx - 1])\r\n BBL = self.EI.getBasicBlock(bbi)\r\n BBL.newOffsetFromSection = prevBBL.newOffsetFromSection + prevBBL.size\r\n BBL.newVA = base + BBL.newOffsetFromSection\r\n BBL.adjustedBytes = BBL.newVA - BBL.VA\r\n\r\n # [Step 3] Compute newOffset, newRefVal, newVA and newRefTo in .text section\r\n jumpTables = dict() # VA: (numJTEntries, jtEntrySz)\r\n if self.hasFixupsInText():\r\n for FI in self.getFixupsText():\r\n # For the fixups in standalone assembly, just skip them to update\r\n if FI.parent.parent.parent.srcKind == C.SRC_TYPE_ASSEMBLY:\r\n continue\r\n\r\n # For fixups in .text, newVA needs to be updated\r\n FIOffsetBBL = FI.VA - FI.parent.VA\r\n FI.newVA = FI.parent.newVA + FIOffsetBBL\r\n FI.newOffset = FI.newVA - base\r\n\r\n # If the fixup contains the absolute address and C2C type,\r\n # update it to the reordered address (pointing to the original BBL)\r\n # otherwise leave it as it is. (C2D type)\r\n if not FI.isRela:\r\n if FI.type == C.FT_C2C:\r\n try:\r\n FI.newRefVal = FI.refBB.newVA\r\n FI.newRefTo = FI.newRefVal\r\n except AttributeError:\r\n # Exception when any orphan fixup exists\r\n FI.newRefVal = FI.newRefTo = FI.refTo\r\n logging.warning(\"\\t(%s) [Possibly CFI/LTO] Check out Fixup [%d] RefBB: %s, newRefTo: 0x%x, Type: %d\" % \\\r\n (C.SEC_TEXT, FI.idx, FI.refBB, FI.newRefVal, FI.type))\r\n else:\r\n FI.newRefVal = FI.derefVal\r\n FI.newRefTo = FI.refTo\r\n\r\n else:\r\n # FI.VA + refVal + derefSz = RefTo\r\n # newRefVal = RefTo - FI.newVA - derefSz\r\n FI.newRefTo = FI.refBB.newVA if FI.refBB else FI.refTo\r\n FI.newRefVal = FI.newRefTo - FI.newVA - FI.derefSz\r\n\r\n # The following information will be used to update entries in .rodata\r\n if FI.numJTEntries > 0:\r\n jumpTables[FI.refTo] = (FI.parent.parent, FI.numJTEntries, FI.jtEntrySz)\r\n\r\n def updateFixupRefs1(fixups, secName, jumpTables):\r\n \"\"\" Update the fixups for .rodata and .data.rel.ro sections \"\"\"\r\n pivot, numJTEntries, jtEntrySz = 0x0, 0, 0\r\n for FI in fixups:\r\n if FI.type == C.FT_D2D: # Do not touch the case of D2D\r\n FI.newRefVal = FI.derefVal\r\n FI.newRefTo = FI.refTo\r\n continue\r\n\r\n # If the fixup corresponds to any jump table\r\n if FI.VA in jumpTables:\r\n pivot = FI.VA\r\n fixupFunc, numJTEntries, jtEntrySz = jumpTables[pivot]\r\n\r\n # If the fixup contains absolute value, it is straightforward to update\r\n if not FI.isRela:\r\n try:\r\n FI.newRefVal = FI.refBB.newVA\r\n FI.newRefTo = FI.newRefVal\r\n logging.debug(\"\\t(%s) Fixup [%d] RefBB: %s, RefVal: 0x%x, RefTo: 0x%x, Type: %d\" % \\\r\n (secName, FI.idx, FI.refBB, FI.newRefVal, FI.newRefTo, FI.type))\r\n\r\n '''\r\n # [NEW] For cross reference trace, use the jump table (indirect pointers)\r\n if pivot <= FI.VA < pivot + (numJTEntries * jtEntrySz):\r\n refFunc = self.EI.getBBlByVA(FI.derefVal).parent\r\n fixupFunc.refTos.add(refFunc)\r\n refFunc.refFroms.add(fixupFunc)\r\n '''\r\n\r\n except AttributeError:\r\n # Exception when any orphan fixup exists\r\n FI.newRefVal = FI.newRefTo = FI.refTo\r\n logging.warning(\"\\t(%s) [Possibly CFI/LTO] Check out Fixup [%d] RefBB: %s, newRefVal: 0x%x, Type: %d\" % \\\r\n (secName, FI.idx, FI.refBB, FI.newRefVal, FI.type))\r\n\r\n # If the fixup contains relative value [RV] (pie/pic)\r\n # a) non-JT: newRV = BBL(VA + RV).newVA - VA\r\n # b) JT: newRV = BBL(pivot + RV).newVA - pivot (where pivot = JT location)\r\n # PIE/PIC (position independent) binary falls into this category\r\n else:\r\n # If Fixup is the entry of this jump table, 
adjust the relative value accordingly\r\n if pivot <= FI.VA < pivot + (numJTEntries * jtEntrySz):\r\n FI.newRefTo = self.EI.getBBlByVA(pivot + FI.derefVal).newVA\r\n FI.newRefVal = FI.newRefTo - pivot\r\n logging.debug(\"\\t(%s) [PIE] Fixup@0x%x: RV=0x%x, Pivot=0x%x, newRefTo=0x%x, newRefVal=0x%x\"\r\n % (secName, FI.VA, FI.derefVal, pivot, FI.newRefTo, FI.newRefVal))\r\n\r\n '''\r\n # [NEW] For cross reference trace, use the jump table (indirect pointers)\r\n refFunc = self.EI.getBBlByVA(pivot + FI.derefVal).parent\r\n fixupFunc.refTos.add(refFunc)\r\n refFunc.refFroms.add(fixupFunc)\r\n '''\r\n\r\n else:\r\n FI.newRefTo = self.EI.getBBlByVA(FI.VA + FI.derefVal).newVA\r\n FI.newRefVal = FI.newRefTo - FI.VA\r\n logging.debug(\"\\t(%s) [PIE] Fixup@0x%x: RV=0x%x, newRefTo=0x%x, newRefVal=0x%x\"\r\n % (secName, FI.VA, FI.derefVal, FI.newRefTo, FI.newRefVal))\r\n\r\n # [Step 4] Compute newRefVal and newRefTo in .rodata/.data.rel.ro section\r\n if self.hasFixupsInRodata():\r\n updateFixupRefs1(self.getFixupsRodata(), C.SEC_RODATA, jumpTables)\r\n\r\n if self.hasFixupsInDataRel():\r\n updateFixupRefs1(self.getFixupsDataRel(), C.SEC_DATA_REL, jumpTables)\r\n\r\n # FIXME - Did not combine updateFixupRefs2 with updateFixupRefs1 for better readability\r\n def updateFixupRefs2(fixups, secName):\r\n \"\"\" Update the fixups for .data and .init_array sections \"\"\"\r\n for FI in fixups:\r\n if FI.type == C.FT_D2D and secName is not C.SEC_INIT_ARR: # Do not touch the case of D2D\r\n FI.newRefVal = FI.derefVal\r\n FI.newRefTo = FI.refTo\r\n continue\r\n\r\n if not FI.isRela:\r\n try:\r\n FI.newRefVal = FI.refBB.newVA\r\n FI.newRefTo = FI.newRefVal\r\n logging.debug(\"\\t(%s) Fixup [%d] RefBB: %s, RefVal: 0x%x, RefTo: 0x%x, Type: %d\" % \\\r\n (secName, FI.idx, FI.refBB, FI.newRefVal, FI.newRefTo, FI.type))\r\n except AttributeError:\r\n # Exception when any orphan fixup exists\r\n FI.newRefVal = FI.newRefTo = FI.refTo\r\n additionalMsg = ' [Possibly CFI / LTO]' if not secName == C.SEC_INIT_ARR else ''\r\n logging.warning(\"\\t(%s)%s Check out Fixup [%d] RefBB: %s, newRefTo: 0x%x, Type: %d\" % \\\r\n (secName, additionalMsg, FI.idx, FI.refBB, FI.newRefVal, FI.type))\r\n else:\r\n # Have not found any case that falls into this category\r\n # All fixup entries in .data seems absolute addresses even under PIE\r\n logging.critical(\"\\t(%s) Relative fixup in this section?! 
[NEW]\" % (secName))\r\n pass\r\n\r\n # [Step 5] Compute newRefVal and newRefTo in the following sections: .data and .init_array\r\n if self.hasFixupsInData():\r\n updateFixupRefs2(self.getFixupsData(), C.SEC_DATA)\r\n if self.hasFixupsInInitArray():\r\n updateFixupRefs2(self.getFixupsInitArray(), C.SEC_INIT_ARR)", "def import_transform_info(cls, use_global_pos=False):\n\n with open(cls.transform_info_temp_file_path) as f:\n data = f.readlines()\n\n for i, node in enumerate(pm.ls(sl=1, type=\"transform\")):\n j = i * 15\n if use_global_pos:\n # print('using global position')\n\n # import pivots first\n # rotatePivot\n pm.xform(\n node,\n ws=1,\n rp=(float(data[j + 9]), float(data[j + 10]), float(data[j + 11])),\n )\n\n # scalePivot\n pm.xform(\n node,\n ws=1,\n sp=(float(data[j + 12]), float(data[j + 13]), float(data[j + 14])),\n )\n\n pm.xform(\n node,\n ws=1,\n t=(float(data[j]), float(data[j + 1]), float(data[j + 2])),\n )\n pm.xform(\n node,\n ws=1,\n ro=(float(data[j + 3]), float(data[j + 4]), float(data[j + 5])),\n )\n pm.xform(\n node,\n ws=1,\n s=(float(data[j + 6]), float(data[j + 7]), float(data[j + 8])),\n )\n\n else:\n print(\"using local position\")\n\n # set pivots first\n # rotatePivot\n node.rotatePivot.set(\n float(data[j + 9]), float(data[j + 10]), float(data[j + 11])\n )\n\n # scalePivot\n node.scalePivot.set(\n float(data[j + 12]), float(data[j + 13]), float(data[j + 14])\n )\n\n try:\n node.t.set(float(data[j]), float(data[j + 1]), float(data[j + 2]))\n except RuntimeError:\n pass\n\n try:\n node.r.set(\n float(data[j + 3]), float(data[j + 4]), float(data[j + 5])\n )\n except RuntimeError:\n pass\n\n try:\n node.s.set(\n float(data[j + 6]), float(data[j + 7]), float(data[j + 8])\n )\n except RuntimeError:\n pass\n\n # print('tra: %0.3f %0.3f %0.3f' %\n # (float(data[j]), float(data[j + 1]), float(data[j + 2])))\n # print('rot: %0.3f %0.3f %0.3f' %\n # (float(data[j + 3]), float(data[j + 4]), float(data[j + 5])))\n # print('sca: %0.3f %0.3f %0.3f' %\n # (float(data[j + 6]), float(data[j + 7]), float(data[j + 8])))\n # print('pivr: %0.3f %0.3f %0.3f' %\n # (float(data[j + 9]), float(data[j + 10]), float(data[j + 11])))\n # print('pivs: %0.3f %0.3f %0.3f' %\n # (float(data[j + 12]), float(data[j + 13]), float(data[j + 14])))", "def optimize(self, block):\n blocks_module = self.blocks_module\n \n # Clear parent references to break reference loops\n # NOTE: Such references can only be used by the compilation and\n # postprocessing phases, they must not be used by the\n # optimizations or the formatting methods. 
All information \n # must be stored in the blocks by now.\n block.element = None\n block.attribute = None\n \n # Optimize all the child blocks first\n block.apply_transformation(self.optimize)\n \n # Extract leading and trailing invariant markup whenever possible\n if (isinstance(block, (base_blocks.WithBlock, blocks_module.AttributeValueBlock)) and\n (block.children and\n (block.children[0].is_invariant() or\n block.children[-1].is_invariant()))):\n \n leading_invariant_blocks = []\n while block.children and block.children[0].is_invariant():\n leading_invariant_blocks.append(block.children.pop(0))\n \n trailing_invariant_blocks = []\n while block.children and block.children[-1].is_invariant():\n trailing_invariant_blocks.insert(0, block.children.pop(-1))\n\n block = blocks_module.DummyBlock(\n block.lineno,\n children=(\n leading_invariant_blocks +\n ([] if block.is_empty() else [block]) +\n trailing_invariant_blocks))\n\n # Optimize the newly added block\n block.apply_transformation(self.optimize)\n \n # Collide nested py:with directives (single child only)\n if (isinstance(block, base_blocks.WithBlock) and\n len(block.children) == 1 and\n isinstance(block.children[0], base_blocks.WithBlock)):\n \n block.data = '%s; %s' % (block.data.rstrip(';'), block.children[0].data)\n block.children = block.children[0].children\n \n # Foreign element optimizations\n if isinstance(block, base_blocks.ElementBlock):\n \n # Put the start and end tags into the list of children\n # blocks if the tags cannot be stripped out. It allows for\n # colliding the tags with the tail and head of static contents.\n if block.strip_expression is None:\n\n # Inline the start tag\n if block.start_tag:\n if constants.GENERATE_DEBUG_COMMENTS:\n block.template_line = (\n (block.start_tag.template_line or '') +\n block.template_line)\n block.children[0:0] = block.start_tag.children\n block.start_tag = None\n \n # Inline the end tag\n if block.end_tag:\n if constants.GENERATE_DEBUG_COMMENTS:\n block.template_line += (block.end_tag.template_line or '')\n block.children.extend(block.end_tag.children)\n block.end_tag = None\n \n # Remove the ElementBlock container,\n # it allows for colliding it with the surrounding markup\n element_block = block\n block = base_blocks.DummyBlock(\n element_block.lineno,\n children=element_block.children)\n \n if constants.GENERATE_DEBUG_COMMENTS:\n block.template_line = element_block.template_line\n \n # Optimize the newly added block\n block.apply_transformation(self.optimize)\n \n # Concatenate subsequent child blocks emitting static markup\n children = block.children\n if len(children) > 1:\n \n for index in xrange(len(children) - 1, 0, -1):\n \n first_block = children[index - 1]\n second_block = children[index]\n \n if (isinstance(first_block, base_blocks.InvariantBlock) and\n isinstance(second_block, base_blocks.InvariantBlock)):\n \n concatenated_markup = (\n first_block.get_markup() + second_block.get_markup())\n \n concatenated_block = self.blocks_module.MarkupBlock(\n lineno=first_block.lineno,\n data=concatenated_markup)\n \n if constants.GENERATE_DEBUG_COMMENTS:\n concatenated_block.template_line = (\n (first_block.template_line or '') + \n (second_block.template_line or ''))\n \n children[index - 1] = concatenated_block\n del children[index]\n \n # Static markup and text content optimizations,\n # these do not affect attribute values\n if isinstance(block, (base_blocks.MarkupBlock, base_blocks.TextBlock)):\n \n # Redundant whitespace elimination (HTML minimizer)\n if 
self.reduce_whitespace and block.data:\n \n if block.data.strip():\n # Reduce the heading and trailing whitespace\n heading_whitespace, text, trailing_whitespace = (\n constants.RX_WHITESPACE_HEAD_TAIL.match(block.data).groups())\n block.data = (\n util.reduce_whitespace(heading_whitespace) +\n text +\n util.reduce_whitespace(trailing_whitespace))\n \n else:\n # Reduce whitespace markup\n block.data = util.reduce_whitespace(block.data)\n \n # Remove unnecessary level of block nesting\n if isinstance(block, base_blocks.DummyBlock):\n return block.children\n \n # Drop empty blocks not affecting the output of the generated code\n if block.is_empty():\n return []\n \n return [block]", "def import_component_transform_info(cls):\n with open(cls.transform_info_temp_file_path) as f:\n data = f.readlines()\n\n for i, node in enumerate(pm.ls(sl=1, fl=1)):\n j = i * 3\n pm.xform(\n node, ws=1, t=(float(data[j]), float(data[j + 1]), float(data[j + 2]))\n )", "def transform(self):\n self._prepare()\n self._cluster()\n self._store()\n self.logger.debug('Transformation is done...')", "def partial_transform(self, blocks: Blocks, fit=False):\n blocks = self.transform_blocks(blocks, fit=fit)\n blocks = self.combine.transform(blocks)\n blocks = self.adapter.transform(blocks)\n return blocks", "def apply_transform(self, transform, include_scatter=False):\n self._transformed_events = self._transform(transform, include_scatter=include_scatter)\n self._include_scatter_option = include_scatter\n self.transform = transform", "def apply_transforms(self):\n\n transform_view = self.plugin.transform_view\n\n for i in range(transform_view.topLevelItemCount()):\n item = transform_view.topLevelItem(i)\n\n if hasattr(item.channel_item, 'pvc'):\n if self.creat_copies_cb.isChecked():\n dup_ch_item = item.channel_item.create_duplicate()\n for transform in item.temporary_chain[:]:\n transform.visual_container = dup_ch_item.pvc\n else:\n for transform in item.temporary_chain[:]:\n transform.visual_container = item.channel_item.pvc\n else:\n for j in range(item.childCount()):\n item = item.child(j)\n if not hasattr(item.channel_item, 'pvc'):\n continue\n\n if self.creat_copies_cb.isChecked():\n ch_item = item.channel_item.create_duplicate()\n else:\n ch_item = item.channel_item\n\n for transform in item.temporary_chain[:]:\n ch_item.pvc.transoform_chain_add(transform)\n\n self.plugin.transform_view.clear()\n self.visible_channels.update_plot_positions()\n self.visible_channels.items_added.emit()\n\n self.plugin.delete_plugin_data()", "def _update_template_fcp_raw_usage(self, raw_usage, raw_item):\n (fcp_id, template_id, path_id, assigner_id, connections,\n reserved, wwpn_npiv, wwpn_phy, chpid, state, owner,\n tmpl_id) = raw_item\n if not raw_usage.get(template_id, None):\n raw_usage[template_id] = {}\n if not raw_usage[template_id].get(path_id, None):\n raw_usage[template_id][path_id] = []\n # remove path_id from raw data, keep the last templ_id to\n # represent from which template this FCP has been allocated out.\n return_raw = (fcp_id, template_id, assigner_id, connections,\n reserved, wwpn_npiv, wwpn_phy, chpid, state,\n owner, tmpl_id)\n raw_usage[template_id][path_id].append(return_raw)\n return raw_usage", "def _initialize(self) -> None:\n # use random indices for blocks\n indices = self.random_state.choice(\n self._dimension, self.block_dimension * self.num_blocks, replace=False # type: ignore\n ).tolist()\n indices.sort() # keep the indices sorted sorted so that blocks do not overlap\n # Caution this is also important 
for split, so that splitted arrays end un in the same block\n for transform_inds in tools.grouper(indices, n=self.block_dimension):\n self._transforms.append(\n utils.Transform(\n transform_inds,\n translation_factor=self.translation_factor,\n rotation=self.rotation,\n random_state=self.random_state,\n expo=self.expo,\n )\n )", "def transform(self, ctx, modules):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This is an alternative to the standard transform method. Returns a list of filter functions to be used for filtering out any unwanted blocks in the given block_structure. In addition to the commonly used methods listed above, the following
def transform_block_filters(self, usage_info, block_structure):
    raise NotImplementedError
[ "def _transform_with_filters(self, block_structure):\n if not self._transformers['supports_filter']:\n return\n\n filters = []\n for transformer in self._transformers['supports_filter']:\n filters.extend(transformer.transform_block_filters(self.usage_info, block_structure))\n\n combined_filters = combine_filters(block_structure, filters)\n block_structure.filter_topological_traversal(combined_filters)", "def filter_empties(text_blocks, _config):\n return [tb for tb in text_blocks if tb and tb['blockText']]", "def filter_functions(items):\n return [item for item in items if isinstance(item, FunctionType)]", "def filter_vertical(text_blocks, _config):\n new_text_blocks=[]\n for tb in text_blocks:\n new_tb=copy.copy(tb)\n new_tb['blockText']=[]\n for t in tb['blockText']:\n if t['width'] > 0:\n new_tb['blockText'].append(t)\n if new_tb['blockText']:\n new_text_blocks.append(new_tb)\n return new_text_blocks", "def _filter_pdb_by_alt_chain(structures, structure_alternate_model=None, structure_chain=None):\n\n # Filter by alternate model if given\n if structure_alternate_model:\n structures = structures[\n structures[\"structure.alternate_model\"] == structure_alternate_model\n ]\n # Filter by chain if given\n if structure_chain:\n structures = structures[structures[\"structure.chain\"] == structure_chain]\n return structures", "def transform(self, usage_info, block_structure):\n for block_key in block_structure.topological_traversal():\n parents = block_structure.get_parents(block_key)\n if parents:\n block_depth = min(\n self.get_block_depth(block_structure, parent_key)\n for parent_key in parents\n ) + 1\n else:\n block_depth = 0\n block_structure.set_transformer_block_field(\n block_key,\n self,\n self.BLOCK_DEPTH,\n block_depth\n )\n\n if self.requested_depth is not None:\n block_structure.remove_block_traversal(\n lambda block_key: self.get_block_depth(block_structure, block_key) > self.requested_depth\n )", "def filter_unrelated_lines(text_blocks, _config):\n new_text_blocks=[]\n for tb in text_blocks:\n new_tb=copy.copy(tb)\n new_tb['blockText']=[]\n next_top=tb['blockTop']\n for t in tb['blockText']:\n if t['top'] < next_top + t['height'] / 2:\n next_top=t['top'] + t['height']\n new_tb['blockText'].append(t)\n if new_tb['blockText']:\n new_text_blocks.append(new_tb)\n return new_text_blocks", "def collect(cls, block_structure):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def _filter_chain(accumulated, additional):\n return lambda block_key: accumulated(block_key) and additional(block_key)", "def get_template_filters(self):\n models = Pool._pool[self.database_name]['model']\n filters = []\n\n for model_name, model in models.iteritems():\n for f_name, f in inspect.getmembers(\n model, predicate=inspect.ismethod):\n\n if hasattr(f, '_template_filter'):\n filter = getattr(Pool().get(model_name), f_name)\n filters.append((filter.func_name, filter))\n\n return filters", "def partial_transform(self, blocks: Blocks, fit=False):\n blocks = self.transform_blocks(blocks, fit=fit)\n blocks = self.combine.transform(blocks)\n blocks = self.adapter.transform(blocks)\n return blocks", "def _filterOutput(self, pipelines, filter_dict, bIn):\n filtered = []\n for line in pipelines[:]:\n check = False # \"check\" means \"match\"\n # This inner for loop is deceiving: the filter_dict usually has a\n # single key:value and then the break/else is pure confusion.\n for key, value in filter_dict.items():\n if 'any' in value or value == ['']:\n check = True if key in line.keys() else False\n 
else:\n # Use full match for numerical values, and use substring\n # match for string values\n if str(line[key]).isdigit():\n check = str(line[key]) in value\n else:\n check = any([em in str(line[key]) for em in value])\n if check is bIn:\n break\n else:\n # No 'break': include this pipeline\n filtered.append(line)\n return filtered", "def runFilters(filters, events):\n for f in filters:\n if len(events) == 0:\n return []\n for event in events:\n event.hide = False\n events = sortEvents(events)\n events = f.process(events)\n \n events = sortEvents(events)\n return events", "def fosfilter(b, a, order, signal, states=None):\n if not states:\n states = zeros(order, dtype=np.complex128)\n\n for i in range(order):\n state = [states[i]]\n signal, state = lfilter(b, a, signal, zi=state)\n states[i] = state[0]\n b = ones_like(b)\n return signal, states", "def filter_(func, seq):\n return tuple(filter(func, seq))", "def filter_bcf(self, filter_dict=None):\n if filter_dict:\n self.filters = filter_dict\n variants = [Variant(v) for v in self if self._filter_line(v)]\n # Rewind the iterator\n self.reset()\n return Filtered_freebayes(variants, self)", "def filter(self, filterfn):\n new_edges = [_filter_remote.remote(filterfn, chunk)\n for chunk in self.edges]\n new_buf = np.array(filter(filterfn, self.buf))\n new_edges.extend(new_buf)\n\n return new_edges", "def filter_template_list(template_list, output_filter):\n output_filter = [re.compile(flt) for flt in output_filter]\n template_list = [\n templ\n for templ in template_list\n for rex in output_filter if rex.match(templ)\n ]\n LOG.debug('Filtered template files list: %s', template_list)\n return template_list", "def fast_filter(self, ast):\n return ConstantBinaryExpressionFilter().dfs_iter(ast)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given two functions that take a block_key and return a boolean, yield a function that takes a block key, and 'ands' the functions together
def _filter_chain(accumulated, additional):
    return lambda block_key: accumulated(block_key) and additional(block_key)
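A minimal usage sketch of the _filter_chain combinator above; the block keys and the two predicates here are made-up stand-ins, not part of the dataset entry:

published = {"block-A", "block-B"}   # hypothetical block keys
hidden = {"block-B"}
keep = _filter_chain(lambda key: key in published,
                     lambda key: key not in hidden)
print(keep("block-A"))   # True: published and not hidden
print(keep("block-B"))   # False: rejected by the second predicate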
[ "def _binary(func, b):\n @wraps(func)\n def wrapper(a):\n return func(a, b)\n\n return wrapper", "def combine_and(expect_fn1, expect_fn2):\r\n def combine_fn(x1, x2):\r\n return min(x1, x2)\r\n return Expect.combine(expect_fn1, expect_fn2, combine_fn)", "def __block_equal(self, a, b):\r\n \r\n return self.__block2pair(a) == self.__block2pair(b)", "def lor(*fns):\n\n def _f(*args, **kwargs):\n for f in fns:\n v = f(*args, **kwargs)\n if v:\n return v\n return False\n\n return _f", "def matchKeys(keyA, keyB):\r\n match = [False if k == -1 else True for k in keyB]\r\n keyB = [keyB[k] for k in range(len(keyB)) if match[k]]\r\n\r\n while len(match) < len(keyA):\r\n match.append(True)\r\n keyA = [keyA[k] for k in range(len(keyA)) if match[k]]\r\n\r\n return (keyA, keyB)", "def _do_function(self,e1,e2):\n return self._in1._e|self._in2._e", "def compose_keys(f, g):\n return lambda v: f(g(v))", "def _do_function(self,e1,e2):\n return self._in1._e&self._in2._e", "def function_from_block(block):\n return Function(block.fields.get('Function', None),\n block.fields.get('Purpose', None), block.fields.get('Inputs', None),\n block.fields.get('Outputs', None))", "def _lock_and_transform(func):\n\n @wraps(func)\n def wrapper(self, key):\n with self._lock:\n return func(self, _transform_key(key))\n\n return wrapper", "def generate(self, block1: Block, block2: Block, output_block: Block = None):\n block = BlockGenerator._generate_args_check(output_block)\n return block", "def next_node(match_fn: Callable[[torch.fx.Node], bool]) -> Callable[[torch.fx.Node], bool]:\n\n def fn(node):\n for next_n in node.users.keys():\n if match_fn(next_n):\n return True\n return False\n\n return fn", "def linkblock_terminals(block):\n return _helper_00(block)", "def _operators(self):\n for row in self._blocks:\n for entry in row:\n if entry is not None:\n yield entry", "def filter_fir_shared(clock, reset, x, y, b):\n assert isinstance(x, Samples)\n assert isinstance(y, Samples)\n\n ntaps = len(b)\n scnt = Signal(intbv(ntaps+1, min=0, max=ntaps+2))\n pmax = x.data.max * x.data.max\n sop = Signal(intbv(0, min=-pmax, max=pmax))\n scale = int(len(x.data)-1)\n\n xd = [Signal(intbv(0, min=x.data.min, max=x.data.max))\n for _ in range(len(b))]\n\n @always_seq(clock.posedge, reset=reset)\n def beh_sop():\n y.valid.next = False\n if scnt == ntaps+1 and x.valid:\n # tap update loop\n xd[0].next = x.data\n for ii in range(1, len(b)-1):\n xd[ii].next = xd[ii-1]\n # compute the first product \n c = b[0]\n sop.next = c * x.data\n scnt.next = 1\n elif scnt == ntaps:\n assert not x.valid\n y.data.next = sop >> scale\n y.valid.next = True\n scnt.next = scnt + 1\n elif scnt < ntaps:\n assert not x.valid\n c = b[scnt]\n sop.next = sop + c * xd[scnt]\n scnt.next = scnt + 1\n\n return hdl.instances()", "def combine_filters(*args):\n def func(rec):\n for filt in args:\n if not filt(rec):\n return False\n return True\n return func", "def block(self, dataset: 'Dataset', function_: Callable = None, property_: str = None,\n block: Block = None, block_black_list: BlockBlackList = None, base_on: Block = None):\n block = BlockGenerator._block_args_check(function_, property_, block)\n return block", "def iter_block(check,r):\n bunch = []\n try:\n tr = iter(r)\n bunch.append(next(tr))\n while True:\n a = next(tr)\n if check(a):\n yield bunch\n bunch = []\n bunch.append(a)\n except StopIteration:\n if len(bunch) > 0:\n yield bunch", "def sequence(left_validator, right_validator):\n def callback(value):\n \"\"\"Accept a value to validate, return validation 
results\"\"\"\n okay, result = left_validator(value)\n if okay:\n return right_validator(value)\n return okay, result\n return callback" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check the current value against the target.
def _check(self, target, current):
    raise NotImplementedError('Do not call the base Goal directly.')
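A minimal sketch of how a concrete subclass would override _check. The Goal base class below is a stand-in stub (only its name comes from the error message above), and AtLeastGoal is a hypothetical example:

class Goal:
    def _check(self, target, current):
        raise NotImplementedError('Do not call the base Goal directly.')

class AtLeastGoal(Goal):
    def _check(self, target, current):
        # The goal is met once the current value reaches the target.
        return current >= target

print(AtLeastGoal()._check(target=5, current=7))   # True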
[ "def check(self, state, val):\n return", "def _needs_to_track_change(self, instance, value) -> bool:\n try:\n current_value = instance.__dict__[self._name]\n except KeyError:\n return True\n return value != current_value", "def check(self, instance):\n r = (safe_comp(instance[self.index], self.val) <= 0)\n if self.result is not None:\n return r == self.result\n return r", "def compare_and_set(self, expected_value, new_value):\n return self.value.compare_and_set(expected_value, new_value)", "def test_check_wires(self, wires, target):\n res = check_wires(wires=wires)\n assert res == target", "def check_target():\n snake_pos = (round(snake[0].xcor()), round(snake[0].ycor()))\n\n if snake_pos == target_pos:\n scored()", "def _status_check(target):\n def check_pos(value=None, **kwargs):\n return value in target\n\n return check_pos", "def _test_value(self, value, value_test):\n if callable(value_test):\n return value_test(value)\n else:\n return value == value_test", "def check_running_sum(self):\n while self.__sum > self.__target:\n self.popleft()\n return self.__sum == self.__target", "def _should_update_value(self, device, other_device):\n return (device and device.is_actuator()\n and (not other_device or not other_device.is_sensor()))", "def compare_values(self, x, y):\n return x == y", "def test_attribute_value(feature, att_name, target_value, check_case):\n \n att_value = FME_utils.feature_get_attribute(feature, att_name, True)\n \n if check_case:\n # Nothing to do\n pass\n else:\n # Adjust the case\n target_value = target_value.lower()\n att_value = att_value.lower()\n \n if att_value == target_value:\n match = True\n else:\n match = False\n \n return match", "def is_target_reached(self):\n return (abs(self.read_output_field() - self.target_field) <\n self.output_fluctuations)", "def _check_current(self, level, val):\n self_val = getattr(self, 'current_{}'.format(level))\n if val is None:\n if self_val is None:\n raise ValueError(\"Must provide a {} since none has been\"\n \" selected\".format(level))\n val = self_val\n return val", "def check_on_target(self, bx, by):\n on_target = False\n for (ty, tx) in self.tgt_positions:\n # if the box is on a target, this is fine\n if (bx, by) == (tx, ty):\n on_target = True\n return on_target", "def check(self, currentTau):\n self.voltage += self.sumInputs\n self.sumInputs = 0\n self.refractCount -= 1\n self.voltageHistory.append(self.voltage)\n if(self.refractCount <= 0):\n self.refractCount = 0 \n if(self.voltage >= self.threshold):\n self.spikeTimes.append(currentTau)\n self.AP()\n self.voltage -= abs(self.threshold)\n self.refractCount = self.refractory\n #print(\"AP at \"+ str(currentTau) + \" at \" + self.name)\n return True\n return False", "def is_same(self, another):\n return self.get_reg().__cmp__(another.get_reg()) == 0", "def __eq__(self,other):\n if isinstance(other, self.__class__):\n if self.test_code == other.test_code:\n #Implement loose equality for specific test codes\n if self.test_code.code in ['CD4', 'CD4%', 'CD8', 'CD8%']:\n return abs(int(float(self.result_item_value))-int(float(other.result_item_value))) <= 1\n else:\n return self.result_item_value == other.result_item_value\n else:\n return False \n else:\n return False", "def test_binding_callback_updates_target(self):\n self.source_instance.int_value = 2\n\n self.binding_callback()\n\n assert self.target_instance.int_value == self.source_instance.int_value" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate all possible oligos from seq with length constraints. seq is Bio.Seq.Seq or string
def oligo_gen(seq, min_len, max_len):
    for i in range(len(seq) - min_len):
        for j in range(min_len, max_len + 1):
            oligo = seq[i:i + j]
            if len(oligo) == j:
                yield oligo
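A minimal usage sketch for oligo_gen; the generator only relies on len() and slicing, so a plain string works as well as a Bio.Seq.Seq. The Seq object and the 14-mer below are illustrative, not taken from the source.

from Bio.Seq import Seq

seq = Seq("ATGCGTACGTTAGC")                      # hypothetical 14-mer
for oligo in oligo_gen(seq, min_len=6, max_len=8):
    print(len(oligo), str(oligo))                # 6-8 nt windows sliding along seq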
[ "def seq_permutation(seq_len: int,\n charset: str = \"ATCG\") -> t.Iterable[str]:\n if seq_len <= 0:\n yield \"\"\n else:\n for seq in seq_permutation(seq_len-1, charset):\n for c in charset:\n yield seq + c", "def build_sequences(data, seq_len=5):\n n_sequences = len(data) - seq_len\n return np.array([data[i:i+seq_len] for i in range(n_sequences)])", "def SpliceSeq(seq):\n tmpSeq = copy.deepcopy(seq) # nomal copy does not work!\n ORFLis = [[], [], []]\n for i in range(3):\n tmpSeq = tmpSeq[i:]\n while len(tmpSeq) >= 6:\n curTriPair = tmpSeq[:6]\n tri1, tri2 = curTriPair[:3], curTriPair[3:]\n ORFLis[i].append((tri1, tri2))\n tmpSeq = tmpSeq[1:]\n tmpSeq = copy.deepcopy(seq) # nomal copy does not work!\n return ORFLis", "def create_long_sequence():\n\n return final_sequences('long')", "def find_orfs(seq, min_protein_length, strand=1, trans_table=1,\n ignore_ambiguous_orfs=True):\n answer = []\n seq_len = len(seq)\n\n # Get sequence associated with the specified location and strand\n if strand == 1:\n dna_seq = seq\n else:\n dna_seq = seq.reverse_complement()\n\n for frame in range(3):\n trans = str(dna_seq[frame:].translate(trans_table))\n trans_len = len(trans)\n aa_start = 0\n aa_end = 0\n\n # Iterate through ORFS in reading frame\n while aa_start < trans_len:\n # Set end counter to position of next stop codon\n aa_start = trans.find(\"M\", aa_start)\n aa_end = trans.find(\"*\", aa_start)\n\n # If no start or stop codons found, stop here\n if aa_start == -1 or aa_end == -1:\n break\n\n if (aa_end < aa_start):\n raise Exception('wtf')\n\n # Compute coordinates of ORF\n if strand == 1:\n start = frame + aa_start * 3\n end = min(seq_len, frame + aa_end * 3 + 3)\n else:\n start = seq_len - frame - aa_end * 3 - 3\n end = seq_len - frame - aa_start * 3\n\n # Add to output\n str_strand = \"+\" if strand == 1 else '-'\n\n # Check to make sure ORF doesn't contain a bunch of N's\n if ignore_ambiguous_orfs:\n num_unknown = trans[aa_start:aa_end].count('X')\n if (num_unknown / (aa_end - aa_start)) > 0.25:\n aa_start = aa_end + 1\n continue\n\n # increment start counter\n aa_start = aa_end + 1\n\n # Add ORF coordinates and continue\n answer.append((start, end, str_strand))\n\n # Sort results\n answer.sort()\n\n return answer", "def _generate_sequences(self):\r\n\r\n num_quants = len(self._quantifiers)\r\n num_chars = quantifiers.Quantifier.num_chars\r\n\r\n all_gens = []\r\n for n in range(1, self._max_len + 1):\r\n seqs = itertools.product(range(num_chars), repeat=n)\r\n data_n = ((seq, quant) for seq in seqs\r\n for quant in range(num_quants))\r\n all_gens.append(data_n)\r\n\r\n return itertools.chain(*all_gens)", "def draw_seqs(L, N, Om=10, switch=-1, be_picky=True, mirror=False):\n \n if mirror:\n maxseq = int(L*math.factorial(len(Om))/math.factorial(len(Om)-L))\n else:\n maxseq = int(math.factorial(L)*math.factorial(len(Om))/math.factorial(len(Om)-L))\n \n if be_picky:\n if N > maxseq: #unlikely\n N = maxseq\n \n # select a random subset of all L-length permutations\n npfx = np.ceil(N/L)\n nperm = math.factorial(len(Om))/math.factorial(len(Om)-L)\n \n perms = permutations(Om, L) # there are many of these\n pinds = np.random.permutation(np.arange(nperm)<npfx)\n toks = np.array(list(compress(perms, pinds))) # select random elements of permutations\n\n toks = np.repeat(toks, L, axis=0)\n # choose the non-repeated tokens\n inds = (np.tile(np.eye(L), int(npfx)).T > 0)\n \n if mirror: # get repeated tokens\n skot = np.fliplr(toks[inds==0].reshape((-1, L-1))) \n else:\n skot = 
scramble(toks[inds==0].reshape((-1, L-1)), axis=0)\n if switch is not None:\n skot = np.insert(skot, 0, switch, axis=1)\n \n S = np.concatenate((toks, skot), axis=1)\n A = toks[inds]\n # scramble\n shf = np.random.choice(int(npfx*L), int(N), replace=False)\n S = S[shf,:]\n A = A[shf]\n \n else: # if there are many unique sequences, just take random samples\n if switch is not None:\n S = np.zeros((N, 2*L), dtype = int)\n else:\n S = np.zeros((N, 2*L-1), dtype = int)\n A = np.zeros((N,1), dtype = int)\n \n # draw tokens from alphabet\n for n in range(N):\n toks = np.random.choice(Om, L, replace = False)\n distok = np.random.choice(L,1)\n skot = np.flip(np.delete(toks,distok))\n if not mirror:\n skot = skot[np.random.choice(L-1,L-1,replace=False)]\n if switch is not None:\n skot = np.append(switch, skot)\n S[n,:] = np.append(toks,skot)\n A[n] = toks[distok]\n \n return S, A", "def seqToGenbankLines(seq):\n # first chunk into 10bp parts\n parts = [seq[i:i+10] for i in range(0, len(seq), 10)]\n\n # put into lines of 6*10 bp\n lines = []\n for i in range(0, len(parts), 6):\n lines.append(\" \".join(parts[i:i+6]))\n return lines", "def motzkin_sequences(length, minwidth):\n if length == 0:\n yield ''\n else:\n # Either the first element of this one is a dot, or it isn't\n # If it is a dot, the number of possible sequences is equal\n # to the number of sequences of length 'length-1'\n # (that is, each of these sequences preceeded by a dot)\n dot_before_prev_possibilities = ('.' + seq\n for seq\n in motzkin_sequences(length-1,\n minwidth))\n for sequence in dot_before_prev_possibilities:\n yield sequence\n\n # If the first element is not a dot, it is an open parenthesis\n\n for length_inside_first_set in range(minwidth, length-2+1):\n # If the first element is an open parenthesis, we need to\n # place a matching, closed parenthesis after it.\n # There must be at least minwidth elements between\n # the first parenthesis and its partner, because we\n # need at least minwidth dots between these two parenthesis.\n #\n # There can be as many as length-2 spots between the\n # first and last element, since this would be representative\n # of having the first space be an open parenthesis and the last\n # spot being its partnered close parenthesis.\n #\n # So, let's allocate the parenthesis inside this pair\n length_after_first_set = length-2-length_inside_first_set\n stuff_inside_first_set = [sequence for sequence in\n motzkin_sequences\n (length_inside_first_set,\n minwidth)]\n stuff_after_first_set = [sequence for sequence in\n motzkin_sequences(\n length_after_first_set,\n minwidth)]\n for inside in stuff_inside_first_set:\n for after in stuff_after_first_set:\n sequence = '({inside}){after}'.format(\n inside=inside,\n after=after)\n yield sequence", "def gen_random_seq(length: int) -> str:\n return ''.join(random.choices(BASES, k=length))", "def get_seq_list(seq_arg):\n\tpep_sequences = []\n\tfor inp in seq_arg:\n\t\tif '.' 
in inp:\n\t\t\t# If input is a file\n\t\t\tlis = readfile(inp) \n\t\t\tif len(lis) == 1:\n\t\t\t\t# If all sequences are listed horizontally on one line\n\t\t\t\t# rather than one per line, rearrange\n\t\t\t\tlis = lis[0].split()\n\n\t\t\tfor i in lis:\n\t\t\t\tpep_sequences.append(i.strip())\n\n\t\telse:\n\t\t\t# Sequence was typed directly into the argument\n\t\t\tpep_sequences.append(inp.strip())\n\n\t\tfor n, s in enumerate(pep_sequences):\n\t\t\tif len(s) == 5: # Hard code for HCV\n\t\t\t\tpep_sequences[n] = 'A' + s + 'CSMHL'\n\t\t\telif len(s) == 6: # Hard code for HCV\n\t\t\t\tpep_sequences[n] = 'A' + s + 'SMHL'\n\t\t\telse:\n\t\t\t\tassert len(s) == 11\n\n\treturn pep_sequences", "def test_get_sequence_str(self):\n my_seq = \"TCTAGA\" + 50 * \"A\" + \"CCGCGG\" + 30 * \"T\"\n genome = {\"chrA\": my_seq}\n\n my_revcomp = str(SeqRecord(Seq(genome[\"chrA\"])).reverse_complement().seq)\n\n iv1p = GenomicSegment(\"chrA\", 0, 6, \"+\")\n iv2p = GenomicSegment(\"chrA\", 56, 62, \"+\")\n iv3p = GenomicSegment(\"chrA\", 0, 92, \"+\")\n\n iv1m = GenomicSegment(\"chrA\", 0, 6, \"-\")\n iv2m = GenomicSegment(\"chrA\", 56, 62, \"-\")\n iv3m = GenomicSegment(\"chrA\", 0, 92, \"-\")\n\n ivc1p = self.test_class(iv1p, iv2p, ID=\"ivc1p\")\n ivc1m = self.test_class(iv1m, iv2m, ID=\"ivc1m\")\n\n self.assertEquals(ivc1p.get_sequence(genome), \"TCTAGACCGCGG\")\n self.assertEquals(ivc1p.get_fasta(genome), \">ivc1p\\nTCTAGACCGCGG\\n\")\n\n self.assertEquals(ivc1m.get_sequence(genome), \"CCGCGGTCTAGA\")\n self.assertEquals(ivc1m.get_fasta(genome), \">ivc1m\\nCCGCGGTCTAGA\\n\")\n\n ivc2p = self.test_class(iv3p, ID=\"ivc2p\")\n ivc2m = self.test_class(iv3m, ID=\"ivc2m\")\n\n self.assertEquals(ivc2p.get_sequence(genome), my_seq)\n self.assertEquals(ivc2p.get_fasta(genome), \">ivc2p\\n%s\\n\" % my_seq)\n\n self.assertEquals(ivc2m.get_sequence(genome), my_revcomp)\n self.assertEquals(ivc2m.get_fasta(genome), \">ivc2m\\n%s\\n\" % my_revcomp)", "def simulate_sequence(length):\n dna = ['A', 'C', 'G', 'T']\n sequence = ''\n for i in range(length):\n sequence += random.choice(dna)\n return sequence", "def make_seq_string(seq, container_chars = '[]'):\n string = '%s ' % container_chars[0]\n for elem in seq: string += str(elem) + ', '\n string = '%s %s' % (string[:-2], container_chars[1])\n return string", "def outcome_seq(self):\n seq = []\n for num, ch in enumerate(self.chains.keys()):\t\t\t\t#('A', 'B')\n resi_list = self.residues[num]\n s = list(self.pdb_seq[self.pdb_chains[ch][2]])\n# print(\"struct: \", self.seq[self.chains[ch][2]])##############################\n# print(\"seq : \", \"\".join(s))###########################\n# print(len(self.seq[self.chains[ch][2]]), len(s), len(resi_list))#########################\n if len(s) != len(self.seq[self.chains[ch][2]]):\n for aa in range(0, len(s)):\n if s[aa] != resi_list[aa][2]:\n print(\"ERROR: seq_from_struct + missing_resi differ from seq_from_header at position %s\" %aa)\n if resi_list[aa][4] == 'm':\n s[aa] = '-'\n seq.append(\"\".join(s))\n# print(\"out_s : \", seq[0])#######################################\n return seq", "def split_subsequences(iterable, length=2, overlap=0, \r\n join_substr=True):\r\n isstring = isinstance(iterable, str) and join_substr\r\n it = iter(iterable)\r\n results = list(itertools.islice(it, length))\r\n while len(results) == length:\r\n yield ''.join(results) if isstring else results\r\n results = results[length - overlap:]\r\n results.extend(itertools.islice(it, length - overlap))\r\n if results:\r\n yield ''.join(results) if 
isstring else results", "def generate_list_of_pose_objs(sequence):\n\n seq_list = []\n for english_name in sequence:\n pose_obj = get_pose_by_name_eng(english_name)\n seq_list.append(pose_obj)\n\n return seq_list", "def calc_sim_orfs(sims, seqs_string, seqs_lengths):\n\n outputs = {}\n if len(sims):\n np.random.seed()\n for i, sim in enumerate(sims):\n gen.print_parallel_status(i, sims)\n sim_nts = list(seqs_string)\n np.random.shuffle(sim_nts)\n sim_string = \"\".join(sim_nts)\n new_seqs = []\n index = 0\n for length in seqs_lengths:\n new_seqs.append(sim_string[index:index+length])\n sim_orfs = seqo.get_longest_orfs(new_seqs)\n outputs[sim] = sim_orfs\n return outputs", "def gen_random_orf(codonpref, simulations, protein_seq=False, orf_len=560):\n if protein_seq:\n rand_codon_dict = {}\n for amino in codonpref.keys():\n rand_codon_dict[amino] = rwg_codon(codonpref[amino])\n for sim in xrange(simulations):\n codon_lis = []\n for amino in protein_seq:\n new_codon = rand_codon_dict[amino]()\n codon_lis.append(new_codon)\n\n yield ''.join(codon_lis)\n\n else:\n rand_codon = rwg_codon(codonpref)\n for sim in xrange(simulations):\n codon_lis = ['ATG']\n while len(codon_lis) < orf_len:\n new_codon = rand_codon()\n if new_codon != 'TAG' and new_codon != 'TGA' and new_codon \\\n != 'TAA':\n codon_lis.append(new_codon)\n\n yield ''.join(codon_lis)", "def convert_to_strings(self, sequences, sizes=None):\n strings = []\n for x in xrange(len(sequences)):\n seq_len = sizes[x] if sizes is not None else len(sequences[x])\n string = self._convert_to_string(sequences[x], seq_len)\n strings.append(string)\n return strings" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate all possible point mutations from a DNA seq. seq is a Bio.Seq.Seq. Does not respect the case of letters.
def dna_mutation_gen(seq):
    letters = seq.alphabet.letters
    for i in range(len(seq)):
        for letter in letters:
            if letter != seq[i].upper():
                yield seq[:i] + letter + seq[i + 1:]
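An illustrative call, assuming a pre-1.78 Biopython where Seq objects still carry an Alphabet (Bio.Alphabet has since been removed); with IUPAC.unambiguous_dna, seq.alphabet.letters is the four unambiguous bases. The input sequence is made up.

from Bio.Seq import Seq
from Bio.Alphabet import IUPAC   # available only in Biopython < 1.78

seq = Seq("ACGT", IUPAC.unambiguous_dna)         # hypothetical input
mutants = list(dna_mutation_gen(seq))
print(len(mutants))                              # 4 positions x 3 alternative bases = 12 mutants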
[ "def translate(seq):\n\n table = {\n 'ATA': 'I', 'ATC': 'I', 'ATT': 'I', 'ATG': 'M',\n 'ACA': 'T', 'ACC': 'T', 'ACG': 'T', 'ACT': 'T',\n 'AAC': 'N', 'AAT': 'N', 'AAA': 'K', 'AAG': 'K',\n 'AGC': 'S', 'AGT': 'S', 'AGA': 'R', 'AGG': 'R',\n 'CTA': 'L', 'CTC': 'L', 'CTG': 'L', 'CTT': 'L',\n 'CCA': 'P', 'CCC': 'P', 'CCG': 'P', 'CCT': 'P',\n 'CAC': 'H', 'CAT': 'H', 'CAA': 'Q', 'CAG': 'Q',\n 'CGA': 'R', 'CGC': 'R', 'CGG': 'R', 'CGT': 'R',\n 'GTA': 'V', 'GTC': 'V', 'GTG': 'V', 'GTT': 'V',\n 'GCA': 'A', 'GCC': 'A', 'GCG': 'A', 'GCT': 'A',\n 'GAC': 'D', 'GAT': 'D', 'GAA': 'E', 'GAG': 'E',\n 'GGA': 'G', 'GGC': 'G', 'GGG': 'G', 'GGT': 'G',\n 'TCA': 'S', 'TCC': 'S', 'TCG': 'S', 'TCT': 'S',\n 'TTC': 'F', 'TTT': 'F', 'TTA': 'L', 'TTG': 'L',\n 'TAC': 'Y', 'TAT': 'Y', 'TAA': '_', 'TAG': '_',\n 'TGC': 'C', 'TGT': 'C', 'TGA': '_', 'TGG': 'W',\n }\n\n seq_len = len(seq)\n if seq_len % 3 == 0:\n protein = \"\"\n for i in range(0, seq_len, 3):\n codon = seq[i: i+3]\n protein += table[codon]\n return protein\n else:\n return f\"Invalid Input Sequence, len = {seq_len}\"", "def generate_all_insertion_mutants(self, sequence):\n ancestor_sequence = list(sequence)\n all_insertion_mutants = []\n \n #make all insertions, (+1 for insertion off the last instruction)\n for i in range(len(sequence) + 1):\n for new_char in self.char_lookup:\n new_seq = list(ancestor_sequence)\n new_seq.insert(i, new_char)\n all_insertion_mutants.append(''.join(new_seq))\n \n return all_insertion_mutants", "def translate(seq):\n \n #translation table of codons to amino acids\n # _ underscores are nature's stop codons.\n table = {\n 'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',\n 'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',\n 'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',\n 'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',\n 'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',\n 'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',\n 'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',\n 'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',\n 'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',\n 'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',\n 'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',\n 'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',\n 'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',\n 'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',\n 'TAC':'Y', 'TAT':'Y', 'TAA':'_', 'TAG':'_',\n 'TGC':'C', 'TGT':'C', 'TGA':'_', 'TGG':'W',\n }\n \n #The protein is a sequence of amino acids\n protein = \"\"\n \n # Check that the length of the string is divisible by 3\n if len(seq) % 3 == 0:\n # Valid sequence - proceed\n # Loop over the sequence\n for i in range(0, len(seq), 3):\n \n # Extract a single codon (3-letter string)\n codon = seq[i:i+3]\n \n # Look up each codon (3-letter string) and store the result\n # Concatenating to generate an amino acid sequence\n protein += table[codon]\n else:\n pass\n \n\n return protein;", "def expandIupac(seq):\n # http://stackoverflow.com/questions/27551921/how-to-extend-ambiguous-dna-sequence\n d = {'A': 'A', 'C': 'C', 'B': 'CGT', 'D': 'AGT', 'G': 'G', \\\n 'H': 'ACT', 'K': 'GT', 'M': 'AC', 'N': 'GATC', 'S': 'CG', \\\n 'R': 'AG', 'T': 'T', 'W': 'AT', 'V': 'ACG', 'Y': 'CT', 'X': 'GATC'}\n seqs = []\n for i in product(*[d[j] for j in seq]):\n seqs.append(\"\".join(i))\n return seqs", "def generate_random_seq(seq_len, number_seq, number_mutations, list_all_20_aa, probabilities_all_20_aa):\n\n # seq_list = []\n # sublist = ''.join(np.random.choice(list_all_20_aa, p=probabilities_all_20_aa) for _ in range(subset_num))\n # subdict = { my_key: prob_table[my_key] for my_key in sublist }\n # 
pick_list = []\n # for key, prob in subdict.items():\n # pick_list.extend([key] * int((prob * 100)))\n\n # generate a reference sequence based on the aa propensity of TM or non-TM region\n\n orig_seq = \"\".join(np.random.choice(list_all_20_aa, p=probabilities_all_20_aa) for _ in range(int(seq_len)))\n\n # generate sequence cluster by randomly replacing predetermined number of residues in reference seq\n seq_matrix = []\n # firstly, choose a set of positions whoose aa will be replaced\n for n in range(number_seq):\n # sys.write something to show that the programming is still running\n if n != 0 and n % 200 == 0:\n sys.stdout.write(\".\")\n sys.stdout.flush()\n # create indices (list of positions)\n inds = list(range(seq_len))\n # number of mutations is calculated beforehand. E.g. if ident=0.9, seqlen=100, number_mutations = 10)\n # create a sample of positions to mutate, e.g. [77, 81, 18, 46, 42, 53, 65, 2, 89, 69, ..... and so on\n list_of_aa_positions_to_be_mutated = random.sample(inds, number_mutations)\n orig_seq_as_list = list(orig_seq)\n # based on aa propensity, replace the residue at each chosen position\n for pos in list_of_aa_positions_to_be_mutated:\n orig_seq_as_list[pos] = np.random.choice(list_all_20_aa, p=probabilities_all_20_aa)\n seq_incl_mutations = \"\".join(orig_seq_as_list)\n\n # append each new sequence to the seq_matrix\n seq_matrix.append(list(seq_incl_mutations))\n\n # convert the seq_matrix into a np.array to ease further steps (slicing columns)\n seq_matrix = np.array(seq_matrix)\n\n return orig_seq, seq_matrix", "def translateSequence(seq):\n aa = ''\n for i in xrange(0, len(seq), 3):\n aa += codonToAminoAcid(seq[i:i+3])\n return aa", "def genseq(DNA):\n DNA = DNA.upper()\n res1 = genes(DNA)\n DNA = DNA.translate(str.maketrans('ATGC', 'TACG'))[::-1]\n res2 = genes(DNA)\n return (res1+res2)", "def mutate_specific(self, muts, seq=None):\n\n if seq == None:\n seq = self.startseq[:20, :].copy()\n else:\n seq, _, _ = self.TFrecordsgenerator._seq2tensor(seq.strip('\\n'))\n # sequence is now in one-hot encoding\n aas = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']\n\n for mut in muts:\n seq[:, int(mut[:-1])-1] = np.zeros(20) # reset\n seq[aas.index(mut[-1:]), int(mut[:-1])-1] = 1\n\n return seq", "def mutagenesis(protein,dna,sequence,designed_sequence,PDBname):\n\t\n\tn_of_substitutions,totals,total_atm = 0,-1,0\n\tdesigned_dna = []\n\t\n\tfor s in range(0,len(sequence)):\n\t\tconserved = False\n\t\tnt1 = sequence[s:s+1]\n\t\tnt2 = designed_sequence[s:s+1]\n\t\tif(nt1 == '/'): continue \n\t\ttotals += 1\n\t\tif(nt1 == nt2): conserved = True\n\t\telse: n_of_substitutions += 1\n\t\t\n\t\tnt = ''\n\t\tfor atomo in dna[totals].split(\"\\n\"):\n\t\t\tif(atomo == ''): break\n\t\t\tif(atomo[15:16] == \"'\" or atomo[12:15] == ' P ' or atomo[12:15] == ' OP' \\\n\t\t\tor conserved):\n\t\t\t\tnt += atomo + \"\\n\"\n\t\tif(conserved == False): \n\t\t\tnt += base_mutada(dna[totals],nt2)\t\n\t\t\n\t\t(renum_nt,total_atm) = renumera_atomos(nt,total_atm)\n\t\tdesigned_dna.append(renum_nt)\n\n\tpdbfile = open(PDBname, 'w')\n\tprint(\"HEADER redesigned protein-DNA complex\\n\", file=pdbfile)\n\tprint(\"REMARK original sequence : %s\\n\" % (sequence), file=pdbfile)\n\tprint(\"REMARK designed sequence : %s\\n\" % (designed_sequence), file=pdbfile)\n\tfor r in protein: print(\"%s\" % (r), file=pdbfile)\n\tprint(\"TER\\n\", file=pdbfile)\n\tfor r in designed_dna: print(\"%s\" % (r), file=pdbfile)\n\tprint(\"TER\\n\", 
file=pdbfile)\n\tpdbfile.close()\t\n\t\n\treturn n_of_substitutions", "def _retranslate(seq):\n if len(seq[\"vdj_nt\"]) % 3 != 0:\n trunc = len(seq[\"vdj_nt\"]) % 3\n seq[\"vdj_nt\"] = seq[\"vdj_nt\"][:-trunc]\n seq[\"vdj_aa\"] = Seq(seq[\"vdj_nt\"], generic_dna).translate()", "def encode_whole_protein(seq, true_coords, angles, padding_seq,\n needed_info = { \"cutoffs\": [2, 5, 10],\n \"bond_scales\": [0.5, 1, 2]}, free_mem=False):\n device, precise = true_coords.device, true_coords.type()\n #################\n # encode points #\n #################\n scaffolds = build_scaffolds_from_scn_angles(seq[:(-padding_seq) or None], angles[:(-padding_seq) or None])\n flat_mask = rearrange(scaffolds[\"cloud_mask\"], 'l c -> (l c)')\n # embedd everything\n\n # general position embedding\n center_coords = true_coords - true_coords.mean(dim=0)\n pos_unit_norms = torch.norm(center_coords, dim=-1, keepdim=True)\n pos_unit_vecs = center_coords / pos_unit_norms\n pos_unit_norms_enc = encode_dist(pos_unit_norms, scales=needed_info[\"atom_pos_scales\"]).squeeze()\n # reformat coordinates to scn (L, 14, 3) - TODO: solve if padding=0\n coords_wrap = rearrange(center_coords, '(l c) d -> l c d', c=14)[:(-padding_seq) or None]\n\n # position in backbone embedding\n aa_pos = encode_dist( torch.arange(len(seq[:(-padding_seq) or None]), device=device).float(), scales=needed_info[\"aa_pos_scales\"])\n atom_pos = chain2atoms(aa_pos)[scaffolds[\"cloud_mask\"]]\n\n # atom identity embedding\n atom_id_embedds = torch.stack([SUPREME_INFO[k][\"atom_id_embedd\"] for k in seq[:(-padding_seq) or None]],\n dim=0)[scaffolds[\"cloud_mask\"]].to(device)\n # aa embedding\n seq_int = torch.tensor([AAS.index(aa) for aa in seq[:(-padding_seq) or None]], device=device).long()\n aa_id_embedds = chain2atoms(seq_int, mask=scaffolds[\"cloud_mask\"])\n\n # CA - SC distance\n dist2ca_vec, dist2ca_norm = dist2ca(coords_wrap) \n dist2ca_norm_enc = encode_dist(dist2ca_norm, scales=needed_info[\"dist2ca_norm_scales\"]).squeeze()\n\n # BACKBONE feats\n vecs, norms = orient_aa(coords_wrap)\n bb_vecs_atoms = chain2atoms(torch.transpose(vecs, 0, 1), mask=scaffolds[\"cloud_mask\"])\n bb_norms_atoms = chain2atoms(torch.transpose(norms, 0, 1), mask=scaffolds[\"cloud_mask\"])\n bb_norms_atoms_enc = encode_dist(bb_norms_atoms, scales=[0.5])\n\n ################\n # encode bonds #\n ################\n bond_info = encode_whole_bonds(x = coords_wrap[scaffolds[\"cloud_mask\"]],\n x_format = \"coords\",\n embedd_info = {},\n needed_info = needed_info )\n whole_bond_enc, whole_bond_idxs, bond_embedd_info = bond_info\n #########\n # merge #\n #########\n\n # concat so that final is [vector_dims, scalar_dims]\n point_n_vectors = 1 + 1 + 5\n point_n_scalars = 2*len(needed_info[\"atom_pos_scales\"]) + 1 +\\\n 2*len(needed_info[\"aa_pos_scales\"]) + 1 +\\\n 2*len(needed_info[\"dist2ca_norm_scales\"]) + 1+\\\n rearrange(bb_norms_atoms_enc, 'atoms feats encs -> atoms (feats encs)').shape[1] +\\\n 2 # the last 2 are to be embedded yet\n\n whole_point_enc = torch.cat([ pos_unit_vecs[ :(-padding_seq*14) or None ][ flat_mask ], # 1\n dist2ca_vec[scaffolds[\"cloud_mask\"]], # 1\n rearrange(bb_vecs_atoms, 'atoms n d -> atoms (n d)'), # 5\n # scalars\n pos_unit_norms_enc[ :(-padding_seq*14) or None ][ flat_mask ], # 2n+1\n atom_pos, # 2n+1\n dist2ca_norm_enc[scaffolds[\"cloud_mask\"]], # 2n+1\n rearrange(bb_norms_atoms_enc, 'atoms feats encs -> atoms (feats encs)'), # 2n+1\n atom_id_embedds.unsqueeze(-1),\n aa_id_embedds.unsqueeze(-1) ], dim=-1) # the last 2 are yet to be 
embedded\n if free_mem:\n del pos_unit_vecs, dist2ca_vec, bb_vecs_atoms, pos_unit_norms_enc, \\\n atom_pos, dist2ca_norm_enc, bb_norms_atoms_enc, atom_id_embedds, aa_id_embedds\n\n\n # record embedding dimensions\n point_embedd_info = {\"point_n_vectors\": point_n_vectors,\n \"point_n_scalars\": point_n_scalars,}\n\n embedd_info = {**point_embedd_info, **bond_embedd_info}\n\n return whole_point_enc, whole_bond_idxs, whole_bond_enc, embedd_info", "def translate(cds):\n RNA2DNA = lambda seq: seq.upper().replace('U', 'T')\n cds = RNA2DNA(cds)\n valid_sequence(cds)\n codon_table = {'TTT': 'F', 'CTT': 'L', 'ATT': 'I', 'GTT': 'V',\n 'TTC': 'F', 'CTC': 'L', 'ATC': 'I', 'GTC': 'V',\n 'TTA': 'L', 'CTA': 'L', 'ATA': 'I', 'GTA': 'V',\n 'TTG': 'L', 'CTG': 'L', 'ATG': 'M', 'GTG': 'V',\n 'TCT': 'S', 'CCT': 'P', 'ACT': 'T', 'GCT': 'A',\n 'TCC': 'S', 'CCC': 'P', 'ACC': 'T', 'GCC': 'A',\n 'TCA': 'S', 'CCA': 'P', 'ACA': 'T', 'GCA': 'A',\n 'TCG': 'S', 'CCG': 'P', 'ACG': 'T', 'GCG': 'A',\n 'TAT': 'Y', 'CAT': 'H', 'AAT': 'N', 'GAT': 'D',\n 'TAC': 'Y', 'CAC': 'H', 'AAC': 'N', 'GAC': 'D',\n 'TAA': '', 'CAA': 'Q', 'AAA': 'K', 'GAA': 'E',\n 'TAG': '', 'CAG': 'Q', 'AAG': 'K', 'GAG': 'E',\n 'TGT': 'C', 'CGT': 'R', 'AGT': 'S', 'GGT': 'G',\n 'TGC': 'C', 'CGC': 'R', 'AGC': 'S', 'GGC': 'G',\n 'TGA': '', 'CGA': 'R', 'AGA': 'R', 'GGA': 'G',\n 'TGG': 'W', 'CGG': 'R', 'AGG': 'R', 'GGG': 'G'}\n prot = ''\n if len(cds) % 3:\n raise ValueError('The CDS sequence seems to have wrong length which cannot be divided by 3!')\n prot = ''.join((codon_table[codon] for codon in split_seq(cds, 3)))\n return prot", "def mutated_seq(self, seq):\n rand = random.random()\n aa = -1\n while rand > 0:\n rand -= self.normalized_weights[aa + 1]\n aa += 1\n\n locus = -1\n mutatables = np.sum(seq[20, :])\n\n tomutate = random.randint(1, mutatables)\n\n while tomutate > 0: # go to random locus that is to mutate\n if seq[20, locus+1] == 1:\n tomutate -= 1\n locus += 1\n\n # check if the sequence can be mutated further away from the original:\n\n if np.sum(np.abs(seq[:20, :] - self.startseq[:20, :]))/2 >= self.maxmut:\n\n if np.argmax(seq[:20, locus]) != np.argmax(self.startseq[:20, locus]): # mutated, still original sequence\n seq[:20, locus] = np.zeros(20)\n seq[aa, locus] = 1 # now a random amino acid\n seq[21, locus] += 1\n\n else: # another locus has to be backmutated\n self.backmutating = True\n\n tobackmutate = random.randint(1, self.maxmut)\n locusback = -1\n while tobackmutate > 0: # go to random locus that is to mutate\n if np.argmax(seq[:20, locusback + 1]) != np.argmax(self.startseq[:20, locusback + 1]): # mutated\n tobackmutate -= 1\n locusback += 1\n\n seq[:20, locus] = np.zeros(20)\n seq[aa, locus] = 1 # now a random amino acid\n seq[21, locus] += 1\n\n seq[:20, locusback] = np.zeros(20)\n seq[np.argmax(self.startseq[:20, locusback]), locusback] = 1 # set one where the starting seq is one\n seq[21, locusback] += 1\n\n else: # there are mutations left, we can mutate it.\n seq[:20, locus] = np.zeros(20)\n seq[aa, locus] = 1 # now a random amino acid\n seq[21, locus] += 1\n\n return seq", "def seqs2degen(seqs):\n if seqs is None:\n return ''\n # converting to list of lists (individual chars)\n seqs = [[y for y in str(x).upper()] for x in seqs]\n seq_lens = set([len(x) for x in seqs])\n if not len(seq_lens) == 1:\n raise KeyError('Sequences are not the same length!')\n # getting seq variation at each position, then mapping to IUPAC of degen chars\n degen_seq = ''\n for i in range(list(seq_lens)[0]):\n chars = set([seq[i] for seq in seqs])\n 
if '-' in chars:\n return ''\n if 'N' in chars:\n chars = {'A', 'C', 'G', 'T'}\n try:\n degen_seq += IUPAC_R[tuple(sorted(set(chars)))]\n except KeyError:\n msg = 'Cannot map chars to degen character: {}'\n raise KeyError(msg.format(','.join(list(chars))))\n # checking the degen seq_len = input_seqs\n if len(degen_seq) != list(seq_lens)[0]:\n raise ValueError('Degenerate seq len != non-degen seqs!')\n return degen_seq", "def mutate(dna):\n dna_out = \"\"\n mutation_chance = 100\n for c in range(DNA_SIZE):\n if int(random.random() * mutation_chance) == 1:\n dna_out += random_char()\n else:\n dna_out += dna[c]\n return dna_out", "def generate_all_mutants(self, sequence):\n\n return(self.generate_all_deletion_mutants(sequence)\n + self.generate_all_insertion_mutants(sequence) \n + self.generate_all_point_mutants(sequence))", "def isolated_peptide_result(read_frame, strand, variant_comb, somatic_mutation_sub_dict, ref_mut_seq, gene_start, all_read_frames):\n\n start_v1 = read_frame.cds_left_modi\n stop_v1 = read_frame.cds_right_modi\n emitting_frame = read_frame.read_phase\n start_v2 = np.nan\n stop_v2 = np.nan\n\n if somatic_mutation_sub_dict: # exist maf dictionary, so we use germline mutation-applied seq as the background seq\n ref_seq = ref_mut_seq['background']\n else:\n ref_seq = ref_mut_seq['ref']\n mut_seq = ref_mut_seq['background']\n\n if strand == '+':\n coord = Coord(start_v1, stop_v1, start_v2, stop_v2)\n peptide_dna_str_mut = get_sub_mut_dna(mut_seq, coord, variant_comb, somatic_mutation_sub_dict, strand, gene_start)\n peptide_dna_str_ref = ref_seq[start_v1 - gene_start:stop_v1 - gene_start]\n else:\n coord = Coord(start_v1, stop_v1, start_v2, stop_v2)\n peptide_dna_str_mut = complementary_seq(get_sub_mut_dna(mut_seq, coord, variant_comb, somatic_mutation_sub_dict, strand, gene_start))\n peptide_dna_str_ref = complementary_seq(ref_seq[start_v1 - gene_start:stop_v1 - gene_start][::-1])\n\n peptide_mut, mut_has_stop_codon = translate_dna_to_peptide(peptide_dna_str_mut, all_read_frames)\n peptide_ref, ref_has_stop_codon = translate_dna_to_peptide(peptide_dna_str_ref, all_read_frames)\n\n is_isolated = True\n\n peptide = Peptide(peptide_mut,peptide_ref)\n coord = Coord(start_v1, stop_v1, start_v2, stop_v2)\n flag = Flag(mut_has_stop_codon,is_isolated)\n\n return peptide, coord, flag", "def generate_all_deletion_mutants(self, sequence):\n ancestor_sequence = list(sequence)\n all_deletion_mutants = []\n \n #deletions\n for i in range(len(sequence)):\n new_seq = list(ancestor_sequence)\n new_seq.pop(i)\n all_deletion_mutants.append(''.join(new_seq))\n \n return all_deletion_mutants", "def test_mutate_from_all_to_all(): # TODO: fix protein mutations\n import perses.rjmc.topology_proposal as topology_proposal\n import perses.rjmc.geometry as geometry\n geometry_engine = geometry.FFAllAngleGeometryEngine()\n\n aminos = ['ALA','ARG','ASN','ASP','CYS','GLN','GLU','GLY','HIS','ILE','LEU','LYS','MET','PHE','PRO','SER','THR','TRP','TYR','VAL']\n\n for aa in aminos:\n topology, positions = _get_capped_amino_acid(amino_acid=aa)\n modeller = app.Modeller(topology, positions)\n\n max_point_mutants = 1\n\n system = system_generator.create_system(modeller.topology)\n chain_id = '1'\n\n pm_top_engine = topology_proposal.PointMutationEngine(modeller.topology, system_generator, chain_id, max_point_mutants=max_point_mutants)\n\n current_system = system\n current_topology = modeller.topology\n current_positions = modeller.positions\n minimize_integrator = openmm.VerletIntegrator(1.0*unit.femtosecond)\n 
platform = openmm.Platform.getPlatformByName(\"Reference\")\n minimize_context = openmm.Context(current_system, minimize_integrator, platform)\n minimize_context.setPositions(current_positions)\n initial_state = minimize_context.getState(getEnergy=True)\n initial_potential = initial_state.getPotentialEnergy()\n openmm.LocalEnergyMinimizer.minimize(minimize_context)\n final_state = minimize_context.getState(getEnergy=True, getPositions=True)\n final_potential = final_state.getPotentialEnergy()\n current_positions = final_state.getPositions()\n #print(\"Minimized initial structure from %s to %s\" % (str(initial_potential), str(final_potential)))\n\n for k, proposed_amino in enumerate(aminos):\n pm_top_engine._allowed_mutations = [[('2',proposed_amino)]]\n pm_top_proposal = pm_top_engine.propose(current_system, current_topology)\n new_positions, logp = geometry_engine.propose(pm_top_proposal, current_positions, beta)\n new_system = pm_top_proposal.new_system\n if np.isnan(logp):\n raise Exception(\"NaN in the logp\")\n integrator = openmm.VerletIntegrator(1*unit.femtoseconds)\n platform = openmm.Platform.getPlatformByName(\"Reference\")\n context = openmm.Context(new_system, integrator, platform)\n context.setPositions(new_positions)\n state = context.getState(getEnergy=True)\n #print(compute_potential_components(context))\n potential = state.getPotentialEnergy()\n potential_without_units = potential / potential.unit\n #print(str(potential))\n if np.isnan(potential_without_units):\n raise Exception(\"Energy after proposal is NaN\")", "def comp_seq(self):\n\n\t\tcomp_base = str.maketrans(\"atgc\",\"tacg\")\n\t\tcomp_dna = self.dna_seq.translate(comp_base)\n\t\tcomp_dna = Sequence(comp_dna)\n\n\t\treturn(comp_dna)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate all single inosine mutations in seq. seq is a Bio.Seq.Seq or str. Does not respect alphabets.
def inosine_gen(seq):
    compat = set('GAT')
    for i in range(len(seq)):
        if seq[i].upper() in compat:
            yield seq[:i] + 'I' + seq[i + 1:]
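A quick sketch on a plain string (the input is made up): every position holding G, A or T, compared case-insensitively, yields one variant with 'I' substituted, while C positions are skipped.

for variant in inosine_gen("GATTACA"):
    print(variant)
# IATTACA, GITTACA, GAITACA, GATIACA, GATTICA, GATTACI  (the C at index 5 is skipped)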
[ "def generate_all_insertion_mutants(self, sequence):\n ancestor_sequence = list(sequence)\n all_insertion_mutants = []\n \n #make all insertions, (+1 for insertion off the last instruction)\n for i in range(len(sequence) + 1):\n for new_char in self.char_lookup:\n new_seq = list(ancestor_sequence)\n new_seq.insert(i, new_char)\n all_insertion_mutants.append(''.join(new_seq))\n \n return all_insertion_mutants", "def expandIupac(seq):\n # http://stackoverflow.com/questions/27551921/how-to-extend-ambiguous-dna-sequence\n d = {'A': 'A', 'C': 'C', 'B': 'CGT', 'D': 'AGT', 'G': 'G', \\\n 'H': 'ACT', 'K': 'GT', 'M': 'AC', 'N': 'GATC', 'S': 'CG', \\\n 'R': 'AG', 'T': 'T', 'W': 'AT', 'V': 'ACG', 'Y': 'CT', 'X': 'GATC'}\n seqs = []\n for i in product(*[d[j] for j in seq]):\n seqs.append(\"\".join(i))\n return seqs", "def translateSequence(seq):\n aa = ''\n for i in xrange(0, len(seq), 3):\n aa += codonToAminoAcid(seq[i:i+3])\n return aa", "def translate(seq):\n \n #translation table of codons to amino acids\n # _ underscores are nature's stop codons.\n table = {\n 'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',\n 'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',\n 'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',\n 'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',\n 'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',\n 'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',\n 'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',\n 'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',\n 'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',\n 'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',\n 'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',\n 'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',\n 'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',\n 'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',\n 'TAC':'Y', 'TAT':'Y', 'TAA':'_', 'TAG':'_',\n 'TGC':'C', 'TGT':'C', 'TGA':'_', 'TGG':'W',\n }\n \n #The protein is a sequence of amino acids\n protein = \"\"\n \n # Check that the length of the string is divisible by 3\n if len(seq) % 3 == 0:\n # Valid sequence - proceed\n # Loop over the sequence\n for i in range(0, len(seq), 3):\n \n # Extract a single codon (3-letter string)\n codon = seq[i:i+3]\n \n # Look up each codon (3-letter string) and store the result\n # Concatenating to generate an amino acid sequence\n protein += table[codon]\n else:\n pass\n \n\n return protein;", "def msa_consensus_filled(seqs, del_as_var=False):\n filter_chars = 'N'\n if not del_as_var:\n filter_chars += '-'\n\n for bases in zip(*seqs):\n bases = filter(lambda x: x not in filter_chars, bases) # no bases other than padding '-'\n counts = Counter(bases)\n if not counts:\n yield 'N'\n elif len(counts) == 1:\n base = tuple(counts.keys())[0]\n if base == '-':\n yield 'N'\n else:\n yield base\n else:\n yield 'N'", "def translate(seq):\n\n table = {\n 'ATA': 'I', 'ATC': 'I', 'ATT': 'I', 'ATG': 'M',\n 'ACA': 'T', 'ACC': 'T', 'ACG': 'T', 'ACT': 'T',\n 'AAC': 'N', 'AAT': 'N', 'AAA': 'K', 'AAG': 'K',\n 'AGC': 'S', 'AGT': 'S', 'AGA': 'R', 'AGG': 'R',\n 'CTA': 'L', 'CTC': 'L', 'CTG': 'L', 'CTT': 'L',\n 'CCA': 'P', 'CCC': 'P', 'CCG': 'P', 'CCT': 'P',\n 'CAC': 'H', 'CAT': 'H', 'CAA': 'Q', 'CAG': 'Q',\n 'CGA': 'R', 'CGC': 'R', 'CGG': 'R', 'CGT': 'R',\n 'GTA': 'V', 'GTC': 'V', 'GTG': 'V', 'GTT': 'V',\n 'GCA': 'A', 'GCC': 'A', 'GCG': 'A', 'GCT': 'A',\n 'GAC': 'D', 'GAT': 'D', 'GAA': 'E', 'GAG': 'E',\n 'GGA': 'G', 'GGC': 'G', 'GGG': 'G', 'GGT': 'G',\n 'TCA': 'S', 'TCC': 'S', 'TCG': 'S', 'TCT': 'S',\n 'TTC': 'F', 'TTT': 'F', 'TTA': 'L', 'TTG': 'L',\n 'TAC': 'Y', 'TAT': 'Y', 'TAA': '_', 'TAG': '_',\n 'TGC': 'C', 'TGT': 'C', 'TGA': '_', 'TGG': 
'W',\n }\n\n seq_len = len(seq)\n if seq_len % 3 == 0:\n protein = \"\"\n for i in range(0, seq_len, 3):\n codon = seq[i: i+3]\n protein += table[codon]\n return protein\n else:\n return f\"Invalid Input Sequence, len = {seq_len}\"", "def translate(RNA_seq):\n \n RNA = remove_blanks(RNA_seq)\n \n # Uses find_start_codon() method to find codon from which\n # translation will start\n counter = Ribosome.find_start_codon(RNA)\n codon = ''\n protein = ''\n \n # Assigns triplets of RNA sequence chars to 'codon' and concatenates the\n # corresponding amino acid symbol to the growing chain of amino acids,\n # then moves on to the next triplet, until reaching stop codon.\n while counter <= (len(RNA) - 3):\n codon = RNA[counter] + RNA[counter+1] + RNA[counter+2]\n \n #Start codon & Methionine(M)\n if codon == 'AUG':\n protein = protein + 'M'\n #Phenylalanine(F)\n elif codon == 'UUU' or codon == 'UUC':\n protein = protein + 'F'\n #Leucine(L)\n elif codon == 'UUA' or codon == 'UUG' or codon == 'CUU' \\\n or codon == 'CUC' or codon == 'CUA' or codon == 'CUG':\n protein = protein + 'L'\n #Isoleucine(I)\n elif codon == 'AUU' or codon == 'AUC' or codon == 'AUA':\n protein = protein + 'I'\n #Valine(V)\n elif codon == 'GUU' or codon == 'GUC' or codon == 'GUA' \\\n or codon == 'GUG':\n protein = protein + 'V'\n #Serine(S)\n elif codon == 'UCU' or codon == 'UCC' or codon == 'UCA' \\\n or codon == 'UCG' or codon == 'AGU' or codon == 'AGC':\n protein = protein + 'S'\n #Proline(P)\n elif codon == 'CCU' or codon == 'CCC' or codon == 'CCA' \\\n or codon == 'CCG':\n protein = protein + 'P'\n #Threonine(T)\n elif codon == 'ACU' or codon == 'ACC' or codon == 'ACA' \\\n or codon == 'ACG':\n protein = protein + 'T'\n #Alaline(A)\n elif codon == 'GCU' or codon == 'GCC' or codon == 'GCA' \\\n or codon == 'GCG':\n protein = protein + 'A'\n #Tyrosine(Y)\n elif codon == 'UAU' or codon == 'UAC':\n protein = protein + 'Y'\n #Histidine(H)\n elif codon == 'CAU' or codon == 'CAC':\n protein = protein + 'H'\n #Glutamine(Q)\n elif codon == 'CAA' or codon == 'CAG':\n protein = protein + 'Q'\n #Asparagine(N)\n elif codon == 'AAU' or codon == 'AAC':\n protein = protein + 'N'\n #Lysine(K)\n elif codon == 'AAA' or codon == 'AAG':\n protein = protein + 'K'\n #Aspartate(D)\n elif codon == 'GAU' or codon == 'GAC':\n protein = protein + 'D'\n #Glutamate(E)\n elif codon == 'GAA' or codon == 'GAG':\n protein = protein + 'E'\n #Cysteine(C)\n elif codon == 'UGU' or codon == 'UGC':\n protein = protein + 'C'\n #Tryptophan(W)\n elif codon == 'UGG':\n protein = protein + 'W'\n #Arginine(R)\n elif codon == 'CGU' or codon == 'CGC' or codon == 'CGA' \\\n or codon == 'CGG' or codon == 'AGA' or codon == 'AGG':\n protein = protein + 'R'\n #Glycine(G)\n elif codon == 'GGU' or codon == 'GGC' or codon == 'GGA' \\\n or codon == 'GGG':\n protein = protein + 'G'\n #Stop codons\n elif codon == 'UAA' or codon == 'UAG' or codon == 'UGA':\n break\n #Exception for if codon is not found\n else: \n raise Exception(\"No such codon found!\")\n \n #Increments counter to move to next codon\n counter = counter + 3\n \n return protein", "def mutated_sequence(self):\n for i in range(len(self.seq)):\n for alt in self.vocab:\n if i in self.fixed_positions or alt == self.seq[i]:\n continue\n yield SeqNode(self.seq[:i] + alt + self.seq[i + 1:],\n fixed_positions=self.fixed_positions + [i])", "def mutate_specific(self, muts, seq=None):\n\n if seq == None:\n seq = self.startseq[:20, :].copy()\n else:\n seq, _, _ = self.TFrecordsgenerator._seq2tensor(seq.strip('\\n'))\n # 
sequence is now in one-hot encoding\n aas = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']\n\n for mut in muts:\n seq[:, int(mut[:-1])-1] = np.zeros(20) # reset\n seq[aas.index(mut[-1:]), int(mut[:-1])-1] = 1\n\n return seq", "def mutated_seq(self, seq):\n rand = random.random()\n aa = -1\n while rand > 0:\n rand -= self.normalized_weights[aa + 1]\n aa += 1\n\n locus = -1\n mutatables = np.sum(seq[20, :])\n\n tomutate = random.randint(1, mutatables)\n\n while tomutate > 0: # go to random locus that is to mutate\n if seq[20, locus+1] == 1:\n tomutate -= 1\n locus += 1\n\n # check if the sequence can be mutated further away from the original:\n\n if np.sum(np.abs(seq[:20, :] - self.startseq[:20, :]))/2 >= self.maxmut:\n\n if np.argmax(seq[:20, locus]) != np.argmax(self.startseq[:20, locus]): # mutated, still original sequence\n seq[:20, locus] = np.zeros(20)\n seq[aa, locus] = 1 # now a random amino acid\n seq[21, locus] += 1\n\n else: # another locus has to be backmutated\n self.backmutating = True\n\n tobackmutate = random.randint(1, self.maxmut)\n locusback = -1\n while tobackmutate > 0: # go to random locus that is to mutate\n if np.argmax(seq[:20, locusback + 1]) != np.argmax(self.startseq[:20, locusback + 1]): # mutated\n tobackmutate -= 1\n locusback += 1\n\n seq[:20, locus] = np.zeros(20)\n seq[aa, locus] = 1 # now a random amino acid\n seq[21, locus] += 1\n\n seq[:20, locusback] = np.zeros(20)\n seq[np.argmax(self.startseq[:20, locusback]), locusback] = 1 # set one where the starting seq is one\n seq[21, locusback] += 1\n\n else: # there are mutations left, we can mutate it.\n seq[:20, locus] = np.zeros(20)\n seq[aa, locus] = 1 # now a random amino acid\n seq[21, locus] += 1\n\n return seq", "def mrna(self):\n seq = \"\".join(\n [\n self.genome.get_genome_sequence(self.chr, start, stop)\n for (start, stop) in self.exons\n ]\n )\n if self.strand == -1:\n seq = reverse_complement(seq)\n return seq", "def remove_duplicate_seqs(alignment):\n\tnewseqs = list()\n\treturnseqs = list()\n\tfor record in alignment:\n\t\tsequence = record.seq\n\t\tif sequence not in newseqs:\n\t\t\tnewseqs.append(record.seq)\n\t\t\t# record.seq = Seq(str(record.seq).replace('N','-'), record.seq.alphabet)\n\t\t\treturnseqs.append(record)\n\treturn MultipleSeqAlignment(returnseqs)", "def generate_all_deletion_mutants(self, sequence):\n ancestor_sequence = list(sequence)\n all_deletion_mutants = []\n \n #deletions\n for i in range(len(sequence)):\n new_seq = list(ancestor_sequence)\n new_seq.pop(i)\n all_deletion_mutants.append(''.join(new_seq))\n \n return all_deletion_mutants", "def dinucleotide(sequence):\n\tfrog = []\n\n\tfor i in range(0,(len(sequence)-1)):\n\t\tbp = sequence[i]\n\t\tbp_next = sequence[i+1]\n\t\tbp = bp.capitalize()\n\t\tbp_next = bp_next.capitalize()\n\n\t\tif bp == 'A':\n\t\t\tif bp_next == 'A':\n\t\t\t\tfrog.append([-1,-1,-1,-1])\n\t\t\telif bp_next == 'C':\n\t\t\t\tfrog.append([-1,-1,-1,1])\n\t\t\telif bp_next == 'G':\n\t\t\t\tfrog.append([-1,-1,1,-1])\n\t\t\telif bp_next == 'T':\n\t\t\t\tfrog.append([-1,-1,1,1])\n\t\telif bp == 'C':\n\t\t\tif bp_next == 'A':\n\t\t\t\tfrog.append([-1,1,-1,-1])\n\t\t\telif bp_next == 'C':\n\t\t\t\tfrog.append([-1,1,-1,1])\n\t\t\telif bp_next == 'G':\n\t\t\t\tfrog.append([-1,1,1,-1])\n\t\t\telif bp_next == 'T':\n\t\t\t\tfrog.append([-1,1,1,1])\n\t\telif bp == 'G':\n\t\t\tif bp_next == 'A':\n\t\t\t\tfrog.append([1,-1,-1,-1])\n\t\t\telif bp_next == 'C':\n\t\t\t\tfrog.append([1,-1,-1,1])\n\t\t\telif 
bp_next == 'G':\n\t\t\t\tfrog.append([1,-1,1,-1])\n\t\t\telif bp_next == 'T':\n\t\t\t\tfrog.append([1,-1,1,1])\n\t\telif bp == 'T':\n\t\t\tif bp_next == 'A':\n\t\t\t\tfrog.append([1,1,-1,-1])\n\t\t\telif bp_next == 'C':\n\t\t\t\tfrog.append([1,1,-1,1])\n\t\t\telif bp_next == 'G':\n\t\t\t\tfrog.append([1,1,1,-1])\n\t\t\telif bp_next == 'T':\n\t\t\t\tfrog.append([1,1,1,1])\n\tfrog = np.array(frog).flatten()\n\n\treturn frog", "def test_get_sequence_str(self):\n my_seq = \"TCTAGA\" + 50 * \"A\" + \"CCGCGG\" + 30 * \"T\"\n genome = {\"chrA\": my_seq}\n\n my_revcomp = str(SeqRecord(Seq(genome[\"chrA\"])).reverse_complement().seq)\n\n iv1p = GenomicSegment(\"chrA\", 0, 6, \"+\")\n iv2p = GenomicSegment(\"chrA\", 56, 62, \"+\")\n iv3p = GenomicSegment(\"chrA\", 0, 92, \"+\")\n\n iv1m = GenomicSegment(\"chrA\", 0, 6, \"-\")\n iv2m = GenomicSegment(\"chrA\", 56, 62, \"-\")\n iv3m = GenomicSegment(\"chrA\", 0, 92, \"-\")\n\n ivc1p = self.test_class(iv1p, iv2p, ID=\"ivc1p\")\n ivc1m = self.test_class(iv1m, iv2m, ID=\"ivc1m\")\n\n self.assertEquals(ivc1p.get_sequence(genome), \"TCTAGACCGCGG\")\n self.assertEquals(ivc1p.get_fasta(genome), \">ivc1p\\nTCTAGACCGCGG\\n\")\n\n self.assertEquals(ivc1m.get_sequence(genome), \"CCGCGGTCTAGA\")\n self.assertEquals(ivc1m.get_fasta(genome), \">ivc1m\\nCCGCGGTCTAGA\\n\")\n\n ivc2p = self.test_class(iv3p, ID=\"ivc2p\")\n ivc2m = self.test_class(iv3m, ID=\"ivc2m\")\n\n self.assertEquals(ivc2p.get_sequence(genome), my_seq)\n self.assertEquals(ivc2p.get_fasta(genome), \">ivc2p\\n%s\\n\" % my_seq)\n\n self.assertEquals(ivc2m.get_sequence(genome), my_revcomp)\n self.assertEquals(ivc2m.get_fasta(genome), \">ivc2m\\n%s\\n\" % my_revcomp)", "def _seq_from_struct(self):\n seq = []\n ch = self.structure[0][0][4]\n fasta = ''\n for atom in self.structure[0]:\n if atom[2] == ' CA ':\n if atom[4] == ch:\n fasta += AA_code(atom[3])\n else:\n seq.append(fasta)\n ch = atom[4]\n fasta = AA_code(atom[3])\n seq.append(fasta)\n return seq", "def encode_DNA(seq):\n\tseq2bin_dict = {'A':[0,0], 'C':[0,1], 'G':[1,0], 'T':[1,1]}\n\treturn np.array(sum([seq2bin_dict.get(nuc) for nuc in seq], []))", "def to_mrna(seq):\n start_codon = \"AUG\"\n stop = [\"UAG\", \"UGA\", \"UAA\"]\n start_positions = []\n final_mrnas = []\n i = 0\n while i < len(seq) - 2:\n if seq[i:i+3] == start_codon: # At start codon\n start_positions.append(i)\n i += 3\n\n for pos in start_positions:\n mrna = \"\"\n i = pos\n is_orf = True\n while i < (len(seq)-2) and is_orf:\n if seq[i:i+3] in stop: # Stop codon reached\n is_orf = False\n final_mrnas.append(mrna)\n else:\n mrna += seq[i:i+3]\n i += 3\n\n return final_mrnas", "def mutagenesis(protein,dna,sequence,designed_sequence,PDBname):\n\t\n\tn_of_substitutions,totals,total_atm = 0,-1,0\n\tdesigned_dna = []\n\t\n\tfor s in range(0,len(sequence)):\n\t\tconserved = False\n\t\tnt1 = sequence[s:s+1]\n\t\tnt2 = designed_sequence[s:s+1]\n\t\tif(nt1 == '/'): continue \n\t\ttotals += 1\n\t\tif(nt1 == nt2): conserved = True\n\t\telse: n_of_substitutions += 1\n\t\t\n\t\tnt = ''\n\t\tfor atomo in dna[totals].split(\"\\n\"):\n\t\t\tif(atomo == ''): break\n\t\t\tif(atomo[15:16] == \"'\" or atomo[12:15] == ' P ' or atomo[12:15] == ' OP' \\\n\t\t\tor conserved):\n\t\t\t\tnt += atomo + \"\\n\"\n\t\tif(conserved == False): \n\t\t\tnt += base_mutada(dna[totals],nt2)\t\n\t\t\n\t\t(renum_nt,total_atm) = renumera_atomos(nt,total_atm)\n\t\tdesigned_dna.append(renum_nt)\n\n\tpdbfile = open(PDBname, 'w')\n\tprint(\"HEADER redesigned protein-DNA complex\\n\", file=pdbfile)\n\tprint(\"REMARK 
original sequence : %s\\n\" % (sequence), file=pdbfile)\n\tprint(\"REMARK designed sequence : %s\\n\" % (designed_sequence), file=pdbfile)\n\tfor r in protein: print(\"%s\" % (r), file=pdbfile)\n\tprint(\"TER\\n\", file=pdbfile)\n\tfor r in designed_dna: print(\"%s\" % (r), file=pdbfile)\n\tprint(\"TER\\n\", file=pdbfile)\n\tpdbfile.close()\t\n\t\n\treturn n_of_substitutions", "def sequence():\n return project().molecule_list()[0].sequence.one_letter_codes" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of namedtuples with some DNA properties. seqs is a list[Bio.Seq.Seq or str] representing DNA sequences.
def dna_properties_batch(seqs):
    seqs = [str(seq) for seq in seqs]
    gcs = [GC(seq) for seq in seqs]
    Tms = melting_temp(seqs)
    ss_dGs = hybrid_ss_min(seqs)
    self_hyb_dGs = [r[0] for r in hybrid_min(seqs, seqs)]
    return [DNAProp(*tup) for tup in zip(seqs, gcs, Tms, ss_dGs, self_hyb_dGs)]
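A consumption sketch, assuming the module-level helpers the function relies on are importable: GC (as in Bio.SeqUtils), a DNAProp namedtuple, and melting_temp / hybrid_ss_min / hybrid_min, which look like wrappers around external hybridization tools and are not shown in the snippet. The unpack order below follows the zip(...) call in the return statement; the input sequences are hypothetical.

from Bio.Seq import Seq

seqs = [Seq("ATGCGTACGTTAGC"), Seq("GGATCCAAGCTTAC")]    # hypothetical inputs
for prop in dna_properties_batch(seqs):
    seq_str, gc, tm, ss_dG, self_hyb_dG = prop           # DNAProp is a namedtuple, so it unpacks like a tuple
    print(seq_str, gc, tm, ss_dG, self_hyb_dG)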
[ "def createDnaObjects(self):\n ADNs = list()\n for sequance in self.__read_FASTA_sequences():\n # assignment title and sequance\n ADNs.append(a.adn(sequance[1], sequance[0]))\n return ADNs", "def _seq_from_struct(self):\n seq = []\n ch = self.structure[0][0][4]\n fasta = ''\n for atom in self.structure[0]:\n if atom[2] == ' CA ':\n if atom[4] == ch:\n fasta += AA_code(atom[3])\n else:\n seq.append(fasta)\n ch = atom[4]\n fasta = AA_code(atom[3])\n seq.append(fasta)\n return seq", "def fastaToSeqs(fasta):\n headers = []\n sequences = []\n\n skipped = False\n for i in range(len(fasta)):\n if skipped:\n skipped = False\n continue\n if fasta[i] == '' or ('pub_gene_id' not in fasta[i] and i % 2 == 0):\n skipped = True\n continue\n if i % 2 == 0:\n headers.append(fasta[i])\n else:\n sequences.append(fasta[i])\n\n print headers, sequences\n\n speciesIDs = [line.split(\":\")[0][1:] for line in headers]\n names = [line.split('\"pub_gene_id\":')[1].split(',')[0].strip('\"') for line in headers]\n names = [thing.split(';')[1] if ';' in thing else thing for thing in names]\n\n return (names, sequences, speciesIDs)", "def _make_mapping_from_seq(self):\n mapping = []\n for residue in self.seq:\n mapping.append((residue, ATOM_MAP_14[residue]))\n return mapping", "def getSequence(sequencePath):\n with open(sequencePath,'r') as SEQPATH:\n lines=SEQPATH.readlines()\n SEQPATH.closed\n rna=[]\n names=[]\n i=0\n while i<len(lines):\n if lines[i][0]=='>':\n names.append(lines[i].strip().replace('.','-'))\n rna.append(\"\")\n line=len(names)-1\n else:\n rna[line]+=lines[i].strip()\n i+=1\n \n if len(rna)!=len(names):\n print\"error:fasta file is not good:\", len(names),\" rna but \", len(rna),\" sequences.\"\n sys.exit(1)\n return names,rna", "def get_seq_list(seq_arg):\n\tpep_sequences = []\n\tfor inp in seq_arg:\n\t\tif '.' 
in inp:\n\t\t\t# If input is a file\n\t\t\tlis = readfile(inp) \n\t\t\tif len(lis) == 1:\n\t\t\t\t# If all sequences are listed horizontally on one line\n\t\t\t\t# rather than one per line, rearrange\n\t\t\t\tlis = lis[0].split()\n\n\t\t\tfor i in lis:\n\t\t\t\tpep_sequences.append(i.strip())\n\n\t\telse:\n\t\t\t# Sequence was typed directly into the argument\n\t\t\tpep_sequences.append(inp.strip())\n\n\t\tfor n, s in enumerate(pep_sequences):\n\t\t\tif len(s) == 5: # Hard code for HCV\n\t\t\t\tpep_sequences[n] = 'A' + s + 'CSMHL'\n\t\t\telif len(s) == 6: # Hard code for HCV\n\t\t\t\tpep_sequences[n] = 'A' + s + 'SMHL'\n\t\t\telse:\n\t\t\t\tassert len(s) == 11\n\n\treturn pep_sequences", "def getSequences():\n seqObjs = []\n sg = getShotgunHandle()\n\n sequences = sg.find(\n \"Sequence\",\n [\n [\n \"project.Project.name\",\n \"is\",\n CONFIG_DATA[\"shotgun\"][\"settings\"][\"project_name\"],\n ]\n ],\n [\"code\"],\n )\n for sequence in sequences:\n seqObjs.append(vfxEntities.Sequence(sequence, sg))\n\n return seqObjs", "def generate_list_of_pose_objs(sequence):\n\n seq_list = []\n for english_name in sequence:\n pose_obj = get_pose_by_name_eng(english_name)\n seq_list.append(pose_obj)\n\n return seq_list", "def get_seq_names(self):\r\n return self.store.root.__members__", "def attrsToList(self, attrs):\n return [g.Bunch(name=name, val=attrs.getValue(name))\n for name in attrs.getNames()]", "def outcome_seq(self):\n seq = []\n for num, ch in enumerate(self.chains.keys()):\t\t\t\t#('A', 'B')\n resi_list = self.residues[num]\n s = list(self.pdb_seq[self.pdb_chains[ch][2]])\n# print(\"struct: \", self.seq[self.chains[ch][2]])##############################\n# print(\"seq : \", \"\".join(s))###########################\n# print(len(self.seq[self.chains[ch][2]]), len(s), len(resi_list))#########################\n if len(s) != len(self.seq[self.chains[ch][2]]):\n for aa in range(0, len(s)):\n if s[aa] != resi_list[aa][2]:\n print(\"ERROR: seq_from_struct + missing_resi differ from seq_from_header at position %s\" %aa)\n if resi_list[aa][4] == 'm':\n s[aa] = '-'\n seq.append(\"\".join(s))\n# print(\"out_s : \", seq[0])#######################################\n return seq", "def from_seq(seq: tp.Sequence) -> List:\n return Nil() if len(seq) == 0 else Cons(car=seq[0], cdr=from_seq(seq[1:]))", "def seq_list(self):\n if not self.__seq_list:\n return None\n\n if self.combine_seq:\n # wrap in list, since accessor wants a list\n return [self.__seq_list[-1]]\n return self.__seq_list", "def sequence_items(self):\n seq_css = 'ol#sequence-list>li>a>p'\n return self.q(css=seq_css).map(self._clean_seq_titles).results", "def __init__(self, sequences, name=\"zip\", fields=[]):\n # shall be changed to this:\n # def __init__(self, seqs, construct=tuple, with_context=False):\n # *Construct* is a callable which accepts a\n if not sequences:\n raise exceptions.LenaTypeError(\n \"at least one sequence must be given\"\n )\n\n self._sequences = []\n seq_types_list = []\n\n for sequence in sequences:\n try:\n seq, seq_type = split._get_seq_with_type(sequence)\n except exceptions.LenaTypeError:\n raise exceptions.LenaTypeError(\n \"unknown argument type. 
Must be one of \"\n \"FillComputeSeq, FillRequestSeq or Source, \"\n \"{} provided\".format(sequence)\n )\n self._sequences.append(seq)\n seq_types_list.append(seq_type)\n\n seq_types = set(seq_types_list)\n if len(seq_types) != 1:\n raise exceptions.LenaTypeError(\n \"only one sequence type is allowed\"\n )\n\n seq_type = seq_types.pop()\n if seq_type == \"fill_compute\":\n self.fill = self._fill\n self.compute = self._compute\n elif seq_type == \"fill_request\":\n self.fill = self._fill\n self.request = self._request\n self.reset = self._reset\n else:\n raise exceptions.LenaNotImplementedError\n\n self._name = name\n if fields:\n if len(fields) != len(sequences) and not isinstance(fields, str):\n raise exceptions.LenaTypeError(\n \"fields, if provided, must have same length as sequences, \"\n \"{} and {} given\".format(fields, sequences)\n )\n self._fields = fields\n self._namedtuple = collections.namedtuple(name, fields)\n # needed to be able to pickle and unpickle these namedtuples\n globals()[name] = self._namedtuple\n else:\n self._namedtuple = None", "def __getReferenceSequenceDetails(self, dataContainer):\n logger.debug(\"Starting with %r\", dataContainer.getName())\n self.__addStructRefSeqEntityIds(dataContainer)\n #\n # To exclude self references -\n excludeRefDbList = [\"PDB\"]\n rD = {\"seqEntityAlignmentD\": {}, \"seqEntityRefDbD\": {}, \"entityPolymerSeqenceD\": {}}\n try:\n # Exit if source categories are missing\n if not (dataContainer.exists(\"struct_ref_seq\") and dataContainer.exists(\"struct_ref\") and dataContainer.exists(\"entity_poly\")):\n return rD\n # ------- --------- ------- --------- ------- --------- ------- --------- ------- ---------\n entityPolymerSequenceD = {}\n if dataContainer.exists(\"entity_poly\"):\n epObj = dataContainer.getObj(\"entity_poly\")\n for ii in range(epObj.getRowCount()):\n entityId = epObj.getValue(\"entity_id\", ii)\n pType = epObj.getValue(\"type\", ii)\n pTypeFiltered = self.filterEntityPolyType(pType)\n if epObj.hasAttribute(\"pdbx_seq_one_letter_code_can\"):\n sampleSeq = self.__stripWhiteSpace(epObj.getValue(\"pdbx_seq_one_letter_code_can\", ii))\n if sampleSeq and sampleSeq not in [\"?\", \".\"]:\n entityPolymerSequenceD[entityId] = {\"sequence\": sampleSeq, \"polymerType\": pType, \"polymerTypeFiltered\": pTypeFiltered}\n #\n srObj = None\n if dataContainer.exists(\"struct_ref\"):\n srObj = dataContainer.getObj(\"struct_ref\")\n #\n srsObj = None\n if dataContainer.exists(\"struct_ref_seq\"):\n srsObj = dataContainer.getObj(\"struct_ref_seq\")\n\n # srsdObj = None\n # if dataContainer.exists(\"struct_ref_seq_dif\"):\n # srsdObj = dataContainer.getObj(\"struct_ref_seq_dif\")\n\n polymerEntityTypeD = self.getPolymerEntityFilteredTypes(dataContainer)\n # Map alignId -> entityId\n seqEntityRefDbD = {}\n tupSeqEntityRefDbD = {}\n alignEntityMapD = {}\n # entity alignment details\n seqEntityAlignmentD = {}\n for ii in range(srObj.getRowCount()):\n dbAccessionAlignS = set()\n entityId = srObj.getValue(\"entity_id\", ii)\n refId = srObj.getValue(\"id\", ii)\n dbName = str(srObj.getValue(\"db_name\", ii)).strip().upper()\n #\n if dbName in excludeRefDbList:\n continue\n #\n if entityId not in polymerEntityTypeD:\n logger.warning(\"%s skipping non-polymer entity %r in sequence reference\", dataContainer.getName(), entityId)\n continue\n\n if dbName in [\"UNP\"] and polymerEntityTypeD[entityId] != \"Protein\":\n logger.warning(\"%s skipping inconsistent reference assignment for %s polymer type %s\", dataContainer.getName(), dbName, 
polymerEntityTypeD[entityId])\n continue\n #\n tS = srObj.getValue(\"pdbx_db_accession\", ii)\n dbAccession = tS if tS and tS not in [\".\", \"?\"] else None\n #\n tS = srObj.getValue(\"pdbx_db_isoform\", ii)\n dbIsoform = tS if tS and tS not in [\".\", \"?\"] else None\n # Look for a stray isoform\n if dbName in [\"UNP\"] and dbAccession and \"-\" in dbAccession:\n if not dbIsoform:\n dbIsoform = dbAccession\n ff = dbAccession.split(\"-\")\n dbAccession = ff[0]\n\n #\n if dbIsoform and dbAccession not in dbIsoform:\n logger.warning(\"entryId %r entityId %r accession %r isoform %r inconsistency\", dataContainer.getName(), entityId, dbAccession, dbIsoform)\n # ---\n # Get indices for the target refId.\n iRowL = srsObj.selectIndices(refId, \"ref_id\")\n logger.debug(\"entryId %r entityId %r refId %r rowList %r\", dataContainer.getName(), entityId, refId, iRowL)\n\n for iRow in iRowL:\n try:\n entitySeqIdBeg = srsObj.getValue(\"seq_align_beg\", iRow)\n entitySeqIdEnd = srsObj.getValue(\"seq_align_end\", iRow)\n entityAlignLength = int(entitySeqIdEnd) - int(entitySeqIdBeg) + 1\n except Exception:\n entityAlignLength = 0\n #\n if entityAlignLength <= 0:\n logger.warning(\"%s entity %r skipping bad alignment seqBeg %r seqEnd %r\", dataContainer.getName(), entityId, entitySeqIdBeg, entitySeqIdEnd)\n continue\n\n alignId = srsObj.getValue(\"align_id\", iRow)\n alignEntityMapD[alignId] = entityId\n #\n authAsymId = srsObj.getValue(\"pdbx_strand_id\", iRow)\n dbSeqIdBeg = srsObj.getValue(\"db_align_beg\", iRow)\n dbSeqIdEnd = srsObj.getValue(\"db_align_end\", iRow)\n # ----\n try:\n idbSeqIdBeg = int(dbSeqIdBeg)\n if idbSeqIdBeg == 0:\n idbSeqIdBeg = 1\n dbSeqIdBeg = str(idbSeqIdBeg)\n idbSeqIdEnd = int(dbSeqIdEnd)\n idbSeqIdEnd += 1\n dbSeqIdEnd = str(idbSeqIdEnd)\n logger.debug(\"%s offset reference sequence database position\", dataContainer.getName())\n except Exception:\n pass\n # ----\n #\n tS = srsObj.getValue(\"pdbx_db_accession\", iRow)\n # use the parent pdbx_accession\n dbAccessionAlign = tS if tS and tS not in [\".\", \"?\"] else dbAccession\n # Look for a stray isoform\n if dbName in [\"UNP\"] and dbAccessionAlign and \"-\" in dbAccessionAlign:\n if not dbIsoform:\n dbIsoform = dbAccessionAlign\n ff = dbAccessionAlign.split(\"-\")\n dbAccessionAlign = ff[0]\n\n dbAccessionAlignS.add(dbAccessionAlign)\n #\n #\n seqEntityAlignmentD.setdefault(entityId, []).append(\n SeqAlign(\n \"PDB\",\n **{\n \"authAsymId\": authAsymId,\n \"entitySeqIdBeg\": entitySeqIdBeg,\n \"entitySeqIdEnd\": entitySeqIdEnd,\n \"dbSeqIdBeg\": dbSeqIdBeg,\n \"dbSeqIdEnd\": dbSeqIdEnd,\n \"dbName\": dbName,\n \"dbAccession\": dbAccessionAlign,\n \"dbIsoform\": dbIsoform,\n \"entityAlignLength\": entityAlignLength,\n },\n )\n )\n # Check consistency\n try:\n if len(dbAccessionAlignS) == 1 and list(dbAccessionAlignS)[0] == dbAccession:\n tupSeqEntityRefDbD.setdefault(entityId, []).append((dbName, dbAccession, dbIsoform))\n elif len(dbAccessionAlignS) == 1 and list(dbAccessionAlignS)[0]:\n tupSeqEntityRefDbD.setdefault(entityId, []).append((dbName, list(dbAccessionAlignS)[0], None))\n elif dbAccession:\n tupSeqEntityRefDbD.setdefault(entityId, []).append((dbName, dbAccession, dbIsoform))\n else:\n logger.warning(\"%s entityId %r inconsistent reference sequence %r %r\", dataContainer.getName(), entityId, dbAccession, dbAccessionAlignS)\n except Exception:\n logger.exception(\"%s entityId %r inconsistent reference sequence %r %r\", dataContainer.getName(), entityId, dbAccession, dbAccessionAlignS)\n\n # -----\n dbMapD = 
self.getDatabaseNameMap()\n for entityId, tupL in tupSeqEntityRefDbD.items():\n uTupL = list(OrderedDict({tup: True for tup in tupL}).keys())\n for tup in uTupL:\n tS = dbMapD[tup[0]] if tup[0] in dbMapD else tup[0]\n if tup[1]:\n seqEntityRefDbD.setdefault(entityId, []).append({\"dbName\": tS, \"dbAccession\": tup[1], \"dbIsoform\": tup[2]})\n else:\n logger.warning(\"%s %s skipping incomplete sequence reference assignment %r\", dataContainer.getName(), entityId, tup)\n\n return {\n \"seqEntityAlignmentD\": seqEntityAlignmentD,\n \"seqEntityRefDbD\": seqEntityRefDbD,\n \"entityPolymerSequenceD\": entityPolymerSequenceD,\n }\n except Exception as e:\n logger.exception(\"%s failing with %s\", dataContainer.getName(), str(e))\n return rD", "def get_sequence_list(self):\r\n return self.sequence_list", "def get_acs_for_protein_seq(self, seq):\n md5 = seq_md5(seq)\n return [r['ac'] for r in self._fetchall(self._queries['acs_for_protein_md5'], [md5])] + ['MD5_' + md5]", "def make_token_seq(seq):\n ret = []\n for name in seq: ret.append(make_token(name))\n return ret", "def iteritems(self):\n for seq in self:\n yield seq.identifier, seq" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a namedtuple with some DNA properties. seq is a Bio.Seq.Seq or str representing a DNA sequence.
def dna_properties(seq): return dna_properties_batch([seq])[0]
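The batch helper dna_properties_batch and the namedtuple it returns are defined elsewhere and are not visible in this record. Purely as an illustration of the kind of per-sequence summary such a helper might produce, here is a self-contained sketch; every name and field below is an assumption, not the original API.

import collections

# Hypothetical result type; the original namedtuple's fields are not shown here.
DnaProp = collections.namedtuple("DnaProp", ["seq", "length", "gc_fraction", "base_counts"])

def dna_properties_sketch(seq):
    """Compute a few simple DNA properties for a Bio.Seq.Seq or str."""
    s = str(seq).upper()
    counts = {base: s.count(base) for base in "ACGT"}
    gc = (counts["G"] + counts["C"]) / len(s) if s else 0.0
    return DnaProp(seq=s, length=len(s), gc_fraction=gc, base_counts=counts)

print(dna_properties_sketch("ACGTGGCC"))  # gc_fraction == 0.75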
[ "def __init__(self, seq, extraNt=''):\n\n self.seq = DNAseq(str(seq))\n self.extraNt = extraNt", "def _seq_from_struct(self):\n seq = []\n ch = self.structure[0][0][4]\n fasta = ''\n for atom in self.structure[0]:\n if atom[2] == ' CA ':\n if atom[4] == ch:\n fasta += AA_code(atom[3])\n else:\n seq.append(fasta)\n ch = atom[4]\n fasta = AA_code(atom[3])\n seq.append(fasta)\n return seq", "def generate_cdr_sequence_record(row):\n indices = json.loads(row.cdr_bp_id_str)\n start_ind = indices[0]\n end_ind = indices[-1]\n name = \"_\".join(map(str, [row.cdr_pdb_id,\n \"@\",\n row.cdr_resnames,\n \"@\",\n start_ind,\n end_ind]))\n record = SeqRecord(Seq(row.cdr_resnames, IUPAC.protein),\n id=name,\n name=name,\n description=name)\n return record", "def comp_seq(self):\n\n\t\tcomp_base = str.maketrans(\"atgc\",\"tacg\")\n\t\tcomp_dna = self.dna_seq.translate(comp_base)\n\t\tcomp_dna = Sequence(comp_dna)\n\n\t\treturn(comp_dna)", "def addDNAseq(self, protein_name, DNAseq):\n if self.isknownProtein(protein_name):\n # Do we already have a DNA sequence?\n self.data[protein_name].DNAseq = DNAseq\n return True\n return None", "def translate(self, **kwargs) -> \"SeqLike\":\n sc = deepcopy(self)\n if sc._nt_record is None:\n raise ValueError(\n \"Oops! It looks like you're trying to translate a SeqLike object \"\n \"that doesn't have a nucleotide record set. \"\n \"Unfortunately this would be semantically incorrect. \"\n \"Please ensure that your SeqLike has a `._nt_record` SeqRecord \"\n \"before calling on `.translate()`.\"\n )\n\n if len(sc) % 3 != 0:\n raise TypeError(\n \"Oh no! It looks like you're trying to translate a nucleotide sequence \"\n \"whose length is not a multiple of 3. \"\n \"As a safeguard, SeqLike objects do not allow this to happen. \"\n )\n sc._aa_record = record_from(sc._nt_record.translate(gap=gap_letter, **kwargs))\n # neutralize \"protein\" `molecule_type` annotation added by BioPython's `SeqRecord.translate()`\n sc._aa_record.annotations.pop(\"molecule_type\")\n return sc.aa()", "def MakeSeq( val ):\n return val if IsSeq( val ) else ( val, )", "def __init__(self, sequences, name=\"zip\", fields=[]):\n # shall be changed to this:\n # def __init__(self, seqs, construct=tuple, with_context=False):\n # *Construct* is a callable which accepts a\n if not sequences:\n raise exceptions.LenaTypeError(\n \"at least one sequence must be given\"\n )\n\n self._sequences = []\n seq_types_list = []\n\n for sequence in sequences:\n try:\n seq, seq_type = split._get_seq_with_type(sequence)\n except exceptions.LenaTypeError:\n raise exceptions.LenaTypeError(\n \"unknown argument type. 
Must be one of \"\n \"FillComputeSeq, FillRequestSeq or Source, \"\n \"{} provided\".format(sequence)\n )\n self._sequences.append(seq)\n seq_types_list.append(seq_type)\n\n seq_types = set(seq_types_list)\n if len(seq_types) != 1:\n raise exceptions.LenaTypeError(\n \"only one sequence type is allowed\"\n )\n\n seq_type = seq_types.pop()\n if seq_type == \"fill_compute\":\n self.fill = self._fill\n self.compute = self._compute\n elif seq_type == \"fill_request\":\n self.fill = self._fill\n self.request = self._request\n self.reset = self._reset\n else:\n raise exceptions.LenaNotImplementedError\n\n self._name = name\n if fields:\n if len(fields) != len(sequences) and not isinstance(fields, str):\n raise exceptions.LenaTypeError(\n \"fields, if provided, must have same length as sequences, \"\n \"{} and {} given\".format(fields, sequences)\n )\n self._fields = fields\n self._namedtuple = collections.namedtuple(name, fields)\n # needed to be able to pickle and unpickle these namedtuples\n globals()[name] = self._namedtuple\n else:\n self._namedtuple = None", "def getSequence(sequencePath):\n with open(sequencePath,'r') as SEQPATH:\n lines=SEQPATH.readlines()\n SEQPATH.closed\n rna=[]\n names=[]\n i=0\n while i<len(lines):\n if lines[i][0]=='>':\n names.append(lines[i].strip().replace('.','-'))\n rna.append(\"\")\n line=len(names)-1\n else:\n rna[line]+=lines[i].strip()\n i+=1\n \n if len(rna)!=len(names):\n print\"error:fasta file is not good:\", len(names),\" rna but \", len(rna),\" sequences.\"\n sys.exit(1)\n return names,rna", "def __init__(self, id, desc, seq):\n\n\t\tself.id = id\n\t\tself.desc = desc\n\t\tself.seq = seq\n\t\tif not self.seq or self.seq == '\\n':\n\t\t\traise FASTA_error('empty string is not a valid sequence')", "def dna_sequence(self, length: int = 10) -> str:\n return self.random.schoice('TCGA', length)", "def _get_seq_model(self, sequence_name):\n try:\n sequence = self.madx.sequences[sequence_name]\n except KeyError:\n raise RuntimeError(\"The sequence is not defined.\")\n try:\n beam = sequence.beam\n except RuntimeError:\n beam = {}\n try:\n range, twiss = self._get_twiss(sequence)\n except RuntimeError:\n range = (sequence_name+'$start', sequence_name+'$end')\n twiss = {}\n return {\n 'sequence': sequence_name,\n 'range': range,\n 'beam': self.utool.dict_add_unit(beam),\n 'twiss': self.utool.dict_add_unit(twiss),\n }", "def NameToInfo(sequence, strict=True):\n #adjust label\n label = sequence.Name\n try:\n gb, pos = label.split('/',1) #split genbank label and pos\n if not gb:\n gb = None\n if not pos:\n pos = None\n except: #unable to split, so string doesn't contain '/'\n if strict:\n raise RecordError, \"Failed to extract genbank id and positions\" +\\\n \" from label %s\"%label\n else:\n gb = None\n pos =None\n if pos:\n try:\n start, end = pos.split('-',1) #split start and end pos\n except:\n if strict:\n raise RecordError,\\\n \"Failed to extract genbank id and positions from label %s\"\\\n %label\n else:\n start = None\n end = None\n else:\n start = None\n end = None\n if start:\n # adjust start position to do the correct thing in python\n # see comment in docstring\n start = int(start)-1\n if end:\n end = int(end)\n info = Info({'GenBank':gb,'Start':start,'End':end})\n return info", "def createDnaObjects(self):\n ADNs = list()\n for sequance in self.__read_FASTA_sequences():\n # assignment title and sequance\n ADNs.append(a.adn(sequance[1], sequance[0]))\n return ADNs", "def test_get_sequence_seqrecord(self):\n my_seq = \"TCTAGA\" + 50 * \"A\" + 
\"CCGCGG\" + 30 * \"T\"\n genome = {\"chrA\": SeqRecord(Seq(my_seq))}\n\n my_revcomp = str(genome[\"chrA\"].reverse_complement().seq)\n\n iv1p = GenomicSegment(\"chrA\", 0, 6, \"+\")\n iv2p = GenomicSegment(\"chrA\", 56, 62, \"+\")\n iv3p = GenomicSegment(\"chrA\", 0, 92, \"+\")\n\n iv1m = GenomicSegment(\"chrA\", 0, 6, \"-\")\n iv2m = GenomicSegment(\"chrA\", 56, 62, \"-\")\n iv3m = GenomicSegment(\"chrA\", 0, 92, \"-\")\n\n ivc1p = self.test_class(iv1p, iv2p, ID=\"ivc1p\")\n ivc1m = self.test_class(iv1m, iv2m, ID=\"ivc1m\")\n\n self.assertEquals(ivc1p.get_sequence(genome), \"TCTAGACCGCGG\")\n self.assertEquals(ivc1p.get_fasta(genome), \">ivc1p\\nTCTAGACCGCGG\\n\")\n\n self.assertEquals(ivc1m.get_sequence(genome), \"CCGCGGTCTAGA\")\n self.assertEquals(ivc1m.get_fasta(genome), \">ivc1m\\nCCGCGGTCTAGA\\n\")\n\n ivc2p = self.test_class(iv3p, ID=\"ivc2p\")\n ivc2m = self.test_class(iv3m, ID=\"ivc2m\")\n\n self.assertEquals(ivc2p.get_sequence(genome), my_seq)\n self.assertEquals(ivc2p.get_fasta(genome), \">ivc2p\\n%s\\n\" % my_seq)\n\n self.assertEquals(ivc2m.get_sequence(genome), my_revcomp)\n self.assertEquals(ivc2m.get_fasta(genome), \">ivc2m\\n%s\\n\" % my_revcomp)", "def get_sequence(seq_id, anno_db, genome, seq_type='CDS', exon_split='', flank_len=0):\n def get_sequence_from_genome_by_anno_db(df, genome):\n tmp_seq = genome[df['seq_name']]\n tmp_seq_start = df['seq_start']-1\n tmp_seq_end = df['seq_end']\n df['seq'] = tmp_seq[tmp_seq_start:tmp_seq_end]\n return df\n gene_db = anno_db.query(f'seq_type == \"{seq_type}\" and transcript_id == \"{seq_id}\"')\n gene_db = gene_db.sort_values(by='seq_start')\n gene_db = gene_db.apply(get_sequence_from_genome_by_anno_db, axis=1, genome=genome)\n seq_name = gene_db['seq_name'].unique()\n seq_strand = gene_db['seq_strand'].unique()\n if len(seq_name) > 1 or len(seq_strand) > 1:\n raise TypeError('Different seq_name or seq_strand for the same gene!')\n else:\n seq_name = seq_name[0]\n seq_strand = seq_strand[0]\n if isinstance(exon_split, str):\n cds_seq = exon_split.join(gene_db['seq'])\n else:\n raise TypeError('Input exon_split should be a string but %s' % type(exon_split))\n if flank_len:\n gene_start = gene_db.iloc[0]['seq_start']\n gene_end = gene_db.iloc[-1]['seq_end']\n tmp_seq = genome[seq_name]\n flank5 = tmp_seq[(gene_start-flank_len-1):(gene_start-1)].lower()\n flank3 = tmp_seq[gene_end:(gene_end+flank_len)].lower()\n else:\n flank5 = ''\n flank3 = ''\n cds_seq = flank5 + cds_seq + flank3\n if np.all(gene_db['seq_strand'] == '-'):\n cds_seq = reverse_seq(cds_seq)\n elif np.all(gene_db['seq_strand'] == '+'):\n pass\n else:\n raise ValueError('Different strand in the elements of %s' % seq_id)\n return cds_seq", "def seq2chain(*args):\n if len(args) == 1:\n name = 'seq'\n seq = args[0]\n elif len(args) == 2:\n name = args[0]\n seq = args[1]\n else:\n raise ValueError(\"Give either name,seq or just seq\")\n \n return ImmuneChain(seq=Seq(seq,generic_dna),id=name)", "def ChangedDnaSequence(data, seq_constructor=Dna):\n return seq_constructor(str(data).replace('.','-'))", "def mkfastadict(self,fastapath): \n\t\tseq_dict={}\n\t\n\t\tfasta_sequences = SeqIO.parse(open(fastapath),'fasta')\n\t\n\t\tfor fasta in fasta_sequences:\n\t\t\tseq_dict[fasta.id]=fasta.seq\n\t\t\n\t\treturn(seq_dict)", "def _get_name( self, seq_def ):\n p = re.compile( '\\((.*)\\)' )\n parts = seq_def.split( '|' )\n if len( parts ) > 1:\n m = p.search( parts[2] )\n return m.group( 1 )\n else:\n return seq_def" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a tuple with some protein biochemical properties. seq is a Bio.Seq.Seq or str representing a protein sequence.
def protein_properties(seq):
    pa = ProteinAnalysis(seq)
    aa_counts = pa.count_amino_acids()
    arom = pa.aromaticity()
    isoelec = pa.isoelectric_point()
    try:
        instability = pa.instability_index()
    except KeyError:
        instability = None
    try:
        gravy = pa.gravy()
    except KeyError:
        gravy = None
    return ProtProp(aa=str(seq), gravy=gravy, aromaticity=arom,
                    isoelectric_point=isoelec, instability=instability,
                    aa_counts=aa_counts)
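For reference, the function above is a thin wrapper around Biopython's ProteinAnalysis class. A minimal standalone use of that class looks like the sketch below; the sequence is an arbitrary example and assumes Biopython is installed.

from Bio.SeqUtils.ProtParam import ProteinAnalysis

pa = ProteinAnalysis("MAEGEITTFTALTEKFNLPPGNYKKPKLLYCSNGGHFLRILPDGTVDGT")
print(pa.count_amino_acids())   # dict of residue counts
print(pa.aromaticity())
print(pa.isoelectric_point())
print(pa.instability_index())   # may raise KeyError for non-standard residues
print(pa.gravy())               # may raise KeyError for non-standard residues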
[ "def readBpseq(bpseq_fn):\n content = open(bpseq_fn).readlines()\n seq = [-1] * len(content)\n struct = [-1] * len(content)\n for i, entry in enumerate(content):\n pos, base, pair = entry.strip().split()\n seq[i] = base\n p = int(pair)\n struct[i] = [1, p][p == 0]\n return \"\".join(seq), struct", "def is_protein(seq):\n return IS_PROTEIN.search(seq)", "def one_protein_generator(self):\n seq_title = ''\n seq_list = []\n with open(self._fasta_path, 'r') as fasta_handle:\n for _line in fasta_handle:\n if not _line:\n print('Blank line existed in fasta file')\n continue\n if _line.startswith('>'):\n if seq_title and seq_list:\n yield seq_title, ''.join(seq_list)\n seq_title = _line.strip('\\n')\n seq_list = []\n else:\n seq_list.append(_line.strip('\\n'))\n if seq_title and seq_list:\n yield seq_title, ''.join(seq_list)", "def _seq_from_struct(self):\n seq = []\n ch = self.structure[0][0][4]\n fasta = ''\n for atom in self.structure[0]:\n if atom[2] == ' CA ':\n if atom[4] == ch:\n fasta += AA_code(atom[3])\n else:\n seq.append(fasta)\n ch = atom[4]\n fasta = AA_code(atom[3])\n seq.append(fasta)\n return seq", "def extract_info(seq_R):\n UMI = seq_R[-8:]\n isoform_type = classify_isoform(seq_R)\n barcode = seq_R[seq_R.find('ACATTCGTTAA') + len('ACATTCGTTAA'):seq_R.rfind('TACGTACT')]\n return [barcode, UMI, isoform_type]", "def get_protein_sequence(self, protein_name):\n if self.data[protein_name].has_key('aaseq'):\n aaseq=self.data[protein_name]['aaseq']\n else:\n aaseq=None\n return aaseq", "def outcome_seq(self):\n seq = []\n for num, ch in enumerate(self.chains.keys()):\t\t\t\t#('A', 'B')\n resi_list = self.residues[num]\n s = list(self.pdb_seq[self.pdb_chains[ch][2]])\n# print(\"struct: \", self.seq[self.chains[ch][2]])##############################\n# print(\"seq : \", \"\".join(s))###########################\n# print(len(self.seq[self.chains[ch][2]]), len(s), len(resi_list))#########################\n if len(s) != len(self.seq[self.chains[ch][2]]):\n for aa in range(0, len(s)):\n if s[aa] != resi_list[aa][2]:\n print(\"ERROR: seq_from_struct + missing_resi differ from seq_from_header at position %s\" %aa)\n if resi_list[aa][4] == 'm':\n s[aa] = '-'\n seq.append(\"\".join(s))\n# print(\"out_s : \", seq[0])#######################################\n return seq", "def Retrieve(self):\n try:\n if len(self.Sequence) == 0:\n self.Sequence = None\n self.Quality = None\n return self.Sequence, self.Quality\n except TypeError:\n return self.Sequence, self.Quality", "def test_get_sequence_str(self):\n my_seq = \"TCTAGA\" + 50 * \"A\" + \"CCGCGG\" + 30 * \"T\"\n genome = {\"chrA\": my_seq}\n\n my_revcomp = str(SeqRecord(Seq(genome[\"chrA\"])).reverse_complement().seq)\n\n iv1p = GenomicSegment(\"chrA\", 0, 6, \"+\")\n iv2p = GenomicSegment(\"chrA\", 56, 62, \"+\")\n iv3p = GenomicSegment(\"chrA\", 0, 92, \"+\")\n\n iv1m = GenomicSegment(\"chrA\", 0, 6, \"-\")\n iv2m = GenomicSegment(\"chrA\", 56, 62, \"-\")\n iv3m = GenomicSegment(\"chrA\", 0, 92, \"-\")\n\n ivc1p = self.test_class(iv1p, iv2p, ID=\"ivc1p\")\n ivc1m = self.test_class(iv1m, iv2m, ID=\"ivc1m\")\n\n self.assertEquals(ivc1p.get_sequence(genome), \"TCTAGACCGCGG\")\n self.assertEquals(ivc1p.get_fasta(genome), \">ivc1p\\nTCTAGACCGCGG\\n\")\n\n self.assertEquals(ivc1m.get_sequence(genome), \"CCGCGGTCTAGA\")\n self.assertEquals(ivc1m.get_fasta(genome), \">ivc1m\\nCCGCGGTCTAGA\\n\")\n\n ivc2p = self.test_class(iv3p, ID=\"ivc2p\")\n ivc2m = self.test_class(iv3m, ID=\"ivc2m\")\n\n self.assertEquals(ivc2p.get_sequence(genome), my_seq)\n 
self.assertEquals(ivc2p.get_fasta(genome), \">ivc2p\\n%s\\n\" % my_seq)\n\n self.assertEquals(ivc2m.get_sequence(genome), my_revcomp)\n self.assertEquals(ivc2m.get_fasta(genome), \">ivc2m\\n%s\\n\" % my_revcomp)", "def getSequence(sequencePath):\n with open(sequencePath,'r') as SEQPATH:\n lines=SEQPATH.readlines()\n SEQPATH.closed\n rna=[]\n names=[]\n i=0\n while i<len(lines):\n if lines[i][0]=='>':\n names.append(lines[i].strip().replace('.','-'))\n rna.append(\"\")\n line=len(names)-1\n else:\n rna[line]+=lines[i].strip()\n i+=1\n \n if len(rna)!=len(names):\n print\"error:fasta file is not good:\", len(names),\" rna but \", len(rna),\" sequences.\"\n sys.exit(1)\n return names,rna", "def MakeSeq( val ):\n return val if IsSeq( val ) else ( val, )", "def sequence():\n return project().molecule_list()[0].sequence.one_letter_codes", "def get_tupledesc(self):", "def _get_protein_name_for_genomic_position(self,genome_position):\n\t\tfor prot,coords in self.reference_protein_locations.items():\n\t\t\tif (genome_position >= coords[0]) and (genome_position <= coords[1]):\n\t\t\t\treturn prot\n\t\treturn None", "def get_seq_info(self, seq_name):\r\n\r\n info = {'has_features': False, 'has_potentials': False,\r\n 'n_feat_fun': None, 'n_obs': None}\r\n\r\n if self.has_features():\r\n info['has_features'] = True\r\n\r\n if self.has_potentials():\r\n info['has_potentials'] = True\r\n\r\n return info", "def create_skbio_protein_sequence(match_row):\n metadata = {'id': match_row['Protein_Accession'], 'description': ('(From ' + match_row['Sample_Name'] + ')')}\n return Protein(sequence=match_row['Sequence'], metadata=metadata)", "def _get_simple_sequence_2tuple(self):\n simple_sequence = [\n 1234, 1.234, '1234', datetime.date.today(), datetime.datetime.today(), self._get_test_safe_3tuple[1]\n ]\n result_simple_sequence = [str(v) if not hasattr(v, '__html__') else v.__html__() for v in simple_sequence]\n return simple_sequence, result_simple_sequence", "def comp_seq(self):\n\n\t\tcomp_base = str.maketrans(\"atgc\",\"tacg\")\n\t\tcomp_dna = self.dna_seq.translate(comp_base)\n\t\tcomp_dna = Sequence(comp_dna)\n\n\t\treturn(comp_dna)", "def getreadinfo(samfname):\n f = open(samfname)\n firstbases=[]\n seqs = []\n quals = []\n for line in f:\n if line[0] != '@':\n v = line.split()\n seq = v[9]\n seqlen = len(seq)\n noindelCIGARstr = str(seqlen) + \"M\"\n if v[5] == noindelCIGARstr: ## check to see if CIGAR string indicates no indels\n firstbases.append(int(v[3]))\n seqs.append(seq)\n quals.append(v[10])\n return firstbases,seqs,quals" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Raise ImportError with a detailed error message if matplotlib is not installed. Functionality requiring matplotlib should call this helper and then lazily import it.
def check_matplotlib_support(caller_name: str) -> None:
    try:
        import matplotlib  # type: ignore
    except ImportError as e:
        raise ImportError(
            caller_name + " requires matplotlib. You can "
            "install matplotlib with `pip install matplotlib`."
        ) from e
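A hypothetical caller illustrating the intended check-then-lazy-import pattern; the function name and body below are made up for illustration and assume the helper above is in scope.

def plot_learning_curve(values):
    check_matplotlib_support("plot_learning_curve")
    import matplotlib.pyplot as plt  # safe to import lazily: the check above succeeded

    fig, ax = plt.subplots()
    ax.plot(values)
    return fig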
[ "def import_matplotlib_pyplot(funcname=\"XU\"):\n try:\n from matplotlib import pyplot as plt\n\n # from .mpl_helper import SqrtAllowNegScale\n return True, plt\n except ImportError: # print(d['qvec'][m][ind['ind'][0]])\n print(\"%s: Warning: plot functionality not available\" % funcname)\n return False, None", "def _import_pyplot():\n try:\n from matplotlib import pyplot as plt\n from mpl_toolkits.mplot3d import axes3d\n except Exception as e:\n raise ImportError('Failed to import `matplotlib.pyplot`')\n return plt", "def module_exists():\r\n try:\r\n import matplotlib.pyplot\r\n return True\r\n except ModuleNotFoundError:\r\n print('Module for graph not found. Please exit and install the matplotlib module (https://matplotlib.org/) '\r\n 'to view the graph.')", "def test_unfrozen_importlib(self):\n try:\n request = self.rf.get(\"/test_view/\")\n importlib.import_module(\"abc.def.invalid.name\")\n except Exception:\n exc_type, exc_value, tb = sys.exc_info()\n reporter = ExceptionReporter(request, exc_type, exc_value, tb)\n html = reporter.get_traceback_html()\n self.assertInHTML(\"<h1>ModuleNotFoundError at /test_view/</h1>\", html)", "def import_or_raise(library, error_msg=None, warning=False):\n try:\n return importlib.import_module(library)\n except ImportError:\n if error_msg is None:\n error_msg = \"\"\n msg = (f\"Missing optional dependency '{library}'. Please use pip to install {library}. {error_msg}\")\n if warning:\n warnings.warn(msg)\n else:\n raise ImportError(msg)\n except Exception as ex:\n msg = (f\"An exception occurred while trying to import `{library}`: {str(ex)}\")\n if warning:\n warnings.warn(msg)\n else:\n raise Exception(msg)", "def handleImportError(self, exception):\n first = exception.args[0]\n if first.find('No module named ') < 0:\n raise\n module = first[len('No module named '):]\n module = module.split('.')[0]\n\n if module in self._deps.keys():\n dep = self._deps[module]\n sys.stderr.write(\"Could not import python module '%s'\\n\" % module)\n sys.stderr.write('This module is part of %s.\\n' % dep.name)\n\n self.handleMissingDependency(dep)\n\n # how to confirm the python module got installed\n sys.stderr.write(\"\\n\")\n sys.stderr.write(\n 'You can confirm it is installed by starting Python and running:\\n')\n sys.stderr.write('import %s\\n' % module)\n\n return\n\n # re-raise if we didn't have it\n raise", "def _fix_matplotlib_backend() -> None:\n try:\n backend = matplotlib.get_backend()\n except Exception: # skipcq:PYL-W0703\n logger.error(\n \"Cannot retrieve Matplotlib backend, likely due to a compatibility \"\n \"issue with system dependencies. 
Please refer to the documentation: \"\n \"https://matplotlib.org/stable/tutorials/introductory/usage.html#backends\"\n )\n raise\n\n # At first, matplotlib will be initialized with default OS-specific\n # available backend\n if backend == \"TkAgg\":\n try:\n # on OSX sometimes the tkinter package is broken and can't be imported.\n # we'll try to import it and if it fails we will use a different backend\n import tkinter\n except (ImportError, ModuleNotFoundError):\n logger.debug(\"Setting matplotlib backend to 'agg'\")\n matplotlib.use(\"agg\")\n\n # if no backend is set by default, we'll try to set it up manually\n elif backend is None: # pragma: no cover\n try:\n # If the `tkinter` package is available, we can use the `TkAgg` backend\n import tkinter\n\n logger.debug(\"Setting matplotlib backend to 'TkAgg'\")\n matplotlib.use(\"TkAgg\")\n except (ImportError, ModuleNotFoundError):\n logger.debug(\"Setting matplotlib backend to 'agg'\")\n matplotlib.use(\"agg\")", "def test_import_function_missing_module():\n with pytest.raises(ModuleNotFoundError):\n some_function = import_function(\"should_not_exist\", \"some_function\")\n some_function()", "def has_matplotlib(self):\r\n if self._found_matplotlib is None:\r\n self.__check_matplotlib()\r\n return self._found_matplotlib", "def _needs_matplotlib_backend(func: FuncType) -> FuncType:\n\n @wraps(func)\n def inner(*args: Any, **kwargs: Any) -> ReturnType: # type: ignore\n \"\"\"Replacement function that fixes matplotlib backend.\"\"\"\n global _MATPLOTLIB_BACKEND_FIXED\n if not _MATPLOTLIB_BACKEND_FIXED:\n _fix_matplotlib_backend()\n _MATPLOTLIB_BACKEND_FIXED = True\n return func(*args, **kwargs)\n\n return inner", "def uti_plot_init(backend=DEFAULT_BACKEND, fname_format=None):\n global _backend\n if backend is not None:\n try:\n import tests.utilities.uti_plot_matplotlib as uti_plot_matplotlib\n _backend = uti_plot_matplotlib.Backend(backend, fname_format)\n return\n except:\n traceback.print_exc()\n print(backend + ': unable to import specified backend; no plots')\n elif fname_format is not None:\n raise Value(fname_format + ': fname_format must be null if backend is None')\n _backend = _BackendNone()", "def _validate_visualization_packages(package):\n if not INSTALLED_VISUALIZATION_PACKAGES[package]:\n raise ValueError(\n \"The \" + package + \" has not been installed. 
To utilize the \"\n \"mass visualization functions, all visualization packages must be\"\n \"installed.\"\n )", "def redirect_importerror_to_optionalimporterror() -> Iterator[None]:\n try:\n yield\n except ImportError as e: # pylint: disable=invalid-name\n raise OptionalImportError from e", "def failedImport(self, name):\r\n message = 'Failed to import test module: %s' % name\r\n if hasattr(traceback, 'format_exc'):\r\n # Python 2.3 compatibility\r\n # format_exc returns two frames of discover.py as well XXX ?\r\n message += '\\n%s' % traceback.format_exc()\r\n return self._makeFailedTest(\r\n 'ModuleImportFailure', name, ImportError(message))", "def test_load_fail(self):\n with self.assertRaises(ImportError):\n self.loader.load_plugin('nomodule')", "def pil_check():\n try:\n import PIL\n except ImportError:\n raise ImportError(\"Please install PIL to use this feature: \"\n \"https://pillow.readthedocs.io/en/latest\"\n \"/installation.html\")", "def testImport(self):\n success = False\n try:\n from cutlass import Metabolome\n success = True\n except Exception:\n pass\n\n self.failUnless(success)\n self.failIf(Metabolome is None)", "def test_bad_module(self):\n callable_name = 'mumbojumbo.thefunc'\n with self.assertRaises(ImportError):\n CallableLoader().load(callable_name)", "def test_load_invalid( self ):\n\n # Path to nowhere\n invalid_path = '/fake/path/to/module.py'\n\n # Make sure the normal ImportError is raised.\n self.assertRaises( ImportError, screener.load, invalid_path )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Raise ImportError with a detailed error message if vowpalwabbit is not installed. Functionality requiring vowpalwabbit should call this helper and then lazily import it.
def check_vowpal_support(caller_name: str) -> None:
    try:
        import vowpalwabbit  # type: ignore
    except ImportError as e:
        raise ImportError(
            caller_name + " requires vowpalwabbit. You can "
            "install vowpalwabbit with `pip install vowpalwabbit`."
        ) from e
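The matplotlib, vowpalwabbit, and pandas checkers shown in these records follow the same shape. Purely as a design note, and not part of the original code, they could be collapsed into one parameterized helper such as the sketch below.

import importlib

def check_soft_dependency(package: str, caller_name: str) -> None:
    # Generic variant of the checkers above; the name and signature are assumptions.
    try:
        importlib.import_module(package)
    except ImportError as e:
        raise ImportError(
            f"{caller_name} requires {package}. You can "
            f"install {package} with `pip install {package}`."
        ) from e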
[ "def test_import_not_found(self):\n try:\n import_version('bogus', Requirement.parse('bogus==1.0'),\n Environment(['plugins']))\n except DistributionNotFound, err:\n self.assertEqual(str(err),\n 'could not find distribution satisfying bogus==1.0')", "def handleImportError(self, exception):\n first = exception.args[0]\n if first.find('No module named ') < 0:\n raise\n module = first[len('No module named '):]\n module = module.split('.')[0]\n\n if module in self._deps.keys():\n dep = self._deps[module]\n sys.stderr.write(\"Could not import python module '%s'\\n\" % module)\n sys.stderr.write('This module is part of %s.\\n' % dep.name)\n\n self.handleMissingDependency(dep)\n\n # how to confirm the python module got installed\n sys.stderr.write(\"\\n\")\n sys.stderr.write(\n 'You can confirm it is installed by starting Python and running:\\n')\n sys.stderr.write('import %s\\n' % module)\n\n return\n\n # re-raise if we didn't have it\n raise", "def verify_fail(self):\n raise MissingDependencyError(self, self.installed_version)", "def test_unfrozen_importlib(self):\n try:\n request = self.rf.get(\"/test_view/\")\n importlib.import_module(\"abc.def.invalid.name\")\n except Exception:\n exc_type, exc_value, tb = sys.exc_info()\n reporter = ExceptionReporter(request, exc_type, exc_value, tb)\n html = reporter.get_traceback_html()\n self.assertInHTML(\"<h1>ModuleNotFoundError at /test_view/</h1>\", html)", "def test_oldZopeInterface(self):\r\n with SetAsideModule(\"zope\"):\r\n _install(_zope35)\r\n with self.assertRaises(ImportError) as raised:\r\n _checkRequirements()\r\n self.assertEqual(\r\n \"Twisted requires zope.interface 3.6.0 or later.\",\r\n str(raised.exception))", "def test_oldZopeInterface(self):\r\n with SetAsideModule(\"zope\"):\r\n _install(_zope38)\r\n with self.assertRaises(ImportError) as raised:\r\n _checkRequirements()\r\n self.assertEqual(\r\n \"Twisted requires zope.interface 4.0.0 or later.\",\r\n str(raised.exception))", "def test_puppet_import_invalid_mod(self):\n with self.assertRaises(ForgeNotFound):\n call_command_ret('puppet_import', '--module', 'puppetlabs-wrong_name')", "def test_load_fail(self):\n with self.assertRaises(ImportError):\n self.loader.load_plugin('nomodule')", "def ensure_environment() -> None:\n import warnings\n\n try:\n import discord\n except ImportError:\n raise RuntimeError(\"discord.py is not installed!\") from None\n\n try:\n version_info = discord.version_info\n if version_info.major != 1:\n raise RuntimeError(f\"discord.py library major version 1 needed, not {version_info.major}\") from None\n\n if version_info.minor not in {0, 1}:\n warnings.warn(f\"This bot was written for version 1.0.0, you're using {version_info}. \"\n f\"No guarantee that things will work out\")\n\n except Exception:\n warnings.warn(\"Couldn't access discord's version information! \"\n \"Don't be surprised if something doesn't work as it should\")", "def test_missingZopeInterface(self):\r\n with SetAsideModule(\"zope\"):\r\n # Create a minimal module to represent the zope namespace package,\r\n # but don't give it an \"interface\" attribute.\r\n sys.modules[\"zope\"] = ModuleType(\"zope\")\r\n with self.assertRaises(ImportError) as raised:\r\n _checkRequirements()\r\n self.assertEqual(\r\n \"Twisted requires zope.interface %d.%d.%d or later: no module \"\r\n \"named zope.interface.\" % self.supportedZopeInterfaceVersion,\r\n str(raised.exception))", "def _handleImportError(self, reason):\r\n \r\n message = \"Cannot import '%s'. 
Reason: '%s'\" % (self.name, reason)\r\n _log.warning(message)\r\n return UnknownDomainObject", "def test_raises_unknown_module(self):\n self.assertRaises(NonExistentModuleError, factorize, module='unknown', object_type='UnknownClass')", "def test_python3_2_raises_exception(monkeypatch):\n import blessed\n\n monkeypatch.setattr('sys.version_info', (3, 2, 2, 'final', 0))\n\n try:\n reload_module(blessed)\n except ImportError as err:\n assert err.args[0] == (\n 'Blessed needs Python 3.2.3 or greater for Python 3 '\n 'support due to http://bugs.python.org/issue10570.')\n monkeypatch.undo()\n reload_module(blessed)\n else:\n assert False, 'Exception should have been raised'", "def test_puppet_import_invalid_user(self):\n with self.assertRaises(ForgeNotFound):\n call_command_ret('puppet_import', '--module', 'wrong_name-wrong_name')", "def test_missingZopeNamespace(self):\r\n with SetAsideModule(\"zope\"):\r\n # After an import for a module fails, it gets a None value in\r\n # sys.modules as a cache of that negative result. Future import\r\n # attempts see it and fail fast without checking the system again.\r\n sys.modules[\"zope\"] = None\r\n with self.assertRaises(ImportError) as raised:\r\n _checkRequirements()\r\n self.assertEqual(\r\n \"Twisted requires zope.interface %d.%d.%d or later: no module \"\r\n \"named zope.interface.\" % self.supportedZopeInterfaceVersion,\r\n str(raised.exception))", "def check_installed() -> None:\n try:\n spec = importlib.util.find_spec(\"pyscf\") # type: ignore\n if spec is not None:\n return\n except Exception as ex: # pylint: disable=broad-except\n logger.debug(\"PySCF check error %s\", str(ex))\n raise MissingOptionalLibraryError(\n libname=\"PySCF\",\n name=\"PySCFDriver\",\n pip_install=\"pip install 'qiskit-nature[pyscf]'\",\n msg=\"See https://pyscf.org/install.html\",\n ) from ex\n\n raise MissingOptionalLibraryError(\n libname=\"PySCF\",\n name=\"PySCFDriver\",\n pip_install=\"pip install 'qiskit-nature[pyscf]'\",\n msg=\"See https://pyscf.org/install.html\",\n )", "def failedImport(self, name):\r\n message = 'Failed to import test module: %s' % name\r\n if hasattr(traceback, 'format_exc'):\r\n # Python 2.3 compatibility\r\n # format_exc returns two frames of discover.py as well XXX ?\r\n message += '\\n%s' % traceback.format_exc()\r\n return self._makeFailedTest(\r\n 'ModuleImportFailure', name, ImportError(message))", "def test_import_function_missing_module():\n with pytest.raises(ModuleNotFoundError):\n some_function = import_function(\"should_not_exist\", \"some_function\")\n some_function()", "def test_load_invalid( self ):\n\n # Path to nowhere\n invalid_path = '/fake/path/to/module.py'\n\n # Make sure the normal ImportError is raised.\n self.assertRaises( ImportError, screener.load, invalid_path )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Raise ImportError with a detailed error message if pandas is not installed. Functionality requiring pandas should call this helper and then lazily import it.
def check_pandas_support(caller_name: str) -> None:
    try:
        import pandas  # type: ignore
    except ImportError as e:
        raise ImportError(
            caller_name + " requires pandas. You can "
            "install pandas with `pip install pandas`."
        ) from e
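Callers are expected to let the ImportError propagate, but for illustration a hypothetical caller can also surface the message directly. The function below is invented to show the assumed behaviour and relies on the helper above being in scope.

def summary_frame(rows):
    check_pandas_support("summary_frame")
    import pandas as pd

    return pd.DataFrame(rows)

try:
    print(summary_frame([{"a": 1}, {"a": 2}]))
except ImportError as err:
    # e.g. "summary_frame requires pandas. You can install pandas with `pip install pandas`."
    print(err)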
[ "def handleImportError(self, exception):\n first = exception.args[0]\n if first.find('No module named ') < 0:\n raise\n module = first[len('No module named '):]\n module = module.split('.')[0]\n\n if module in self._deps.keys():\n dep = self._deps[module]\n sys.stderr.write(\"Could not import python module '%s'\\n\" % module)\n sys.stderr.write('This module is part of %s.\\n' % dep.name)\n\n self.handleMissingDependency(dep)\n\n # how to confirm the python module got installed\n sys.stderr.write(\"\\n\")\n sys.stderr.write(\n 'You can confirm it is installed by starting Python and running:\\n')\n sys.stderr.write('import %s\\n' % module)\n\n return\n\n # re-raise if we didn't have it\n raise", "def failedImport(self, name):\r\n message = 'Failed to import test module: %s' % name\r\n if hasattr(traceback, 'format_exc'):\r\n # Python 2.3 compatibility\r\n # format_exc returns two frames of discover.py as well XXX ?\r\n message += '\\n%s' % traceback.format_exc()\r\n return self._makeFailedTest(\r\n 'ModuleImportFailure', name, ImportError(message))", "def test_unfrozen_importlib(self):\n try:\n request = self.rf.get(\"/test_view/\")\n importlib.import_module(\"abc.def.invalid.name\")\n except Exception:\n exc_type, exc_value, tb = sys.exc_info()\n reporter = ExceptionReporter(request, exc_type, exc_value, tb)\n html = reporter.get_traceback_html()\n self.assertInHTML(\"<h1>ModuleNotFoundError at /test_view/</h1>\", html)", "def test_import_function_missing_module():\n with pytest.raises(ModuleNotFoundError):\n some_function = import_function(\"should_not_exist\", \"some_function\")\n some_function()", "def _get_import_name(self):\n pandas_import_name = None\n for script_line in self.script_body:\n if script_line['_type'] == 'Import':\n if script_line['names'][0]['asname'] and \\\n script_line['names'][0]['name'] == 'pandas':\n pandas_import_name = script_line['names'][0]['asname']\n elif script_line['names'][0]['name'] == 'pandas':\n pandas_import_name = 'pandas'\n\n if pandas_import_name:\n return pandas_import_name\n\n raise RuntimeError('No pandas module import found')", "def redirect_importerror_to_optionalimporterror() -> Iterator[None]:\n try:\n yield\n except ImportError as e: # pylint: disable=invalid-name\n raise OptionalImportError from e", "def import_or_raise(library, error_msg=None, warning=False):\n try:\n return importlib.import_module(library)\n except ImportError:\n if error_msg is None:\n error_msg = \"\"\n msg = (f\"Missing optional dependency '{library}'. Please use pip to install {library}. {error_msg}\")\n if warning:\n warnings.warn(msg)\n else:\n raise ImportError(msg)\n except Exception as ex:\n msg = (f\"An exception occurred while trying to import `{library}`: {str(ex)}\")\n if warning:\n warnings.warn(msg)\n else:\n raise Exception(msg)", "def show_dataframe_not_present_error():\n print(\"Pandas dataframe not present.\")\n print(\"Read the data file first. 
Use read_csv function to read the datafile\")", "def test_pandas_pkg(self): \n \n # Tests fail with an error \"undefined symbol: xstrtod\"\n # on ppc64le for python 3.8 \n try:\n from pandas.testing import assert_frame_equal\n \n df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})\n df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})\n logging.info(\"validating Pandas dataframes, values of df1 and df2 {} {}\".format(df1, df2))\n assert_frame_equal(df1, df2, check_dtype=False)\n except ImportError as e:\n print(e)", "def attempt_import(name, error_message=None, only_catch_importerror=True,\n minimum_version=None):\n try:\n module = importlib.import_module(name)\n if minimum_version is None:\n return module, True\n elif _check_version(module, minimum_version):\n return module, True\n elif error_message:\n error_message += \" (version %s does not satisfy the minimum \" \\\n \"version %s)\" % (\n module.__version__, minimum_version)\n else:\n error_message = \"The %s module version %s does not satisfy \" \\\n \"the minimum version %s\" % (\n name, module.__version__.minimum_version)\n except ImportError:\n pass\n except:\n if only_catch_importerror:\n raise\n\n if not error_message:\n error_message = \"The %s module (an optional Pyomo dependency) \" \\\n \"failed to import\" % (name,)\n return ModuleUnavailable(error_message), False", "def init_pandas_structure():\n\n # Test that allows to ignore certain Pandas submodules, classes, or attributes\n def _skip_pandas_test(obj):\n if ismodule(obj):\n name = obj.__name__\n for mod_name in exclude_pandas_submodules:\n if name.startswith(mod_name):\n return True\n return not name.startswith('pandas')\n\n global pandas_modules\n pandas_modules = dict()\n inspected_mods = set()\n init_module_structure(pandas, pandas_modules, inspected_mods, _skip_pandas_test)", "def test_import_function_missing_function():\n with pytest.raises(AttributeError):\n some_function = import_function(\"anndata\", \"some_function\")\n some_function()", "def test_bad_module(self):\n callable_name = 'mumbojumbo.thefunc'\n with self.assertRaises(ImportError):\n CallableLoader().load(callable_name)", "def _handleImportError(self, reason):\r\n \r\n message = \"Cannot import '%s'. Reason: '%s'\" % (self.name, reason)\r\n _log.warning(message)\r\n return UnknownDomainObject", "def _importAndCheckStack(importName):\n try:\n try:\n return __import__(importName)\n except ImportError:\n excType, excValue, excTraceback = sys.exc_info()\n while excTraceback:\n execName = excTraceback.tb_frame.f_globals[\"__name__\"]\n if (\n execName is None\n or execName == importName # python 2.4+, post-cleanup\n ): # python 2.3, no cleanup\n raise excType(excValue).with_traceback(excTraceback)\n excTraceback = excTraceback.tb_next\n raise _NoModuleFound()\n except:\n # Necessary for cleaning up modules in 2.3.\n sys.modules.pop(importName, None)\n raise", "def test_import_not_found(self):\n try:\n import_version('bogus', Requirement.parse('bogus==1.0'),\n Environment(['plugins']))\n except DistributionNotFound, err:\n self.assertEqual(str(err),\n 'could not find distribution satisfying bogus==1.0')", "def fail_noexcel(self):\n self.fail(\"no excel was available, Pls. 
install one\")", "def _loadDataWithPandas(self):\r\n\r\n dataFileName, delim = self._internalModel.dataFileAndDelimiter()\r\n try:\r\n data = pd.read_csv(dataFileName, delimiter=delim, header=0)\r\n return data, dataFileName\r\n except IOError as e:\r\n QMessageBox.critical(self, 'Could not load file {}'.format(dataFileName),\r\n str(e), QMessageBox.Ok)\r\n return None, None", "def test_get_specs_for_module():\n\n # Return None for non-existent modules...\n specs = m.get_specs_for_module(module_name='SomeModuleThatDoesNotExist')\n assert specs is None\n\n # ... but raise an error if explicitly told to do so\n with pytest.raises(ImportError):\n m.get_specs_for_module(module_name='SomeModuleThatDoesNotExist', raise_error=True)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get RAID configuration by serial number.
def get(self, sn):
    config, err = self.dbapi.get_raid_config_by_sn(sn)
    if err or config is None:
        LOG.error("error fetching configuration by given sn %s" % sn)
        return { 'is_ok': False }
    else:
        LOG.info("successfully get configuration by given serial number")
        return {
            'is_ok': True,
            'config': config.config,
            'sn': config.sn
        }
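The surrounding handler class, its dbapi, and LOG are not shown in this record. Purely as an illustration of the (config, err) contract the handler relies on, a stub database layer for unit tests might look like the sketch below; all names here are assumptions.

class FakeRaidConfig:
    def __init__(self, sn, config):
        self.sn = sn
        self.config = config

class FakeDbApi:
    def __init__(self, rows):
        self._rows = rows  # maps serial number -> FakeRaidConfig

    def get_raid_config_by_sn(self, sn):
        row = self._rows.get(sn)
        if row is None:
            return None, "no raid config for sn %s" % sn
        return row, None

db = FakeDbApi({"SN123": FakeRaidConfig("SN123", {"level": "raid10"})})
print(db.get_raid_config_by_sn("SN123"))    # (config, None)  -> handler answers is_ok=True
print(db.get_raid_config_by_sn("missing"))  # (None, error)   -> handler answers is_ok=False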
[ "def _get_raid(self, raid_devicefile):\r\n if self._data is not None:\r\n for raid in self._data[\"raid\"]:\r\n if raid[\"devicefile\"] == raid_devicefile:\r\n return raid", "def get_device_index_by_serial(serial):\n if PY3 and isinstance(serial, str):\n serial = bytes(serial, 'UTF-8')\n\n result = librtlsdr.rtlsdr_get_index_by_serial(serial)\n if result < 0:\n raise IOError('Error code %d when searching device by serial' % (result))\n\n return result", "def _get_raid_adapter(node):\n irmc_info = irmc_common.parse_driver_info(node)\n LOG.info('iRMC driver is gathering RAID adapter info for node %s',\n node.uuid)\n try:\n return client.elcm.get_raid_adapter(irmc_info)\n except client.elcm.ELCMProfileNotFound:\n reason = ('Cannot find any RAID profile in \"%s\"' % node.uuid)\n raise exception.IRMCOperationError(operation='RAID config',\n error=reason)", "def devicefile_from_raid(self, disk):\r\n for raid in self.raids:\r\n for device in self.raid_devices(raid):\r\n if device[0:-1] == disk[\"devicefile\"]:\r\n print(raid)\r\n return raid\r\n\r\n return raid", "def get_mdadm_detail_raid_devices(self):\n\t\n\tdict = self.get_mdadm_detail()\n\t\n\tif not dict:\n\t return None\n\t\n\t#print dict\n\tdevices = dict['devices']\n\traidLevel = dict['Raid Level']\n\t\n\t# remove raid from raid0\n\tm = re.search(\"raid(\\d+)\",raidLevel)\n\traidLevel = m.group(1)\n\t\n\t\n\treturn (devices,len(devices),raidLevel)", "def get_device(self, sn):\n return self.fetch_data(\"devices/{}\".format(sn))", "def configure_raid(ctx, profile, configuration):\n\n raid_recipe = RAIDRecipe(ctx.obj['client'])\n raid_recipe.configure_raid(configuration, profile=profile)", "def getRaidDeviceDetails (self, diskguid, jobguid = \"\", executionparams = {}):\n params =dict()\n params['diskguid'] = diskguid\n executionparams['rootobjecttype'] = 'cmc'\n\n \n return q.workflowengine.actionmanager.startRootobjectActionSynchronous('cmc', 'getRaidDeviceDetails', params, jobguid=jobguid, executionparams=executionparams)", "def get_NSBconfig(self):\r\n len_data = 0\r\n len_data_bytes = len_data.to_bytes(2, 'little')\r\n bytes_to_write = bytes([0x06]) + len_data_bytes\r\n\r\n try:\r\n self.instr.write_raw(bytes_to_write)\r\n data = self.instr.read_raw()\r\n\r\n # print(int.from_bytes(data[0:1], 'little')) # read uart cmd byte\r\n # print(int.from_bytes(data[1:3], 'big')) # length of data\r\n\r\n # config byte structure:\r\n # ------------------------------------------------------#\r\n # bit7, bit6, bit5, bit4 | bit3, bit2, bit1 | bit0 #\r\n # RS_CHAR_8 | RS_PARITY_NONE | RS_STOP_1 #\r\n # ------------------------------------------------------#\r\n config_byte = data[3]\r\n fifo = (config_byte & 0x80) >> 7\r\n numbits = ((config_byte & 0x70) >> 4) + 5\r\n parity = (config_byte & 0x06) >> 1\r\n stopbits = (config_byte & 0x01) + 1\r\n\r\n baud = int.from_bytes(data[4:8], 'little')\r\n m_timo = int.from_bytes(data[8:12], 'little')\r\n b_timo = int.from_bytes(data[12:16], 'little')\r\n\r\n config = {\r\n \"baud\": baud,\r\n \"numbits\": numbits,\r\n \"parity\": parity,\r\n \"stopbits\": stopbits,\r\n \"m_timo\": m_timo,\r\n \"b_timo\": b_timo,\r\n \"fifo\": fifo\r\n }\r\n\r\n return config\r\n except ValueError:\r\n print(\"uart failed read\")", "def raids(self):\r\n if self._data is not None:\r\n raids = []\r\n for raid in self._data[\"raid\"]:\r\n raids.append(raid[\"devicefile\"])\r\n return raids", "def get_device_id_config():\n stdout, stderr, rc = bsu.bmc_execute_command(\"cat /usr/share/ipmi-providers/dev_id.json\")\n\n result = 
json.loads(stdout)\n\n # Create device revision field for the user.\n # Reference IPMI specification v2.0 \"Get Device ID Command\"\n # [7] 1 = device provides Device SDRs\n # 0 = device does not provide Device SDRs\n # [6:4] reserved. Return as 0.\n # [3:0] Device Revision, binary encoded.\n\n result['device_revision'] = result['revision'] & 0x0F\n\n return result", "def get_data_from_discovered_bridges(self, serialno):\n result = {}\n for db in self.discovered_bridges:\n if db['serialNumber'] == serialno:\n result = db\n break\n if result == {}:\n # if bridge is not in list of discovered bridges, rediscover bridges and try again\n self.discovered_bridges = self.discover_bridges()\n for db in self.discovered_bridges:\n if db['serialNumber'] == serialno:\n result = db\n break\n\n if result != {}:\n api_config = self.get_api_config_of_bridge(result.get('URLBase',''))\n result['datastoreversion'] = api_config.get('datastoreversion', '')\n result['apiversion'] = api_config.get('apiversion', '')\n result['swversion'] = api_config.get('swversion', '')\n\n return result", "def get_config(self) -> None:\n body = Helpers.req_body(self.manager, 'devicedetail')\n body['method'] = 'configurations'\n body['uuid'] = self.uuid\n\n r, _ = Helpers.call_api(\n '/131airpurifier/v1/device/configurations',\n 'post',\n headers=Helpers.req_headers(self.manager),\n json_object=body,\n )\n\n if r is not None and Helpers.code_check(r):\n self.config = Helpers.build_config_dict(r)\n else:\n logger.debug('Unable to get config info for %s',\n self.device_name)", "def get_raids(ns):\n for raid in ns.LMI_MDRAIDStorageExtent.instances():\n yield raid", "def get_device_config(controller, ticket, id):\n print(\"Fetching Config for device id {}\".format(id))\n url = controller + (\"network-device/{}/config\".format(id))\n header = {\"content-type\": \"application/json\", \"X-Auth-Token\": ticket}\n response = requests.get(url, headers=header, verify=False)\n r_json = response.json()\n config = r_json['response']\n return config", "def get_specific_attribute_value_in_libgen_configuration_file(master_name_index,attribute_index,ru_type): \n\n command=\"sed -n '/%s/,/^[[:space:]]*$/p' /etc/LibgenConfig_%s.ini | grep %s\"%(master_name_index,ru_type,attribute_index)\n out = connections.execute_mml_without_check(command)\n\n if out.count('=') == 1: \n return out.split(\"=\")[1].strip()", "def get_config(self):\r\n len_data = 0\r\n len_data_bytes = len_data.to_bytes(2, 'little')\r\n bytes_to_write = bytes([0x04]) + len_data_bytes\r\n\r\n try:\r\n self.instr.write_raw(bytes_to_write)\r\n data = self.instr.read_raw()\r\n\r\n # we already know these values\r\n # print(int.from_bytes(data[0:1], 'little')) # read uart cmd byte\r\n # print(int.from_bytes(data[1:3], 'big')) # length of data\r\n\r\n config_byte = data[3]\r\n numbits = ((config_byte & 0xf0) >> 4) + 5\r\n parity = (config_byte & 0x06) >> 1\r\n stopbits = (config_byte & 0x01) + 1\r\n\r\n baud = int.from_bytes(data[4:8], 'little')\r\n m_timo = int.from_bytes(data[8:12], 'little')\r\n b_timo = int.from_bytes(data[12:16], 'little')\r\n\r\n config = {\r\n \"baud\": baud,\r\n \"numbits\": numbits,\r\n \"parity\": parity,\r\n \"stopbits\": stopbits,\r\n \"m_timo\": m_timo,\r\n \"b_timo\": b_timo\r\n }\r\n\r\n return config\r\n except ValueError:\r\n print(\"uart failed read\")", "def arm_config(self):\n return self._arm_config", "def getConfig(ir, memconfig_file_path):\n if not os.path.isfile(memconfig_file_path):\n print(\"\\tUser did not specify any arrays to partition\")\n 
return []\n\n configFile = open(memconfig_file_path,'r')\n partitions=[]\n while True:\n # Read line by line and exit when done\n line = configFile.readline()\n if not line:\n break\n partition = line.split()[0]\n dimention_to_partition= line.split()[1]\n settings = line.split()[2:]\n partitions.append([partition,dimention_to_partition,settings])\n return partitions" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Resolves to the final node instance that can be used to perform the matching
def _resolve_target_node_from_path(self, node):
    if not self.path:
        return node
    for path in self.path.split("."):
        # Since a node type can have multiple paths to a given parent,
        # this check allows instances that do not have this specific path
        if len(node[path]) == 0:
            return None
        node = node[path][0]
    return node
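A standalone illustration of the same dotted-path walk, substituting plain dicts of lists for the graph model's relationship accessors; the sample data and names below are made up.

def resolve_path(node, path):
    if not path:
        return node
    for part in path.split("."):
        children = node.get(part, [])
        if not children:
            return None
        node = children[0]
    return node

sample = {"cases": [{"samples": [{"id": "s1"}]}]}
print(resolve_path(sample, "cases.samples"))  # {'id': 's1'}
print(resolve_path(sample, "cases.missing"))  # None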
[ "def _object_resolution(self, object_to_resolve):\n # Below is the original comment that came with the code extracted in\n # this method. It is not very clear but I decided to keep it just in\n # case\n #\n #\n # For some reason, while the type of `a_result_attribute[1]`\n # as reported by the neo4j driver is `Node` for Node-type data\n # retrieved from the database.\n # When the retrieved data are Relationship-Type,\n # the returned type is `abc.[REL_LABEL]` which is however\n # a descendant of Relationship.\n # Consequently, the type checking was changed for both\n # Node, Relationship objects\n if isinstance(object_to_resolve, Node):\n return self._NODE_CLASS_REGISTRY[\n frozenset(object_to_resolve.labels)\n ].inflate(object_to_resolve)\n\n if isinstance(object_to_resolve, Relationship):\n rel_type = frozenset([object_to_resolve.type])\n return self._NODE_CLASS_REGISTRY[rel_type].inflate(object_to_resolve)\n\n if isinstance(object_to_resolve, Path):\n from .path import NeomodelPath\n\n return NeomodelPath(object_to_resolve)\n\n if isinstance(object_to_resolve, list):\n return self._result_resolution([object_to_resolve])\n\n return object_to_resolve", "def resolutionNode():\n pass", "def _resolve_ref(elem: Union[str, MatchableType]) -> MatchableType:\n ...", "def resolve_xref(self, env, fromdocname, builder,\n typ, target, node, contnode):\n # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA\n for fullname, (docname, objtype) in self.data['objects'].items():\n if fullname.name == target:\n return make_refnode(builder, fromdocname, docname, fullname2id(fullname), contnode, fullname.name)\n return None", "def find_best_matching_node(self, new, old_nodes):\n name = new.__class__.__name__\n #: TODO: We should pick the BEST one from this list\n #: based on some \"matching\" criteria (such as matching ref name or params)\n matches = [c for c in old_nodes if name == c.__class__.__name__]\n if self.debug:\n print(\"Found matches for {}: {} \".format(new, matches))\n return matches[0] if matches else None", "def match(self, node, results=None):\r\n if self.type is not None and node.type != self.type:\r\n return False\r\n if self.content is not None:\r\n r = None\r\n if results is not None:\r\n r = {}\r\n if not self._submatch(node, r):\r\n return False\r\n if r:\r\n results.update(r)\r\n if results is not None and self.name:\r\n results[self.name] = node\r\n return True", "def generic_visit(self, node):\n if (\n not self.replaced\n and hasattr(node, \"_location\")\n and node._location == self.search\n ):\n self.replaced = True\n return self.replacement_node\n else:\n return NodeTransformer.generic_visit(self, node)", "def get_object(self, node2, label):\n for node1, _, _, _ in self.get_edges(None, label, node2):\n return node1\n return None", "def resolve(self):\n\t\traise NotImplementedError()", "def find_node(self, primary_label, primary_property, primary_property_value):\n try:\n query = 'MATCH (n:`%s`{`%s`:%r}) return n' % (primary_label, primary_property, primary_property_value)\n result = self.graph.evaluate(query)\n return result\n except Exception as error:\n print(error)\n return None", "def match(self, node):\r\n results = {\"node\": node}\r\n return self.pattern.match(node, results) and results", "def suggested_node(self):\n for _ in range(0, len(self.node.children)):\n if self.current_idx == len(self.node.children):\n self.current_idx = 0\n node = self.node.children[self.current_idx]\n if node:\n return node\n\n return None", 
"def Item(self) -> Match:", "def map_to(self, ns1, id1, ns2):\n targets = [target for target in\n self.descendants_rel(ns1, id1, {'xref'})\n if target[0] == ns2]\n if len(targets) == 1:\n return targets[0]\n return None", "def lookup(self):", "def find_node(self, name_or_representation):\n for node in self.all_nodes:\n if name_or_representation.lower() in {node.name.lower(), \\\n node.representation.lower()}:\n return node\n #if name_or_representation -ends\n #for node -ends\n return None", "def matchRefPos(self, node):\n\t\tself.refPos = node.pos()", "def suggested_node(self):\n pass", "def new_resolve(self, context):\n try:\n value = old_resolve(self, context)\n except template_base.VariableDoesNotExist as e:\n # if it's not a variable that's allowed to not exist then raise a\n # base Exception so Nodes can't catch it (which will make the test\n # fail)\n if self.var not in allowed_undefined_variables:\n raise Exception(e)\n\n # re-raise the original and let the individual Nodes deal with it\n # however they'd like\n raise e\n\n return value" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if a node has a value matching the prop and values fields; if it does, that particular instance will not participate in the tagging process.
def match(self, node):
    node = self._resolve_target_node_from_path(node)
    return node and node[self.prop] in self.values
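A minimal self-contained matcher with the same prop/values semantics, shown only to make the exclusion rule concrete; the class and field values are illustrative, and the path-resolution step is omitted.

class SimpleMatcher:
    def __init__(self, prop, values):
        self.prop = prop
        self.values = set(values)

    def match(self, node):
        # A truthy match means the node is excluded from tagging.
        return node is not None and node.get(self.prop) in self.values

matcher = SimpleMatcher(prop="project_id", values=["SKIP-ME"])
print(matcher.match({"project_id": "SKIP-ME"}))  # True  -> excluded
print(matcher.match({"project_id": "KEEP-ME"}))  # False -> participates in tagging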
[ "def tag_value_input_test(node, tag, old, new):\n return True", "def _value_match(self, pattern, value):\n v1, t1, l1 = value.get('@value'), value.get('@type'), value.get('@language')\n v2 = JsonLdProcessor.get_values(pattern, '@value')\n t2 = JsonLdProcessor.get_values(pattern, '@type')\n l2 = JsonLdProcessor.get_values(pattern, '@language')\n\n if not v2 and not t2 and not l2:\n return True\n if not (v1 in v2 or _is_empty_object(v2[0])):\n return False\n if not ((not t1 and not t2) or (t1 in t2) or (t1 and t2 and _is_empty_object(t2[0]))):\n return False\n if not ((not l1 and not l2) or (l1 in l2) or (l1 and l2 and _is_empty_object(l2[0]))):\n return False\n return True", "def contains(self, prop):\n pass", "def is_property_instance(cls, elem):\n # We can't use ExtendedFieldURI.from_xml(). It clears the XML element, but we may not want to consume it here.\n kwargs = {\n f.name: f.from_xml(elem=elem.find(ExtendedFieldURI.response_tag()), account=None)\n for f in ExtendedFieldURI.FIELDS\n }\n xml_obj = ExtendedFieldURI(**kwargs)\n cls_obj = cls.as_object()\n return cls._normalize_obj(cls_obj) == cls._normalize_obj(xml_obj)", "def instance_property_exists(self, instance, property, value):\n if instance[0:3] == \"WD_\":\n instance = get_wbs_local_id(instance[3:])\n entity_number = wb_SQL_query(self.label, \"item\")\n for ID in entity_number:\n item = WBItem(ID=ID)\n item_array = item.get_value('WD_P31')\n for instance_item in item_array:\n if instance_item == instance:\n value_array = item.get_value(property)\n for value_item in value_array:\n if value_item == value:\n return ID\n return None", "def has_attr(product):\n if len(product.attribute_value_ids) > 0:\n return True\n return False", "def has_node(gr: nx.Graph, att:str, val:str) -> bool:\n\n return any([node for node in gr.nodes(data=True) if node[1].get(att, None) == val])", "def node_with_attr(attr_name,value):\n result = [ node for node in NODE if attr_name in NODE[node] and NODE[node][attr_name] == value ]\n BuiltIn().log(\"Found %d nodes with condition `%s`=`%s`\" % (len(result),attr_name,value))\n return result", "def is_non_value(self):\n return self.type >= ParmType.NONVALUE_START and\\\n self.type <= ParmType.NONVALUE_END", "def isProperty(self,uid):\n return( self.id2node[uid].group==\"Property\" )", "def _node_match(self, state, pattern, value, flags):\n if '@id' not in value:\n return False\n node_object = state['subjects'][value['@id']]\n return node_object and self._filter_subject(state, node_object, pattern, flags)", "def _region_contains_props(self, region):\n props = region.get(\"props\")\n if props is None:\n return False\n return pandas_query_matches_props(\n self.prev_pois_bool_exp,\n pd.DataFrame(props))", "def _goodNodeData(self, nodeData):\n for val in nodeData:\n if not (type(val) == tuple or type(val) == list):\n return False\n elif len(val) != 2:\n return False\n else:\n v1 = val[0]\n v2 = val[1]\n if not (type(v1) in [int, float] and type(v2) in [int, float]):\n return False\n # Only if every node value is right should we return True\n return True", "def values_equal(self, instance, field, value) -> bool:\n return getattr(instance, field) == value", "def supports_value_search(self):\n return # boolean", "def check_property(instance, attr, reference):\n attr_val = getattr(instance, attr)\n return check_value(attr_val, reference=reference)", "def svn_fs_node_has_props(*args) -> \"svn_boolean_t *\":\n return _fs.svn_fs_node_has_props(*args)", "def match(self,field,value):\n success= -1\n for e in 
self.entities:\n if e.name == field:\n #print(\"Found the Field\")\n if e.value == value:\n #print \"************ Matched the Value **************\"\n success=1\n break \n #else:\n #print (\"Field %s EValue <%s> != <%s>\" % (field,e.value,value))\n return success", "def is_node_with(obj: AnyNode, cls: NodeTypes, val_cls: NodeTypes, **kwargs) -> bool:\n if not isinstance(obj, cls) or not isinstance(obj.value, val_cls):\n return False\n if kwargs:\n return all(getattr(obj, k).lower() == v for k, v in kwargs.items())\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes unique tag for given node
def compute_tag(node): keys = node.get_tag_property_values() keys += sorted( compute_tag(p.dst) for p in node.edges_out if p.dst.is_taggable() and p.label != "relates_to" ) return __generate_hash(keys, node.label)
[ "def create_tag( self, node, scene ):\r\n\r\n\t\tnode_is_tag = get_node_properties( node, property_name = 'p_tag_name' )\r\n\t\tif node_is_tag or node.GetName().startswith( 'tag_' ):\r\n\t\t\ttag = Node_Tag( node, scene )\r\n\t\t\tself.tags.append( tag )", "def _(self, node: AnnCastString):\n node_uid = uuid.uuid4()\n self.G.add_node(node_uid, label=f'\"{node.string}\"')\n return node_uid", "def _tag(sent):\n # from nltk.tag.simplify import simplify_brown_tag\n tagged_sent = t2.tag(sent)\n # simplified = [(word, simplify_brown_tag(tag)) for\n # word, tag in tagged_sent]\n return tagged_sent # simplified", "def _(self, node: AnnCastBinaryOp):\n left = self.visit(node.left)\n right = self.visit(node.right)\n node_uid = uuid.uuid4()\n self.G.add_node(node_uid, label=node.op)\n self.G.add_edge(node_uid, left)\n self.G.add_edge(node_uid, right)\n\n return node_uid", "def get_tag_name(self):\n return self.node.tag_name()", "def _(self, node: AnnCastUnaryOp):\n val = self.visit(node.value)\n node_uid = uuid.uuid4()\n self.G.add_node(node_uid, label=node.op)\n self.G.add_edge(node_uid, val)\n\n return node_uid", "def XMLNodeBase(node, justme):\n s = \"<comment>%s</comment>\\n\"%escape(node.comment)\n for idname in ['nodeID', 'nextID','previousID', 'parentID', 'notebookID']:\n value = getattr(node, idname)\n if value is None:\n s += \"<%s></%s>\\n\"%(idname, idname)\n else:\n s += \"<%s>%i</%s>\\n\"%(idname, value, idname)\n s += \"<tags>\"\n s += ','.join([tag.name for tag in node.tags])\n s += \"</tags>\\n\"\n s += \"<dateCreated>%s</dateCreated>\\n\"%(node.dateCreated.strftime(tformat))\n s += \"<dateModified>%s</dateModified>\\n\"%(node.dateModified.strftime(tformat))\n return s", "def get_unique_prefix(self, ast):\n self.visit(ast)\n prefix = \"censor\"\n counter = 0\n while True:\n if not is_a_prefix(prefix + str(counter), self.ids):\n break\n counter += 1\n\n self.ids = set()\n return prefix + str(counter)", "def tag(self):\n return self._tag", "def do_tag_node(self, p: Position, tag: str) -> None:\n c = self.c\n tc = getattr(c, 'theTagController', None)\n if not tc:\n if not g.unitTesting: # pragma: no cover (skip)\n g.es_print('nodetags not active')\n return\n tc.add_tag(p, tag)\n if not g.unitTesting: # pragma: no cover (skip)\n g.es_print(f\"Added {tag} tag to {p.h}\")", "def NodeId(self) -> int:", "def crf_tag(self,sentence):\n entity_tag_R=self.rule_tag(sentence)\n vectorWord=[]\n words=sentence.split()\n idWord=0\n for word in words:\n tag_r=entity_tag_R[idWord]\n result_tag=(word,tag_r)\n vectorWord.append(result_tag)\n idWord+=1\n entity_tag=self.tagger.tag(self.sent2features(vectorWord))\n return entity_tag", "def _compatibility_node_id(self, node):\n return ((node.t_lemma.lower(), node.formeme)\n if self.compatible_dais_type == 'node' else node.t_lemma.lower())", "def tagged_hash(tag, msg):\r\n tag_hash = HASHED_TAGS.get(tag, False)\r\n if not tag_hash:\r\n tag_hash = hash_sha256(tag)\r\n HASHED_TAGS[tag] = tag_hash\r\n return hash_sha256(tag_hash + tag_hash + msg)", "def get_ns_id(node):\n return IndraOntology.reverse_label(node)", "def get_node_id(node):\n return str(node.id)", "def _hash_func(self, node):\n symbols = {'A': \"1\", 'T': \"5\", 'C': \"7\", 'G': \"9\"}\n coded_node = \"\"\n\n for strand in node:\n coded_node += symbols[strand]\n\n return int(coded_node) % self.map_size", "def tag_gen(current_tag, bump_type):\n current_semver = current_tag[1:] # Trim off the 'v'\n new_semver = increment_maker(current_semver, bump_type)\n new_tag = f\"v{new_semver}\"\n\n return 
new_tag", "def _get_private_creator_tag(self, data_element):\n group = data_element.tag.group\n element = (data_element.tag.element & 0xff00) >> 8\n return dicom.tag.Tag(group, element)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
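As a rough illustration of the recursive-hash idea in the row above, here is a self-contained Python sketch. The Node namedtuple and the generate_hash helper are hypothetical stand-ins for the dataset's node model and its private __generate_hash, whose exact behaviour is not shown here.

import hashlib
from collections import namedtuple

Node = namedtuple("Node", ["label", "properties", "children"])

def generate_hash(keys, label):
    # Deterministically hash the node label together with the sorted key material.
    h = hashlib.sha256(label.encode("utf-8"))
    for key in keys:
        h.update(str(key).encode("utf-8"))
    return h.hexdigest()

def compute_tag_sketch(node):
    # Hash a node's own property values plus the (sorted) tags of its children.
    keys = sorted(str(v) for v in node.properties.values())
    keys += sorted(compute_tag_sketch(child) for child in node.children)
    return generate_hash(keys, node.label)

leaf = Node("sample", {"barcode": "A1"}, [])
root = Node("case", {"submitter_id": "case-1"}, [leaf])
print(compute_tag_sketch(root))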
Injects an event listener that sets the tag and version properties on nodes, just before they are inserted
def inject_set_tag_after_insert(cls):
    @event.listens_for(cls, "after_insert")
    def set_node_tag(mapper, conn, node):
        table = node.__table__
        if not node.is_taggable():
            return  # do nothing

        tag = compute_tag(node)
        version = __get_tagged_version(node.node_id, table, tag, conn)
        node._sysan[TagKeys.tag] = tag
        node._sysan[TagKeys.latest] = True
        node._sysan[TagKeys.version] = version

        # update tag and version
        conn.execute(
            table.update()
            .where(table.c.node_id == node.node_id)
            .values(_sysan=node._sysan)
        )
[ "def addNodeAddedCallback(*args, **kwargs):\n \n pass", "def on_pre_sync(self, changed):\n _add_tags(changed)", "def addObservers(self):\n tag = slicer.mrmlScene.AddObserver(slicer.mrmlScene.NodeAddedEvent, self.landmarksWidget.requestNodeAddedUpdate)\n self.observerTags.append( (slicer.mrmlScene, tag) )\n tag = slicer.mrmlScene.AddObserver(slicer.mrmlScene.NodeRemovedEvent, self.landmarksWidget.requestNodeAddedUpdate)\n self.observerTags.append( (slicer.mrmlScene, tag) )", "def register_listener(self):\n self._clear_node_listener = self._node.add_on_changed_listener(\n self._on_node_updated\n )", "def addNodeDirtyPlugCallback(*args, **kwargs):\n \n pass", "def addNodeDirtyCallback(*args, **kwargs):\n \n pass", "def addNodeAddedToModelCallback(*args, **kwargs):\n \n pass", "def do_tag_node(self, p: Position, tag: str) -> None:\n c = self.c\n tc = getattr(c, 'theTagController', None)\n if not tc:\n if not g.unitTesting: # pragma: no cover (skip)\n g.es_print('nodetags not active')\n return\n tc.add_tag(p, tag)\n if not g.unitTesting: # pragma: no cover (skip)\n g.es_print(f\"Added {tag} tag to {p.h}\")", "def bd_define_basic_event(self, xml_node, tree):\r\n\r\n # search for existing event (should already be created before)\r\n existing_nodes = self.tree_find_nodes(tree,\r\n xml_node.get('name'),\r\n OPSA_Basic_Event)\r\n\r\n if len(existing_nodes) < 1:\r\n raise RuntimeError('Node does not exist for tag:',\r\n xml_node.get('name'))\r\n\r\n for a_node in existing_nodes:\r\n d = a_node.data\r\n\r\n for e in xml_node:\r\n\r\n if e.tag == 'float':\r\n # while reading value convert it diretly to a float\r\n d.set_probability(float(e.get('value')))\r\n elif e.tag == 'label':\r\n d.set_label(e.text.strip())\r\n elif e.tag == 'attributes':\r\n for attrib in e:\r\n d.add_attribute(attrib.get('name'),\r\n attrib.get('value'))\r\n elif e.tag == 'parameter':\r\n d.set_parameter(e.get('name'))\r\n else:\r\n raise NotImplementedError('bd_define_basic_event not' +\r\n 'implemented property:', e.tag)", "def programTreeAdded(self, id: long, type: int, oldValue: object, newValue: object) -> None:\n ...", "def _onNodeAdded(self, m_obj, data):\r\n \r\n self._processNodeQueue(m_obj=m_obj)", "def on_change(node, fingerprint):\n _add_attr(node, ATTR_FINGERPRINT)\n _set_attr(node, ATTR_FINGERPRINT, fingerprint)", "def addChildAddedCallback(*args, **kwargs):\n \n pass", "def register_for_new_superseding_events(self):\n pass", "def register_for_new_events(self):\n pass", "def register_for_changed_superseding_events(self):\n pass", "def register_for_changed_events(self):\n pass", "def register_for_new_hierarchy_nodes(self):\n pass", "def child_added(self, child):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
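The row above hinges on SQLAlchemy's mapper event API (event.listens_for(cls, "after_insert")). A minimal, runnable sketch of that mechanism follows; the Item model and the printed message are made-up examples, not the project's actual node classes or tagging logic, and the code assumes SQLAlchemy 1.4+.

from sqlalchemy import Column, Integer, String, create_engine, event
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Item(Base):
    __tablename__ = "items"
    id = Column(Integer, primary_key=True)
    name = Column(String)

@event.listens_for(Item, "after_insert")
def receive_after_insert(mapper, connection, target):
    # Fires inside the flush, right after the INSERT for `target` is emitted.
    # Follow-up SQL must go through `connection`, not through the Session.
    print(f"inserted Item id={target.id!r} name={target.name!r}")

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
with Session(engine) as session:
    session.add(Item(name="example"))
    session.commit()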
Cooccurrence constraint as described in the paper.
def compute_cooccurrence_constraint(self, nodes):
    num_nodes, num_attrs = self.modified_nx.shape
    words_graph = self.cooc_matrix - sp.diags(self.cooc_matrix.diagonal())
    words_graph.eliminate_zeros()
    # words_graph.setdiag(0)
    words_graph.data = words_graph.data > 0
    word_degrees = words_graph.sum(0).A1

    inv_word_degrees = np.reciprocal(word_degrees.astype(float) + 1e-8)

    sd = np.zeros(num_nodes)
    for n in range(num_nodes):
        n_idx = self.modified_nx[n, :].nonzero()[1]
        sd[n] = np.sum(inv_word_degrees[n_idx.tolist()])

    scores_matrix = sp.lil_matrix((num_nodes, num_attrs))

    for n in nodes:
        common_words = words_graph.multiply(self.modified_nx[n])
        idegs = inv_word_degrees[common_words.nonzero()[1]]
        nnz = common_words.nonzero()[0]
        scores = np.array(
            [idegs[nnz == ix].sum() for ix in range(num_attrs)])
        scores_matrix[n] = scores

    self.cooc_constraint = sp.csr_matrix(
        scores_matrix - 0.5 * sd[:, None] > 0)
[ "def test_co_occurrence(adata: AnnData):\n co_occurrence(adata, cluster_key=\"leiden\")\n\n # assert occurrence in adata.uns\n assert \"leiden_co_occurrence\" in adata.uns.keys()\n assert \"occ\" in adata.uns[\"leiden_co_occurrence\"].keys()\n assert \"interval\" in adata.uns[\"leiden_co_occurrence\"].keys()\n\n # assert shapes\n arr = adata.uns[\"leiden_co_occurrence\"][\"occ\"]\n assert arr.ndim == 3\n assert arr.shape[2] == 49\n assert arr.shape[1] == arr.shape[0] == adata.obs[\"leiden\"].unique().shape[0]", "def calculate_cooccurences(bool_topic_matrix):\n logging.info(\"calculating co-occurrences\")\n num_topics = bool_topic_matrix.shape[1]\n cooccurrence_matrix = numpy.zeros((num_topics, num_topics))\n logging.debug(num_topics)\n for i in range(num_topics):\n logging.debug(i)\n topic_i = bool_topic_matrix[:, i]\n cooccurrence_matrix[i, i] = numpy.nan\n for j in range(i + 1, num_topics):\n topic_j = bool_topic_matrix[:, j]\n count_ij = bool_topic_matrix[numpy.where(topic_i & topic_j)].shape[0]\n cooccurrence_matrix[i, j] = count_ij\n cooccurrence_matrix[j, i] = count_ij\n return cooccurrence_matrix", "def n_eq_constraints(self):\n pass", "def compute_node_cc(self, node):\n neighbors = self.find_neighbors(node)\n kv = len(neighbors)\n nv = 0\n\n if kv < 2:\n return 0\n\n for i in range(kv-1):\n for j in range(i+1, kv):\n if neighbors[j] in self.find_neighbors(neighbors[i]):\n nv += 1\n\n return 2.0*nv/(kv*(kv-1))", "def connection_objective(self):\n return max([cc.objective for cc in self.cross_section.connected_components])", "def _cei(x, gp_objective, xi, gp_constraint, constraint_upper):\n ei = UtilityFunction._ei(x, gp_objective, xi)\n\n mean, std = gp_constraint.predict(x, return_std=True)\n z = (constraint_upper - mean) / std\n\n cumulative_probabiliy = norm.cdf(z)\n return cumulative_probabiliy * ei", "def get_co_occurrence(multi_labels, delimiter=',', T=1.0, eps=1e-12):\n classes = sorted(list(set(flatten_list([ml.split(delimiter) for ml in multi_labels]))))\n c = len(classes)\n # Once get one hot labels\n train_one_hots = np.array([np.sum([np.eye(c)[classes.index(l)]\n for l in labels.split(delimiter)], axis=0)\n for labels in multi_labels])\n # Calculate frequency of label occurances per classes\n n_co_occurrence = np.array([np.sum(train_one_hots[np.where(train_one_hots[:, i])], axis=0)\n for i in range(c)])\n n_co_occurrence = np.power(n_co_occurrence, 1./T)\n n_co_oc_sum = np.clip(n_co_occurrence.sum(axis=1).reshape(-1, 1), eps, 1e12)\n # Calculate prior probability distribution of label co-occurence P(L)\n p_co_occurrence = n_co_occurrence / n_co_oc_sum\n # Adjust co-occuerence probability:\n # - Higher alpha than 1. will make probabilities harder\n # - Alpha in range (0., 1.) 
will make them softer\n # ---> 1.0 is the only confirmed to be effective, others didn't work\n return p_co_occurrence", "def calculate_unique_cooccurrences(self, cooccurrences):\n\n return len(set([cooc[1] for cooc in cooccurrences]))", "def count_constraints(self, cell):\n\t\tcount = 0\n\t\tfor neighbor in self.get_neighbor(cell):\n\t\t\tif neighbor.value != '_':\n\t\t\t\tcount += 1\n\t\treturn count", "def cost(self, queens, n):\n \n conflicts = 0\n\n for i in range(n):\n for j in range(i + 1, n):\n if i != j:\n # Horizontal axis\n if queens[i] == queens[j]:\n conflicts = conflicts + 1\n # Diagonal Axis Positive\n if abs(queens[i] - queens[j]) == abs(i - j):\n conflicts = conflicts + 1\n return int(conflicts)", "def get_crosstree_constraints(self): \n # Method Stub: return [((\"c83_Measurement\", 1), (\"c47_AbstractSort\", 1))]\n print \"get_corsstree_constraints\"\n list_implications = []\n for element in [x for x in self.get_features_as_xml_elements() if not self.is_product_level_attribute(self.get_clafer_UniqueId(x))]:\n element_id = self.get_clafer_UniqueId(element)\n might_have_crosstree_constraint = False\n for constraint in element.findall(\"./c1:Declaration[@xsi:type='cl:IConstraint']\", _namespaces):\n if self.is_cross_tree_constraint(element, constraint):\n print \"Adding Cross Tree for element_id %s \" % element_id\n list_implications.append(((element_id, 1), self.extract_implied_feature(element, constraint)))\n might_have_crosstree_constraint = True\n if might_have_crosstree_constraint == True:\n print self.get_clafer_Id(element)\n \n return list_implications", "def n_constraints(self) -> int:\n return self.constraint_matrix.shape[0]", "def as_constraint(self, *args):\n return []", "def constrain_visited_counts(sg):\n for y, count in enumerate(ROW_COUNTS):\n if count is None:\n continue\n row = [sg.grid[(y, x)] for x in range(SIZE)]\n terms = [(c != SYM.EMPTY, 1) for c in row]\n sg.solver.add(PbEq(terms, count))\n for x, count in enumerate(COL_COUNTS):\n if count is None:\n continue\n col = [sg.grid[(y, x)] for y in range(SIZE)]\n terms = [(c != SYM.EMPTY, 1) for c in col]\n sg.solver.add(PbEq(terms, count))", "def make_arc_consistent(cn):\n queue = list(cn.get_constraints())\n queue = list(set().union(queue,[tuple(reversed(x)) for x in queue]))\n while queue:\n (xi, xj) = queue.pop(0)\n if arc_reduce(cn, xi, xj): \n # if a cell has 0 possibilities, sudoku has no solution\n if len(cn.get_domain(xi)) == 0:\n return False\n for Xk in cn.get_vars_in_contraint_with(xi):\n if Xk != xi:\n queue.append((Xk, xi)) \n return True", "def capp1_constraints(self):\n constraints = []\n for i in range(1, self.x + 1):\n for k in range(1, self.y + 1):\n equation = f\"\\tcapS{i}{k}: \" # Need S to differentiate between the two capacity constraints\n capp1 = []\n for j in range(1, self.z + 1):\n capp1.append(f\"x{i}{k}{j}\")\n equation += \" + \".join(capp1) + f\" - c{i}{k} <= 0\"\n constraints.append(equation)\n capp1_constraints = \"\\n\".join(constraints)\n capp1_constraints += \"\\n\"\n return capp1_constraints", "def create_coassoc(ensemble=None, N=None):\n\n # check inputs\n if ensemble is None:\n raise TypeError(\"Please specify the clustering ensemble.\")\n\n if N is None:\n raise TypeError(\n \"Please specify the number of samples in the original data set.\")\n\n nparts = len(ensemble)\n assoc = 0\n for part in ensemble:\n nsamples = np.array([len(part[key]) for key in part])\n dim = np.sum(nsamples * (nsamples - 1)) // 2\n\n I = np.zeros(dim)\n J = np.zeros(dim)\n X = np.ones(dim)\n 
ntriplets = 0\n\n for v in six.itervalues(part):\n nb = len(v)\n if nb > 0:\n for h in range(nb):\n for f in range(h + 1, nb):\n I[ntriplets] = v[h]\n J[ntriplets] = v[f]\n ntriplets += 1\n\n assoc_aux = sp.csc_matrix((X, (I, J)), shape=(N, N))\n assoc += assoc_aux\n\n a = assoc + assoc.T\n a.setdiag(nparts * np.ones(N))\n coassoc = a.todense()\n\n return utils.ReturnTuple((coassoc,), ('coassoc',))", "def brute_vc(g: Graph) -> int:\n return min([len(sub) for sub in subsets(g.vertices()) if gh.is_vc(g,sub)])", "def get_constraints(row):\n new_lst = [0]\n for i in row:\n if i == 1:\n new_lst[-1] += 1\n else:\n new_lst.append(0)\n const = []\n for value in new_lst:\n if value != 0:\n const.append(value)\n return const" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
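To make the quantities in the row above concrete, here is a small scipy sketch of the same building blocks — a binarized attribute co-occurrence graph, inverse attribute degrees, and the per-node threshold sd — on a toy binary feature matrix. The matrix values and variable names are illustrative only, not the class's actual data.

import numpy as np
import scipy.sparse as sp

# Toy binary node-attribute matrix X: 3 nodes, 4 attributes.
X = sp.csr_matrix(np.array([[1, 0, 1, 0],
                            [1, 1, 0, 0],
                            [0, 1, 1, 1]], dtype=float))

# Attribute co-occurrence graph: attributes i and j are linked if some node
# has both of them; the diagonal (self co-occurrence) is dropped.
words_graph = (X.T @ X).tolil()
words_graph.setdiag(0)
words_graph = words_graph.tocsr()
words_graph.eliminate_zeros()
words_graph.data = (words_graph.data > 0).astype(float)

word_degrees = words_graph.sum(0).A1
inv_word_degrees = np.reciprocal(word_degrees + 1e-8)

# Per-node threshold: sum of inverse degrees over the attributes the node has.
sd = np.array([inv_word_degrees[X[n].indices].sum() for n in range(X.shape[0])])
print(sd)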
Compute the logits of the surrogate model, i.e. the linearized GCN. Returns an np.array of shape [num_nodes, num_classes] containing the log probabilities for each node.
def compute_logits(self):
    return (self.adj_norm @ self.adj_norm @ self.modified_nx @ self.W)[self.target].ravel()
[ "def logits(self) -> T.Tensor:\n if self._logits is None:\n self._logits = T.random.bernoulli_probs_to_logits(self._probs,\n self.epsilon)\n return self._logits", "def logits(self, x):", "def loglike(self, nodeinput=None):\n if nodeinput is None:\n nodeinput = {}\n problist = []\n for n in self.iterator:\n if n.name in nodeinput:\n problist.append(n.logprob(valueinput=nodeinput[n.name]))\n else:\n problist.append(n.logprob())\n r = np.sum(problist)\n return r", "def get_score(self, logits):\n return tf.nn.log_softmax(logits)", "def compute_log_reg(self):\n \n self.X = self.data.iloc[:,:-1].values\n self.X = sm.add_constant(self.X)\n self.y = self.data.iloc[:,-1]\n self.model = sm.Logit(self.y, self.X).fit(disp=False)", "def transform_logits(self, logits):", "def log_prob(actions, logits):\n # Equivalent to tf.sparse_softmax_cross_entropy_with_logits.", "def softmax(logits):\n # print(\"logit\", logits.shape)\n\n clas = np.exp(np.minimum(logits, 22.))\n clas = clas / np.maximum(np.sum(clas, axis=-1, keepdims=True), 1e-10)\n return clas", "def categorical_log_prob(self, logits):\n return self.base_dist.log_prob(logits)", "def predict_logit(self, x):\n self.model.train()\n with torch.no_grad():\n y_ = self.model(x)\n return y_", "def _log_prob(self, x):\n return tf.math.log(tf.cast(self.prob(x), dtype=tf.float32))", "def log_bernoulli_with_logits_nosigmoid(x, logits):\n\n\tlog_prob = bce2(logits, x).sum(-1)\n\n\treturn log_prob", "def _compute_logits(\n self, params: PyTree, batch: Mapping[str, jnp.ndarray], *args, **kwargs\n ):\n # Flatten (batch_size, list_size, ...) into (batch_size * list_size, ...)\n # for model inputs.\n batch_size, list_size, *_ = batch[\"encoder_input_tokens\"].shape\n flattened_batch = {\n **batch,\n \"encoder_input_tokens\": jnp.reshape(\n batch[\"encoder_input_tokens\"],\n (batch_size * list_size,) + batch[\"encoder_input_tokens\"].shape[2:],\n ),\n \"decoder_input_tokens\": jnp.reshape(\n batch[\"decoder_input_tokens\"],\n (batch_size * list_size,) + batch[\"decoder_input_tokens\"].shape[2:],\n ),\n \"decoder_target_tokens\": jnp.reshape(\n batch[\"decoder_target_tokens\"],\n (batch_size * list_size,)\n + batch[\"decoder_target_tokens\"].shape[2:],\n ),\n }\n\n # Compute logits on flattened inputs.\n output = super()._compute_logits(params, flattened_batch, *args, **kwargs)\n\n # Reshape output logits back to (batch_size, list_size, ...)\n output = jnp.reshape(output, (batch_size, list_size) + output.shape[1:]) # pytype: disable=attribute-error # jax-ndarray\n\n # Compute per-item scores. 
We do three vmaps here for each of the dimensions\n # (batch_size, list_size, sequence_length, ...)\n output = jax.vmap(jax.vmap(jax.vmap(jnp.take)))(\n output, batch[\"decoder_target_tokens\"]\n )\n output = jnp.squeeze(output, -1)\n return output", "def log_probability(self):\n return tf.reduce_sum(self.log_ps, axis=0)", "def multinomial_logistic_regression_cost_gradient(parameters, input, output):\n prediction = softmax(np.dot(parameters.T, input))\n cost = -np.log(prediction[output])\n # Create one-hot vector\n one_hot = np.zeros_like(prediction)\n one_hot[output] = 1\n gradient = np.dot(input.reshape(-1, 1), (prediction - one_hot).reshape(-1, 1).T)\n return cost, gradient", "def model_logprobs(model, collator, input_texts, output_texts, length_penalty=0, batchsize=128):\n total_logprobs = []\n for batch in splitevery(zip(input_texts, output_texts), batchsize):\n input_batch, output_batch = zip(*batch)\n encoded_inputs = collator.encode_inputs(input_batch)\n encoded_outputs = collator.encode_outputs(output_batch)\n with torch.no_grad():\n output = model(**encoded_inputs, **encoded_outputs)\n # Normalize probabilities\n normalized = output[\"logits\"].log_softmax(dim=2)\n for k in range(len(input_batch)):\n total_logprob = sum([\n normalized[k, i, encoded_outputs[\"labels\"][k][i]]\n for i in range(1, len(encoded_outputs[\"labels\"][k]) - 1)\n ]).cpu().numpy().item()\n # Normalize by length: https://www.aclweb.org/anthology/W18-6322.pdf\n num_output_tokens = len(encoded_outputs[\"labels\"][k]) - 2 # Ignore tokens for text start/end\n total_logprob /= (5+num_output_tokens)**length_penalty / (5+1)**length_penalty\n total_logprobs.append(total_logprob) \n return total_logprobs", "def predict_objectness_logits(self):\n pred_objectness_logits = [\n # Reshape: (N, A, Hi, Wi) -> (N, Hi, Wi, A) -> (N, Hi*Wi*A)\n score.permute(0, 2, 3, 1).reshape(self.num_images, -1)\n for score in self.pred_objectness_logits\n ]\n if self.ious is not None:\n pred_ious_logits = [\n # Reshape: (N, A, Hi, Wi) -> (N, Hi, Wi, A) -> (N, Hi*Wi*A)\n iou.permute(0, 2, 3, 1).reshape(self.num_images, -1)\n for iou in self.ious\n ]\n pred_objectness_iou_logits = []\n for p1, p2 in zip(pred_objectness_logits, pred_ious_logits):\n # apply (objectness prob * iou score) ** 0.05 to normalize\n p = (p1.sigmoid()*p2.sigmoid()).sqrt()\n # apply inverse sigmoid to turn probs into logits\n pred_objectness_iou_logits.append((p/(1-p)).log())\n return pred_objectness_iou_logits\n else:\n return pred_objectness_logits", "def scale_log(self) -> None:\n # Problem are probabilities below 1\n self.values = [log(1.01 + x, 2) for x in self.values]", "def log_prob(self, weights):\n return self.dirichlet.logpdf(weights)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
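For context, the surrogate in the row above is the linearized two-layer GCN, whose logits are Â²XW with Â the normalized adjacency. A self-contained sketch of that computation on a toy graph follows; the graph, feature, and weight values (and the exact symmetric normalization used for adj_norm) are assumptions for illustration.

import numpy as np
import scipy.sparse as sp

rng = np.random.default_rng(0)
num_nodes, num_feats, num_classes = 6, 8, 3

# Toy undirected graph with self-loops, normalized as D^-1/2 (A + I) D^-1/2.
edges = np.array([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [0, 5]])
A = sp.coo_matrix((np.ones(len(edges)), (edges[:, 0], edges[:, 1])),
                  shape=(num_nodes, num_nodes))
A = A + A.T + sp.eye(num_nodes)
deg = np.asarray(A.sum(1)).ravel()
adj_norm = sp.diags(1.0 / np.sqrt(deg)) @ A @ sp.diags(1.0 / np.sqrt(deg))

X = sp.csr_matrix(rng.random((num_nodes, num_feats)))
W = rng.random((num_feats, num_classes))

target = 2
logits = (adj_norm @ adj_norm @ X @ W)[target]
print(np.asarray(logits).ravel())  # one score per class for the target node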
Determine the incorrect class with the largest logit.
def strongest_wrong_class(self, logits):
    target_label_onehot = np.eye(self.num_classes)[self.target_label]
    return (logits - 1000 * target_label_onehot).argmax()
[ "def decode_logits(self, logits):\n # Choose the class with maximimum probability.\n best = list(np.argmax(logits, axis=1))\n return self.decode(best)", "def _find_best_class(self, x: pd.Series) -> int:\n\n optimal_score, optimal_class = float('-inf'), None\n for k in self._pi_k.keys():\n prob_k = self._prob(x, self._mu_k[k], self._pi_k[k])\n if prob_k >= optimal_score:\n optimal_score, optimal_class = prob_k, k\n\n return optimal_class", "def classify(me, observation):\n scores = calculate(me, observation)\n max_score, klass = scores[0], me.classes[0]\n for i in range(1, len(scores)):\n if scores[i] > max_score:\n max_score, klass = scores[i], me.classes[i]\n return klass", "def softmax(logits):\n # print(\"logit\", logits.shape)\n\n clas = np.exp(np.minimum(logits, 22.))\n clas = clas / np.maximum(np.sum(clas, axis=-1, keepdims=True), 1e-10)\n return clas", "def get_majority_class(classes):\n num_class = {}\n for clazz in classes:\n if clazz not in num_class.keys():\n num_class[clazz] = 0\n num_class += 1\n sorted_num_class = sorted(num_class.iteritems(), key=operator.itemgetter(1), reverse=True)\n return sorted_num_class[0][0]", "def _get_majority_class(y):\n from collections import Counter\n mc = Counter(y).most_common()\n return mc[0][0]", "def support_t_except_class(self, rule, label=0):\n all_classes = [i for i in range(self.number_of_classes)]\n all_classes.remove(label)\n max_sup = self.support_t(rule, label=all_classes[0])\n\n for i in range(1, len(all_classes)):\n sup = self.support_t(rule, label=all_classes[i])\n if sup > max_sup:\n max_sup = sup\n\n return max_sup", "def _compute_thresholds_max_random(self):\n for c_ in self.classes_:\n rnd_clf_tpr = np.linspace(0, 1, self.thresholds_[c_].size)\n self.theta_opt_[c_] = self.thresholds_[\n c_][np.argmax(self.tpr_[c_] - rnd_clf_tpr)]", "def calculate_majority_class(examples):\n label_counts = calculate_label_counts(examples)\n label_with_max_count = max(label_counts, key=(lambda key: label_counts[key]))\n count = label_counts[label_with_max_count] # type: int\n return label_with_max_count, count", "def get_max_num_classes(self):\n num = 0\n for task in self.task_list:\n if hasattr(task, \"num_classes\"):\n if num < task.num_classes:\n num = task.num_classes\n\n return num", "def maximal_valance(self) -> int:\n\n if self.is_metal:\n return 6\n\n if self.label in _max_valances:\n return _max_valances[self.label]\n\n logger.warning(\n f\"Could not find a valid valance for {self}. 
\" f\"Guessing at 6\"\n )\n return 6", "def MaximumThreshold(self) -> int:", "def classify(self, data):\n return np.argmax(self.feedforward(data))", "def _classify(self, X: np.ndarray) -> int:\n assert len(X) == self.num_features\n posteriors = []\n for c in self.classes:\n prior = self.priors[c] # Pr(c)\n likelihood = 1\n for i, dist in enumerate(self.distributions[c]):\n likelihood *= norm(dist.mean, dist.std).pdf(X[i]) # Pr(X | c)\n posteriors.append(prior * likelihood)\n # Normalize to add up to 1\n posteriors = normalize(posteriors)\n # Return class with highest posterior\n return self.classes[np.argmax(posteriors)]", "def accuracy_of_digit_class(model, input_, classes, mini_batch_size = 10):\n nb_errors = 0\n for b in range(0, input_.size(0), mini_batch_size): \n _, out1, _ = model(input_.narrow(0, b, mini_batch_size))\n _, pred = torch.max(out1, dim=1)\n gt = classes.narrow(0, b, mini_batch_size)[:, 0]\n nb_errors += (pred != gt).sum().item()\n N = input_.shape[0]\n return 100*(N-nb_errors)/N", "def getMostCommonClass(examples,className):\n counts = getClassCounts(examples,className)\n if len(examples) > 0:\n return max(counts, key=counts.get)\n else:\n return None", "def test_naive_bayes(document, log_prior, log_likelihood, classes, vocabulary):\n\n # Calculate this document's probability of being each class\n document = tok.tokenize(document)\n sum = {}\n\n for c in classes:\n sum[c] = log_prior[c]\n\n for word in document:\n if word in vocabulary:\n sum[c] = sum[c] + log_likelihood[c][word]\n\n # Find the class with the highest probability\n best_class = None\n best_sum = -float(\"inf\")\n\n for c in sum:\n if sum[c] > best_sum:\n best_class = c\n best_sum = sum[c]\n\n return best_class", "def num_classes(self):\n return 2", "def test_fit_majority_class():\n model, _ = load_iris_mdoel()\n np.testing.assert_array_equal(model.majority_class, 2)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
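The masking trick in the row above (subtract a large constant on the true class, then take the argmax) is easy to check in isolation. A tiny numpy sketch with made-up numbers:

import numpy as np

num_classes = 4
target_label = 2
logits = np.array([1.3, 0.2, 5.0, 4.1])

# Push the true class far below every other class, then take the argmax:
# the result is the highest-scoring *incorrect* class.
target_label_onehot = np.eye(num_classes)[target_label]
best_wrong_class = (logits - 1000 * target_label_onehot).argmax()
print(best_wrong_class)  # 3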
Compute feature scores for all possible feature changes.
def feature_scores(self):
    if self.cooc_constraint is None:
        self.compute_cooccurrence_constraint(self.influence_nodes)

    logits = self.compute_logits()
    best_wrong_class = self.strongest_wrong_class(logits)
    gradient = self.gradient_wrt_x(
        self.target_label) - self.gradient_wrt_x(best_wrong_class)
    surrogate_loss = logits[self.target_label] - logits[best_wrong_class]

    gradients_flipped = (gradient * -1).tolil()
    gradients_flipped[self.modified_nx.nonzero()] *= -1

    X_influencers = sp.lil_matrix(self.modified_nx.shape)
    X_influencers[self.influence_nodes] = self.modified_nx[
        self.influence_nodes]
    gradients_flipped = gradients_flipped.multiply(
        (self.cooc_constraint + X_influencers) > 0)
    nnz_ixs = np.array(gradients_flipped.nonzero()).T

    sorting = np.argsort(gradients_flipped[tuple(nnz_ixs.T)]).A1
    sorted_ixs = nnz_ixs[sorting]
    grads = gradients_flipped[tuple(nnz_ixs[sorting].T)]

    scores = surrogate_loss - grads
    return sorted_ixs[::-1], scores.A1[::-1]
[ "def calculate_scores(self):\n return [self.fitness.eval(individual) for individual in self.population]", "def compute_score(scores):\n\tcurr_score = 50\n\tfor classification in scores: \n\t\tif classification == 1 or classification == 3: \n\t\t\tcurr_score += 0.08\n\t\tif classification == 2 or classification == 4: \n\t\t\tcurr_score -= 0.03\n\treturn curr_score", "def cv_score(iterableFolds):\n dblCorrectTotal = dblWeightTotal = 0.0\n for cvf in iterableFolds:\n rslt = evaluate_classification(cvf)\n dblCorrect,dblIncorrect = weight_correct_incorrect(rslt)\n dblCorrectTotal += dblCorrect\n dblWeightTotal += dblCorrect + dblIncorrect\n return dblCorrectTotal/dblWeightTotal", "def compute_feature_importances(self):\n self.feature_importances = np.zeros(len(self.forest[0].feature_importances_))\n for i in xrange(self.n_trees):\n self.feature_importances = self.feature_importances + self.forest[i].feature_importances_\n\n self.feature_importances = self.feature_importances/self.n_trees", "def calculate_fscore(self, node):\n # TODO: Calculate and returns the f score of a node. \n # REMEMBER F = G + H\n return self.get_gScore(node) + self.heuristic_cost_estimate(node)", "def calculateMacroValuesAndFScore(legitimateCounter, spamCounter, legitimateFileCount, spamFileCount):\n macroAveragedPrecision = (legitimateCounter / (legitimateCounter + spamFileCount - spamCounter) + spamCounter / (spamCounter + legitimateFileCount - legitimateCounter)) / 2\n macroAveragedRecall = (legitimateCounter / legitimateFileCount + spamCounter / spamFileCount) / 2\n fScore = (2 * macroAveragedPrecision * macroAveragedRecall) / (macroAveragedPrecision + macroAveragedRecall)\n\n return macroAveragedPrecision, macroAveragedRecall, fScore", "def f1_score(self):\n c = 0\n for matrix in self._all_confusion_matrix:\n # print(\"========== {0} ========\".format(c))\n self._all_F1_score.append(self.get_f1_score(matrix))\n c = c + 1", "def _get_feature_score(self, coords):\n # Weighted average pixel of area detected, by channel\n # B, G, R channels are multiplied by 1, 2, 3 respectively\n # Results in: [[avg(B1), avg(G1), avg(R1)], ... 
, [avg(Bn), avg(Gn), avg(Rn)]]\n wavgs = [[np.average(self.heat_map[y: y + h, x: x + w, c]) * (c + 1) for c in range(self.hm_lvl, NUM_CHANNELS)]\n for x, y, w, h in coords]\n debug(\"Weighted channel averages before summation: {}\".format(wavgs))\n # Other options to consider:\n # Not weighted average:\n # avgs = [[np.average(self.heat_map[y: y + h, x: x + w, c]) for c in range(self.hm_lvl, NUM_CHANNELS)] for x, y, w, h in coords]\n # BGR channels weighted by powers of 1, 2, 3 respectively\n # pavgs = [[np.average(self.heat_map[y: y + h, x: x + w, c]) * ((c + 1) ** 2) for c in range(self.hm_lvl, NUM_CHANNELS)] for x, y, w, h in coords]\n return np.average(wavgs, axis=0)", "def updateFitnessSet(self):\r\n\r\n accuracySum=0.0\r\n accuracies = []\r\n\r\n #First, calculate the accuracies of the classifier and the accuracy sums\r\n i = 0\r\n for cl in self.clSet:\r\n accuracies.append(cl.getAccuracy())\r\n accuracySum = accuracySum + accuracies[i]*cl.getNumerosity()\r\n i = i + 1\r\n\r\n #Next, update the fitness accordingly\r\n for i in range(self.getSize()):\r\n self.clSet[i].updateFitness(accuracySum, accuracies[i])", "def _cv_total_score(self):\n scores = self.scores\n numbers = self.number_predicted\n total = sum(numbers)\n number_correct = sum([s*n for s,n in zip(scores,numbers)])\n total_score = number_correct / total\n return total_score", "def calculate_fscore(self, node):\n # TODO: Calculate and returns the f score of a node. \n # REMEMBER F = G + H\n \n G=self.get_gScore(node)\n H=self.heuristic_cost_estimate(node)\n F=G+H\n return F", "def calc_score(self):\n if self.exc:\n return 0\n for set_result in self.arg_sets_res:\n if not set_result.is_correct:\n return 0\n return self.score", "def calculateScore(board,gameState):\n pass", "def get_listener_scores(self, eval_contexts):\n\n ll_scores = np.exp(self.get_lit_listener_scores(eval_contexts))\n pl_scores = np.exp(self.get_prag_listener_scores(eval_contexts))\n results = {\n \"Literal Listener\": (np.mean(ll_scores), ll_scores),\n \"Pragmatic Listener\": (np.mean(pl_scores), pl_scores)\n }\n return results", "def calculate_all_scores(scores, Tran):\n # logadd for first token. 
the transition score of the starting tag must be used.\n # it turns out that logadd = log(exp(score)) = score\n # (use long double because taking exp's leads to very very big numbers)\n scores = np.longdouble(scores)\n scores[0] += Tran[-1]\n \n # logadd for the following tokens\n transitions = Tran[:-1].T\n for token, _ in enumerate(scores[1:], start=1):\n logadd = np.log(np.sum(np.exp(scores[token - 1] + transitions), 1))\n scores[token] += logadd\n \n# scores[np.where(scores==np.inf)] = 1000 #bhanu\n return scores #np.inf a very large number is replaced with a finite large number", "def test_calculated_feature_importances(self):\n # Setting up lorax\n lrx = TheLorax(global_clf, data, id_col='entity_id')\n lrx_out = lrx.explain_example(idx=1, pred_class=1, graph=False)\n\n feature1_contrib = lrx_out.contribution.loc['feature1']\n feature5_contrib = lrx_out.contribution.loc['feature5']\n\n # Test cases for correct feature importances\n self.assertEqual(feature1_contrib, 0.04889021376498209)\n self.assertEqual(feature5_contrib, -0.31556073962118303)\n self.assertFalse('feature3' in lrx_out.contribution)", "def evaluate_features():\n # training set is from Stanford Sentiment Training Set\n training_set = parse_stanford(\"data/stanfordSentimentTreebank/stanfordSentimentTreebank/dictionary.txt\", \n \"data/stanfordSentimentTreebank/stanfordSentimentTreebank/sentiment_labels.txt\")\n # train weights for maxent model\n weights = train_maxent(training_set)\n # sort weights in descending order\n sorted_weights = { sentiment: sorted(weights[sentiment].iteritems(), \n key=lambda x:x[1], \n reverse=True) \n for sentiment in weights}\n\n # evaluate model for the top i weights, in this range (There should be # ~130000 weights total)\n for i in range(10000, 130000, 10000):\n # get the top i weights\n new_weights = {\"positive\": {}, \"negative\": {}, \"neutral\": {}}\n for sentiment in sorted_weights:\n new_weights[sentiment] = {w[0]:weights[sentiment][w[0]] \n for w in sorted_weights[sentiment][:i-1]}\n\n # load the episode that has gold standard features already assigned\n episode = parse_goldstandard(\"data/s1e9_gold.txt\", 1, 9)\n # calculate bag of words sentiments\n word_sentiments = parse_NRC(\"data/NRC-Emotion-Lexicon-v0.92/NRC-Emotion-Lexicon-v0.92/NRC-emotion-lexicon-wordlevel-alphabetized-v0.92.txt\")\n bag_of_words(episode, word_sentiments)\n # calculate maxent sentiments\n run_maxent(episode, new_weights)\n\n # evaulate maxent and bag_of_words sentiments against baseline\n print \"%s max_ent vs gold: %s\" % (i, compare_scores(episode, \n score1=\"maxent_score\", \n score2=\"gold_score\"))\n print \"%s bow vs gold: %s\" % (i, compare_scores(episode, \n \"bow_score\", \n score2=\"gold_score\"))", "def get_fscore(classifier, data):\n true_positives = 0\n true_negatives = 0\n false_positives = 0\n false_negatives = 0\n \n # predict labels and verify, get values for confusion matrix\n for(tweet, label) in data:\n guess = classifier.classify(tweet)\n if guess == 'tourism' and label == 'tourism':\n true_positives += 1\n elif guess == 'tourism' and label == 'nontourism':\n false_positives += 1\n elif guess == 'nontourism' and label == 'nontourism':\n true_negatives += 1\n else:\n false_negatives += 1\n\n precision = 0\n recall = 0\n fscore = 0\n\n # compute precision, recall, f-score\n if (true_positives + false_positives) != 0:\n precision = (true_positives * 1.0) / (true_positives + false_positives)\n if (true_positives + false_negatives) != 0:\n recall = (true_positives * 1.0) / (true_positives 
+ false_negatives)\n if (precision + recall) != 0:\n fscore = 2 * (precision * recall) / (precision + recall)\n\n # print 'Precision: ' + str(precision)\n # print 'Recall: ' + str(recall)\n # print 'F-score: ' + str(fscore)\n performance = {'fscore': fscore, 'precision': precision, 'recall': recall}\n return performance", "def calculate_fitness(self):\n\n fitness_scores = []\n for agent in self.agents:\n fitness_scores_ = []\n for _ in range(self.runs):\n fitness_scores_.append(self.interact(agent, self.env, self.params))\n fitness_scores.append(sum(fitness_scores_) / self.runs)\n return fitness_scores" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
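The ranking step in the row above can be restated on toy data: candidate (node, attribute) flips are ordered by their sign-corrected gradient, and each gets a score of surrogate loss minus gradient, so lower scores mean more damaging flips. The numbers below and the coo-based indexing are illustrative only, not the class's internals.

import numpy as np
import scipy.sparse as sp

# Pretend per-(node, attribute) gradients, already sign-flipped so that a
# larger value means "flipping this bit helps the attack more".
gradients_flipped = sp.coo_matrix(np.array([[0.0, 0.4, 0.0],
                                            [0.9, 0.0, 0.1],
                                            [0.0, 0.0, 0.7]]))
surrogate_loss = 2.5  # margin of the true class over the best wrong class

candidates = np.column_stack((gradients_flipped.row, gradients_flipped.col))
order = np.argsort(gradients_flipped.data)[::-1]  # largest gradient first

sorted_ixs = candidates[order]
scores = surrogate_loss - gradients_flipped.data[order]
print(sorted_ixs)  # candidate (node, attribute) flips, most promising first
print(scores)      # lower score = larger estimated drop of the class margin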
Determine the influencer nodes to attack node i based on the weights W and the attributes X.
def get_attacker_nodes(self, n=5, add_additional_nodes=False):
    assert n < self.num_nodes - 1, "number of influencers cannot be >= number of nodes in the graph!"

    # neighbors = self.modified_adj[self.target].nonzero()[1]
    neighbors = self.modified_adj[self.target].indices
    # assert self.target not in neighbors

    potential_edges = np.column_stack((np.tile(self.target, len(neighbors)),
                                       neighbors)).astype("int32")

    # The new A_hat_square_uv values that we would get if we removed the edge
    # from u to each of the neighbors, respectively
    a_hat_uv = self.compute_new_a_hat_uv(potential_edges)

    XW = self.compute_XW()

    # compute the struct scores for all neighbors
    struct_scores = self.struct_score(a_hat_uv, XW)

    if len(neighbors) >= n:  # do we have enough neighbors for the number of desired influencers?
        influence_nodes = neighbors[np.argsort(struct_scores)[:n]]
        if add_additional_nodes:
            return influence_nodes, np.array([])
        return influence_nodes
    else:
        influence_nodes = neighbors
        if add_additional_nodes:  # Add additional influencers by connecting them to u first.
            # Compute the set of possible additional influencers, i.e. all nodes except the ones
            # that are already connected to u.
            poss_add_infl = np.setdiff1d(np.setdiff1d(np.arange(self.num_nodes), neighbors), self.target)
            n_possible_additional = len(poss_add_infl)
            n_additional_attackers = n - len(neighbors)
            possible_edges = np.column_stack((np.tile(self.target, n_possible_additional),
                                              poss_add_infl)).astype("int32")

            # Compute the struct_scores for all possible additional influencers, and choose the one
            # with the best struct score.
            a_hat_uv_additional = self.compute_new_a_hat_uv(possible_edges)
            additional_struct_scores = self.struct_score(a_hat_uv_additional, XW)
            additional_influencers = poss_add_infl[np.argsort(additional_struct_scores)[-n_additional_attackers::]]

            return influence_nodes, additional_influencers
        else:
            return influence_nodes
[ "def learn_initial_weights(self, X):\n output = torch.tensor(X, dtype=torch.float32)\n for i in range(len(self.weights)):\n torch.nn.init.xavier_normal_(self.weights[i].weight, torch.nn.init.calculate_gain('tanh'))\n self.weights[i].bias.data.fill_(0)\n output2 = self.weights[i].forward(output)\n mean = output2.mean(axis=0)\n self.weights[i].bias.data = -mean\n output = self.weights[i].forward(output)\n output = self.acts[i](output)\n # print(output.mean(axis=0), output.mean(axis=0).shape)\n torch.nn.init.xavier_normal_(self.outlayer.weight, torch.nn.init.calculate_gain('tanh'))\n self.outlayer.bias.data.fill_(0)\n # self.outlayer.bias.data[1].fill_(np.log(np.exp(1) - 1))\n # Noise can be tuned here...\n self.outlayer.bias.data[1] = -5", "def set_weights(self, weights):\n\n weight_index = 0\n for layer in self.NN:\n for node in layer:\n for i in range(len(node.weights)):\n #print(weight_index)\n try:\n node.weights[i] = weights[weight_index]\n except Exception as e:\n print(weight_index)\n print(len(weights))\n sys.exit()\n\n weight_index += 1", "def _initialize_weights(self):\n for n1, n2 in self.weights_graph.edges():\n data = self.weights_graph.edges[n1, n2]\n data[\"weight\"] = float(1)\n for _, data in self.weights_graph.nodes(data=True):\n data[\"usage\"] = 0\n\n for vs in self.terminals.values():\n self._increase_weights(vs)", "def _connected_weights(self, i):\n return [self.weights[j].W() if j < i else self.weights[j].W_T()\n for j in self.weight_connections[i]]", "def activation_level(I,w):\n \n A = []\n \n for index, _ in enumerate(I):\n # If it is not the first and not the last element in the list\n if index != 0 and index != len(I)-1:\n # Calculate the activation level\n a = I[index] - w*(I[index-1]+I[index+1])\n A.append(a)\n return A", "def testInactivatedNodesReallyUseless(self):\n\t\ti_nodes = self.som_hands.activation_response(self.som_hands.data)\n\t\tidx_inact = np.where(i_nodes.flatten()==0)[0]\n\t\t_, w = self.som_hands.get_weights()\n\t\t\n\t\tact = np.setdiff1d(np.arange(w.shape[0]), idx_inact)\t\t\n\t\ti_w = w[idx_inact, :]\n\t\ta_w = w[act, :]\n\n\t\tfor i, dp in enumerate(self.som_hands.data):\n\t\t\ts, _ = get_similar_vector(w, dp)\n\t\t\t\n\t\t\t# make sure the closest vector is not in i_w\n\t\t\t# too many for loops here.. ugly!\n\t\t\t_, qi, _ = get_similar_vector(i_w, s[0])\n\t\t\t_, qa, _ = get_similar_vector(a_w, s[0])\n\t\t\tself.assertGreater(qi[0], qa[0])", "def compute_weights(self) -> list:\n weights = []\n for num in self.population:\n # Our purpose: find x with fitness value near 0 as much as possible\n # So if abs(x) is large, negative of it (weight) will be small\n weights.append(0 - abs(self.equation(num+self.offset))) # abs to find x near 0\n return weights", "def inputs_weights_init(self):\n input_user, input_item, input_rating = self.inputs_init()\n user_embeddings, item_embeddings = self.embeddings_layers_init()\n\n return input_user, input_item, input_rating, user_embeddings, item_embeddings", "def _compute_interpolation_weights(self,x):\n\n sz = x.size()\n dim = sz[1]\n\n index = MyLongTensor(*([self.n+1]+list(x.size())))\n weight = MyTensor(*([self.n+1]+list(x.size()))).zero_()\n\n # compute the interpolation indexes\n # todo: can likely be simplified (without loop over dimension)\n if self.n%2==0: # even\n for d in range(dim):\n i = (torch.floor(x[:,d,...].data + 0.5) - self.n//2)\n for k in range(0,self.n+1):\n index[k,:,d,...] 
= i+k\n else:\n for d in range(dim):\n i = (torch.floor(x[:,d,...].data)-self.n//2)\n for k in range(0,self.n+1):\n index[k,:,d,...] = i+k\n\n # compute the weights\n if self.n==2:\n w = x - index[1,...].float()\n weight[1,...] = 3.0 / 4.0 - w * w\n weight[2,...] = (1.0 / 2.0) * (w - weight[1,...] + 1.0)\n weight[0,...] = 1.0 - weight[1,...] - weight[2,...]\n elif self.n==3:\n w = x - index[1,...].float()\n weight[3,...] = (1.0 / 6.0) * w * w * w\n weight[0,...] = (1.0 / 6.0) + (1.0 / 2.0) * w * (w - 1.0) - weight[3,...]\n weight[2,...] = w + weight[0,...] - 2.0 * weight[3,...]\n weight[1,...] = 1.0 - weight[0,...] - weight[2,...] - weight[3,...]\n elif self.n==4:\n w = x - index[2].float()\n w2 = w * w\n t = (1.0 / 6.0) * w2\n weight[0] = 1.0 / 2.0 - w\n weight[0] *= weight[0]\n weight[0] *= (1.0 / 24.0) * weight[0]\n t0 = w * (t - 11.0 / 24.0)\n t1 = 19.0 / 96.0 + w2 * (1.0 / 4.0 - t)\n weight[1] = t1 + t0\n weight[3] = t1 - t0\n weight[4] = weight[0] + t0 + (1.0 / 2.0) * w\n weight[2] = 1.0 - weight[0] - weight[1] - weight[3] - weight[4]\n elif self.n==5:\n w = x - index[2].float()\n w2 = w * w\n weight[5] = (1.0 / 120.0) * w * w2 * w2\n w2 -= w\n w4 = w2 * w2\n w -= 1.0 / 2.0\n t = w2 * (w2 - 3.0)\n weight[0] = (1.0 / 24.0) * (1.0 / 5.0 + w2 + w4) - weight[5]\n t0 = (1.0 / 24.0) * (w2 * (w2 - 5.0) + 46.0 / 5.0)\n t1 = (-1.0 / 12.0) * w * (t + 4.0)\n weight[2] = t0 + t1\n weight[3] = t0 - t1\n t0 = (1.0 / 16.0) * (9.0 / 5.0 - t)\n t1 = (1.0 / 24.0) * w * (w4 - w2 - 5.0)\n weight[1] = t0 + t1\n weight[4] = t0 - t1\n elif self.n==6:\n w = x - index[3].float()\n weight[0] = 1.0 / 2.0 - w\n weight[0] *= weight[0] * weight[0]\n weight[0] *= weight[0] / 720.0\n weight[1] = (361.0 / 192.0 - w * (59.0 / 8.0 + w\n * (-185.0 / 16.0 + w * (25.0 / 3.0 + w * (-5.0 / 2.0 + w)\n * (1.0 / 2.0 + w))))) / 120.0\n weight[2] = (10543.0 / 960.0 + w * (-289.0 / 16.0 + w\n * (79.0 / 16.0 + w * (43.0 / 6.0 + w * (-17.0 / 4.0 + w\n * (-1.0 + w)))))) / 48.0\n w2 = w * w\n weight[3] = (5887.0 / 320.0 - w2 * (231.0 / 16.0 - w2\n * (21.0 / 4.0 - w2))) / 36.0\n weight[4] = (10543.0 / 960.0 + w * (289.0 / 16.0 + w\n * (79.0 / 16.0 + w * (-43.0 / 6.0 + w * (-17.0 / 4.0 + w\n * (1.0 + w)))))) / 48.0\n weight[6] = 1.0 / 2.0 + w\n weight[6] *= weight[6] * weight[6]\n weight[6] *= weight[6] / 720.0\n weight[5] = 1.0 - weight[0] - weight[1] - weight[2] - weight[3] - weight[4] - weight[6]\n elif self.n==7:\n w = x - index[3].float()\n weight[0] = 1.0 - w\n weight[0] *= weight[0]\n weight[0] *= weight[0] * weight[0]\n weight[0] *= (1.0 - w) / 5040.0\n w2 = w * w\n weight[1] = (120.0 / 7.0 + w * (-56.0 + w * (72.0 + w\n * (-40.0 + w2 * (12.0 + w * (-6.0 + w)))))) / 720.0\n weight[2] = (397.0 / 7.0 - w * (245.0 / 3.0 + w * (-15.0 + w\n * (-95.0 / 3.0 + w * (15.0 + w * (5.0 + w\n * (-5.0 + w))))))) / 240.0\n weight[3] = (2416.0 / 35.0 + w2 * (-48.0 + w2 * (16.0 + w2\n * (-4.0 + w)))) / 144.0\n weight[4] = (1191.0 / 35.0 - w * (-49.0 + w * (-9.0 + w\n * (19.0 + w * (-3.0 + w) * (-3.0 + w2))))) / 144.0\n weight[5] = (40.0 / 7.0 + w * (56.0 / 3.0 + w * (24.0 + w\n * (40.0 / 3.0 + w2 * (-4.0 + w * (-2.0 + w)))))) / 240.0\n weight[7] = w2\n weight[7] *= weight[7] * weight[7]\n weight[7] *= w / 5040.0\n weight[6] = 1.0 - weight[0] - weight[1] - weight[2] - weight[3] - weight[4] - weight[5] - weight[7]\n elif self.n==8:\n w = x - index[4].float()\n weight[0] = 1.0 / 2.0 - w\n weight[0] *= weight[0]\n weight[0] *= weight[0]\n weight[0] *= weight[0] / 40320.0\n w2 = w * w\n weight[1] = (39.0 / 16.0 - w * (6.0 + w * (-9.0 / 2.0 + 
w2)))\\\n *(21.0 / 16.0 + w * (-15.0 / 4.0 + w * (9.0 / 2.0 + w\n * (-3.0 + w)))) / 5040.0;\n weight[2] = (82903.0 / 1792.0 + w * (-4177.0 / 32.0 + w\n * (2275.0 / 16.0 + w * (-487.0 / 8.0 + w * (-85.0 / 8.0 + w\n * (41.0 / 2.0 + w * (\n -5.0 + w * (-2.0 + w)))))))) / 1440.0\n weight[3] = (310661.0 / 1792.0 - w * (14219.0 / 64.0 + w\n * (-199.0 / 8.0 + w * (-1327.0 / 16.0 + w * (245.0 / 8.0 + w\n * (53.0 / 4.0 + w * (\n -8.0 + w * (-1.0 + w)))))))) / 720.0\n weight[4] = (2337507.0 / 8960.0 + w2 * (-2601.0 / 16.0 + w2\n * (387.0 / 8.0 + w2 * (-9.0 + w2)))) / 576.0\n weight[5] = (310661.0 / 1792.0 - w * (-14219.0 / 64.0 + w\n * (-199.0 / 8.0 + w * (1327.0 / 16.0 + w * (245.0 / 8.0 + w\n * (-53.0 / 4.0 + w * (\n -8.0 + w * (1.0 + w)))))))) / 720.0\n weight[7] = (39.0 / 16.0 - w * (-6.0 + w * (-9.0 / 2.0 + w2)))*(21.0 / 16.0 + w * (15.0 / 4.0 + w * (9.0 / 2.0 + w\n * (3.0 + w)))) / 5040.0\n weight[8] = 1.0 / 2.0 + w\n weight[8] *= weight[8]\n weight[8] *= weight[8]\n weight[8] *= weight[8] / 40320.0\n weight[6] = 1.0 - weight[0] - weight[1] - weight[2] - weight[3] - weight[4] - weight[5] - weight[7] - weight[8]\n elif self.n==9:\n w = x - index[4].float()\n weight[0] = 1.0 - w\n weight[0] *= weight[0]\n weight[0] *= weight[0]\n weight[0] *= weight[0] * (1.0 - w) / 362880.0\n weight[1] = (502.0 / 9.0 + w * (-246.0 + w * (472.0 + w\n * (-504.0 + w * (308.0 + w * (-84.0 + w * (-56.0 / 3.0 + w\n * (24.0 + w * (\n -8.0 + w))))))))) / 40320.0\n weight[2] = (3652.0 / 9.0 - w * (2023.0 / 2.0 + w * (-952.0 + w\n * (938.0 / 3.0 + w * (112.0 + w * (-119.0 + w * (56.0 / 3.0 + w\n * (14.0 + w * (\n -7.0 + w))))))))) / 10080.0\n weight[3] = (44117.0 / 42.0 + w * (-2427.0 / 2.0 + w * (66.0 + w\n * (434.0 + w * (-129.0 + w * (-69.0 + w * (34.0 + w * (6.0 + w\n * (-6.0 + w))))))))) / 4320.0\n w2 = w * w\n weight[4] = (78095.0 / 63.0 - w2 * (700.0 + w2 * (-190.0 + w2\n * (100.0 / 3.0 + w2 * (-5.0 + w))))) / 2880.0\n weight[5] = (44117.0 / 63.0 + w * (809.0 + w * (44.0 + w\n * (-868.0 / 3.0 + w * (-86.0 + w * (46.0 + w * (68.0 / 3.0 + w\n * (-4.0 + w * (\n -4.0 + w))))))))) / 2880.0\n weight[6] = (3652.0 / 21.0 - w * (-867.0 / 2.0 + w * (-408.0 + w\n * (-134.0 + w * (48.0 + w * (51.0 + w * (-4.0 + w) * (-1.0 + w)\n * (2.0 + w))))))) / 4320.0\n weight[7] = (251.0 / 18.0 + w * (123.0 / 2.0 + w * (118.0 + w\n * (126.0 + w * (77.0 + w * (21.0 + w * (-14.0 / 3.0 + w\n * (-6.0 + w * (\n -2.0 + w))))))))) / 10080.0\n weight[9] = w2 * w2\n weight[9] *= weight[9] * w / 362880.0\n weight[8] = 1.0 - weight[0] - weight[1] - weight[2] - weight[3]- weight[4] - weight[5] - weight[6] - weight[7] - weight[9]\n else:\n raise ValueError('Unsupported spline order')\n\n return index,weight", "def get_infected(self):\n return [n[0] for n in self.G.nodes.items() if n[1]['active']]", "def init_weights(self):\r\n self.weights = [0 for i in range(len(self.inputs[0][0]))]", "def weight_loss(self):\n weight_loss_hash = {}\n for key in self.graph.nodes():\n print key, type(key),self.candidates[40961]['weight_change'],self.canid\n weight_loss_hash[key] = float(self.candidates[key]['weight_change'])\n\n weight_loss_nodes = self._annotate_graph(weight_loss_hash, \"weight_change\")", "def update_weights(self) -> None:\n for neuron in self.__neurons__:\n neuron.update_weight(self.__inputs__)", "def get_weights(self):\n\n weights = []\n for layer in self.NN:\n for node in layer:\n for weight in node.weights:\n weights.append(weight)\n return weights", "def build_node_index(self):\n self.tile_wire_index_to_node_index = {}\n for node_idx, node in 
enumerate(self.device_resource_capnp.nodes):\n for wire_idx in node.wires:\n wire = self.device_resource_capnp.wires[wire_idx]\n key = wire.tile, wire.wire\n self.tile_wire_index_to_node_index[key] = node_idx", "def feed_forward(self,x_i):\n\n n = len(self.specs)\n for i in range(n):\n if i == 0:\n self[i] = x_i\n else:\n ips = self[i-1] + [1] #bias\n for neu in self.layers[i]: #can parallelize\n i_m = Matrix([[ip] for ip in ips])\n out = neu.params.T()*i_m\n neu.out = out.array[0][0]", "def fwd(self, x):\n\n # Check for consistency\n errstring = self.consist('som', x)\n if errstring != None:\n raise Exception(errstring)\n\n # Turn nodes into matrix of centres\n nodes = np.reshape(self.map, (self.nin, self.num_nodes), order='F').T\n # Compute squared distance matrix\n d2 = dist2(x, nodes)\n # Find winning node for each pattern: minimum value in each row\n win_nodes = np.argmin(d2, 1)\n w = np.min(d2, 1)\n return d2, win_nodes", "def _weight(root, tower):\n weight, leaves = tower[root]\n weights = []\n for leaf in leaves:\n w, _ = _weight(leaf, tower)\n weights.append(w)\n return weight + sum(weights), weights", "def get_weight_variables(self):\n var_names = [pv['name'] for pv in self.data['reference']]\n var_weight = {var: weight for var, weight in zip(var_names, self.data['feat_weights'])}\n return var_weight" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
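The core selection step in the row above is just an argsort over the neighbors' structure scores; a minimal numpy sketch with made-up values:

import numpy as np

neighbors = np.array([4, 7, 9, 12])
struct_scores = np.array([0.8, 0.1, 0.5, 0.3])
n = 2

# Keep the n neighbors with the lowest structure score as influencers.
influence_nodes = neighbors[np.argsort(struct_scores)[:n]]
print(influence_nodes)  # [ 7 12]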
Compute the updated A_hat_square_uv entries that would result from inserting/deleting the input edges, for every edge.
def compute_new_a_hat_uv(self, potential_edges):
    edges = np.transpose(self.modified_adj.nonzero())
    edges_set = {tuple(e) for e in edges}
    A_hat_sq = self.adj_norm @ self.adj_norm
    values_before = A_hat_sq[self.target].toarray()[0]
    node_ixs = np.unique(edges[:, 0], return_index=True)[1].astype("int32")
    twohop_ixs = np.transpose(A_hat_sq.nonzero())
    degrees = self.modified_adj.sum(0).A1 + 1

    # Ignore warnings:
    #     NumbaPendingDeprecationWarning:
    #     Encountered the use of a type that is scheduled for deprecation: type 'reflected set' found for argument 'edges_set' of function 'compute_new_a_hat_uv'.
    #     For more information visit http://numba.pydata.org/numba-doc/latest/reference/deprecation.html#deprecation-of-reflection-for-list-and-set-types
    with warnings.catch_warnings(record=True):
        warnings.filterwarnings(
            'ignore',
            '.*Encountered the use of a type that is scheduled for deprecation*'
        )
        ixs, vals = compute_new_a_hat_uv(edges, node_ixs, edges_set, twohop_ixs,
                                         values_before, degrees,
                                         potential_edges, self.target)

    ixs_arr = np.array(ixs)
    a_hat_uv = sp.coo_matrix((vals, (ixs_arr[:, 0], ixs_arr[:, 1])),
                             shape=[len(potential_edges), self.num_nodes])

    return a_hat_uv
[ "def compute_new_a_hat_uv(edge_ixs, node_nb_ixs, edges_set, twohop_ixs,\r\n values_before, degs, potential_edges, u):\r\n num_nodes = degs.shape[0]\r\n\r\n twohop_u = twohop_ixs[twohop_ixs[:, 0] == u, 1]\r\n nbs_u = edge_ixs[edge_ixs[:, 0] == u, 1]\r\n nbs_u_set = set(nbs_u)\r\n\r\n return_ixs = []\r\n return_values = []\r\n\r\n for ix in range(len(potential_edges)):\r\n edge = potential_edges[ix]\r\n edge_set = set(edge)\r\n degs_new = degs.copy()\r\n delta = -2 * ((edge[0], edge[1]) in edges_set) + 1\r\n degs_new[edge] += delta\r\n\r\n nbs_edge0 = edge_ixs[edge_ixs[:, 0] == edge[0], 1]\r\n nbs_edge1 = edge_ixs[edge_ixs[:, 0] == edge[1], 1]\r\n\r\n affected_nodes = set(np.concatenate((twohop_u, nbs_edge0, nbs_edge1)))\r\n affected_nodes = affected_nodes.union(edge_set)\r\n a_um = edge[0] in nbs_u_set\r\n a_un = edge[1] in nbs_u_set\r\n\r\n a_un_after = connected_after(u, edge[0], a_un, delta)\r\n a_um_after = connected_after(u, edge[1], a_um, delta)\r\n\r\n for v in affected_nodes:\r\n a_uv_before = v in nbs_u_set\r\n a_uv_before_sl = a_uv_before or v == u\r\n\r\n if v in edge_set and u in edge_set and u != v:\r\n if delta == -1:\r\n a_uv_after = False\r\n else:\r\n a_uv_after = True\r\n else:\r\n a_uv_after = a_uv_before\r\n a_uv_after_sl = a_uv_after or v == u\r\n\r\n from_ix = node_nb_ixs[v]\r\n to_ix = node_nb_ixs[v + 1] if v < num_nodes - 1 else len(edge_ixs)\r\n node_nbs = edge_ixs[from_ix:to_ix, 1]\r\n node_nbs_set = set(node_nbs)\r\n a_vm_before = edge[0] in node_nbs_set\r\n\r\n a_vn_before = edge[1] in node_nbs_set\r\n a_vn_after = connected_after(v, edge[0], a_vn_before, delta)\r\n a_vm_after = connected_after(v, edge[1], a_vm_before, delta)\r\n\r\n mult_term = 1 / np.sqrt(degs_new[u] * degs_new[v])\r\n\r\n sum_term1 = np.sqrt(degs[u] * degs[v]) * values_before[v] - a_uv_before_sl / degs[u] - a_uv_before / \\\r\n degs[v]\r\n sum_term2 = a_uv_after / degs_new[v] + a_uv_after_sl / degs_new[u]\r\n sum_term3 = -((a_um and a_vm_before) / degs[edge[0]]) + (\r\n a_um_after and a_vm_after) / degs_new[edge[0]]\r\n sum_term4 = -((a_un and a_vn_before) / degs[edge[1]]) + (\r\n a_un_after and a_vn_after) / degs_new[edge[1]]\r\n new_val = mult_term * (sum_term1 + sum_term2 + sum_term3 +\r\n sum_term4)\r\n\r\n return_ixs.append((ix, v))\r\n return_values.append(new_val)\r\n\r\n return return_ixs, return_values", "def get_Hv():\n \n vn = np.zeros((nx,ny+1)) \n vs = np.zeros((nx,ny+1))\n ve = np.zeros((nx,ny+1))\n vw = np.zeros((nx,ny+1))\n ue = np.zeros((nx,ny+1))\n uw = np.zeros((nx,ny+1))\n τyyn = np.zeros((nx,ny+1))\n τyys = np.zeros((nx,ny+1))\n τyxe = np.zeros((nx,ny+1))\n τyxw = np.zeros((nx,ny+1))\n Hv = np.zeros((nx,ny+1))\n \n j = np.arange(1,ny) # v-cell centers in domain interior\n \n vn[:,j] = (v[:,j+1] + v[:,j])/2\n vs[:,j] = (v[:,j] + v[:,j-1])/2\n \n i = np.arange(0,nx-1)\n ve[IJ(i,j)] = (v[IJ(i+1,j)] + v[IJ(i,j)])/2\n ve[nx-1,j] = vbc_r\n i = np.arange(1,nx)\n vw[IJ(i,j)] = (v[IJ(i,j)] + v[IJ(i-1,j)])/2\n vw[0,j] = vbc_l\n \n i = np.arange(0,nx)\n ue[IJ(i,j)] = (u[IJ(i+1,j-1)] + u[IJ(i+1,j)])/2\n uw[IJ(i,j)] = (u[IJ(i,j-1)] + u[IJ(i,j)]) /2\n \n τyyn[:,j] = -2*ν*(v[:,j+1] - v[:,j]) /Δy\n τyys[:,j] = -2*ν*(v[:,j] - v[:,j-1])/Δy\n \n i = np.arange(0,nx-1)\n τyxe[IJ(i,j)] = -ν*(v[IJ(i+1,j)]-v[IJ(i,j)])/Δx - ν*(u[IJ(i+1,j)]-u[IJ(i+1,j-1)])/Δy\n τyxe[nx-1,j] = -ν*(vbc_r-v[nx-1,j])/(Δx/2) - ν*(u[nx,j]-u[nx,j-1])/Δy \n \n i = np.arange(1,nx)\n τyxw[IJ(i,j)] = -ν*(v[IJ(i,j)]-v[IJ(i-1,j)])/Δx - ν*(u[IJ(i,j)]-u[IJ(i,j-1)])/Δy\n τyxw[0,j] = -ν*(v[0,j]-vbc_l)/(Δx/2) - 
ν*(u[0,j]-u[0,j-1])/Δy\n \n Hv[:,j] = -((vn[:,j]*vn[:,j] - vs[:,j]*vs[:,j])/Δy + (ve[:,j]*ue[:,j] - vw[:,j]*uw[:,j])/Δx) \\\n -((τyyn[:,j] - τyys[:,j])/Δy + (τyxe[:,j] - τyxw[:,j])/Δx)\n \n return Hv", "def get_Hu():\n \n ue = np.zeros((nx+1,ny)) \n uw = np.zeros((nx+1,ny))\n un = np.zeros((nx+1,ny))\n us = np.zeros((nx+1,ny))\n vn = np.zeros((nx+1,ny))\n vs = np.zeros((nx+1,ny))\n τxxe = np.zeros((nx+1,ny))\n τxxw = np.zeros((nx+1,ny))\n τxyn = np.zeros((nx+1,ny))\n τxys = np.zeros((nx+1,ny))\n Hu = np.zeros((nx+1,ny))\n \n i = np.arange(1,nx) # u-cell centers in domain interior\n \n ue[i,:] = (u[i+1,:] + u[i,:])/2\n uw[i,:] = (u[i,:] + u[i-1,:])/2\n \n j = np.arange(0,ny-1)\n un[IJ(i,j)] = (u[IJ(i,j+1)] + u[IJ(i,j)])/2\n un[i,ny-1] = ubc_t\n j = np.arange(1,ny)\n us[IJ(i,j)] = (u[IJ(i,j)] + u[IJ(i,j-1)])/2\n us[i,0] = ubc_b\n \n j = np.arange(0,ny)\n vn[IJ(i,j)] = (v[IJ(i-1,j+1)]+v[IJ(i,j+1)])/2\n vs[IJ(i,j)] = (v[IJ(i-1,j)] +v[IJ(i,j)]) /2\n \n τxxe[i,:] = -2*ν*(u[i+1,:] - u[i,:]) /Δx\n τxxw[i,:] = -2*ν*(u[i,:] - u[i-1,:])/Δx\n \n j = np.arange(0,ny-1)\n τxyn[IJ(i,j)] = -ν*(u[IJ(i,j+1)]-u[IJ(i,j)])/Δy - ν*(v[IJ(i,j+1)]-v[IJ(i-1,j+1)])/Δx\n τxyn[i,ny-1] = -ν*(ubc_t-u[i,ny-1])/(Δy/2) - ν*(v[i,ny]-v[i-1,ny])/Δx \n \n j = np.arange(1,ny)\n τxys[IJ(i,j)] = -ν*(u[IJ(i,j)]-u[IJ(i,j-1)])/Δy - ν*(v[IJ(i,j)]-v[IJ(i-1,j)])/Δx\n τxys[i,0] = -ν*(u[i,0]-ubc_b)/(Δy/2) - ν*(v[i,0]-v[i-1,0])/Δx\n \n Hu[i,:] = -((ue[i,:]*ue[i,:] - uw[i,:]*uw[i,:])/Δx + (un[i,:]*vn[i,:] - us[i,:]*vs[i,:])/Δy) \\\n -((τxxe[i,:] - τxxw[i,:])/Δx + (τxyn[i,:] - τxys[i,:])/Δy)\n \n return Hu", "def update_feature_vectors(self, h, edge_index, messages):\n\n row, col = edge_index\n #message_aggregate = unsorted_segment_sum(messages, row, num_segments = h.size(0), device = self.device)\n message_aggregate = unsorted_segment_sum(messages, row, num_segments = h.size(1), device = self.device)\n feature_inputs = torch.cat([h, message_aggregate], dim = 2)\n #feature_inputs = torch.cat([h, message_aggregate], dim = 1)\n feature_inputs = feature_inputs.to(self.device)\n out = self.feature_mlp(feature_inputs)\n out = out.to(self.device)\n return out, message_aggregate", "def add_edge_table(self, etab):\n add = ([],[]) # list of edges and h-edges to add\n remove = [] # list of edges to remove\n for (v1,v2),(n1,n2) in etab.items():\n conn_type = self.edge_type(self.edge(v1,v2))\n if conn_type == 1: n1 += 1 #and add to the relevant edge count\n elif conn_type == 2: n2 += 1\n \n t1 = self.type(v1)\n t2 = self.type(v2)\n if (t1 == 1 and t2 == 1) or (t1 == 2 and t2 == 2): #types are ZX & equal,\n n1 = bool(n1) #so normal edges fuse\n pairs, n2 = divmod(n2,2)#while hadamard edges go modulo 2\n self.scalar.add_power(-2*pairs)\n if n1 != 0 and n2 != 0: #reduction rule for when both edges appear\n new_type = 1\n self.add_to_phase(v1, 1)\n self.scalar.add_power(-1)\n elif n1 != 0: new_type = 1\n elif n2 != 0: new_type = 2\n else: new_type = 0\n elif (t1 == 1 and t2 == 2) or (t1 == 2 and t2 == 1): #types are ZX & different\n pairs, n1 = divmod(n1,2)#so normal edges go modulo 2\n n2 = bool(n2) #while hadamard edges fuse\n self.scalar.add_power(-2*pairs)\n if n1 != 0 and n2 != 0: #reduction rule for when both edges appear\n new_type = 2\n self.add_to_phase(v1, 1)\n self.scalar.add_power(-1)\n elif n1 != 0: new_type = 1\n elif n2 != 0: new_type = 2\n else: new_type = 0\n elif (t1 == 1 and t2 == 3) or (t1 == 3 and t2 == 1): # Z & H-box\n n1 = bool(n1)\n if n1 + n2 > 1:\n raise ValueError(\"Unhandled parallel edges between nodes of type (%s,%s)\" % 
(t1,t2))\n else:\n if n1 == 1: new_type = 1\n elif n2 == 1: new_type = 2\n else: new_type = 0\n else:\n if n1 + n2 > 1:\n raise ValueError(\"Unhandled parallel edges between nodes of type (%s,%s)\" % (t1,t2))\n else:\n if n1 == 1: new_type = 1\n elif n2 == 1: new_type = 2\n else: new_type = 0\n\n\n if new_type != 0: # They should be connected, so update the graph\n if conn_type == 0: #new edge added\n add[new_type-1].append((v1,v2))\n elif conn_type != new_type: #type of edge has changed\n self.set_edge_type(self.edge(v1,v2), new_type)\n elif conn_type != 0: #They were connected, but not anymore, so update the graph\n remove.append(self.edge(v1,v2))\n\n self.remove_edges(remove)\n self.add_edges(add[0],1)\n self.add_edges(add[1],2)", "def reconstruct_hankel(u, s, vh):\n return np.dot(u * s, vh)", "def _update_ph(self, pop):\n for sol, fit in pop[:self._num_ants_ph]:\n ph = 1.0 / (1 + fit)\n for s in range(self._sol_length):\n self._pheromones[s][sol[s]] += ph", "def update_H(self):\n X_div = self.update_data_matrix()\n normalized_rows = normalize(self.W.T, norm = 'l1', axis = 1)\n matrix_mul = np.matmul(normalized_rows, X_div)\n self.H = np.multiply(self.H, matrix_mul)", "def _update_hyperplanes(self):\n self.hyperplanes = np.empty((self.triangulation.nsimplex,\n self.input_dim, self.input_dim),\n dtype=config.np_dtype)\n\n # Use that the bottom-left rectangle has the index zero, so that the\n # index numbers of scipy correspond to ours.\n for i, simplex in enumerate(self.unit_simplices):\n simplex_points = self.discretization.index_to_state(simplex)\n self.hyperplanes[i] = np.linalg.inv(simplex_points[1:] -\n simplex_points[:1])", "def updateFromAdjacentSquare(self, square):\r\n if square.mined():\r\n self.numberOfAdjacentMines += 1", "def update_euclidean(self, U, V):\n X = self.X\n W = self.W\n lmbda = self.lmbda\n L = self.L\n D = self.D\n # update V\n V = V * np.divide(U.T @ X + lmbda * (V @ W), U.T @ U @ V + lmbda * (V @ D))\n # update U\n U = U * np.divide(X @ V.T, U @ V @ V.T)\n # calc objective func\n R = X - (U @ V)\n obj_val = np.sum(R * R) + lmbda * np.trace(V @ L @ V.T)\n return(U, V, obj_val)", "def update_estimation(self):\n self.X_hat = np.matmul(np.transpose(self.U), self.V)\n if self.consider_explicit:\n for i in range(self.n):\n for j in range(self.n):\n self.X_hat[i][j] += np.dot(self.G[i][j], self.theta)", "def upwind_edge(self) :\n\n self.x_down= np.zeros((2,4,4))\n self.x_up = np.zeros((2,4,4))\n self.y_down = np.zeros((2,4,4))\n self.y_up = np.zeros((2,4,4))\n\n x_down_i = np.array([[2,3],[0,1]])\n x_up_i = np.array([[0,1],[2,3]])\n x_down_j = np.array([[2,3],[0,1]])\n x_up_j = np.array([[2,3],[0,1]])\n\n y_down_i = np.array([[1,3],[0,2]])\n y_up_i = np.array([[0,2],[1,3]])\n y_down_j = np.array([[1,3],[0,2]])\n y_up_j = np.array([[1,3],[0,2]])\n\n for k in xrange(0,2) :\n for i in xrange(0,2) :\n for j in xrange(0,2) :\n self.x_down[k,x_down_i[k,i],x_down_j[k,j]] = (-1)**k *\\\n self.fe.vertical_edge_mass_matrix[i,j]\n self.x_up[k,x_up_i[k,i],x_up_j[k,j]] = (-1)**(k+1) *\\\n self.fe.vertical_edge_mass_matrix[i,j]\n self.y_down[k,y_down_i[k,i],y_down_j[k,j]] = (-1)**k *\\\n self.fe.horizontal_edge_mass_matrix[i,j]\n self.y_up[k,y_up_i[k,i],y_up_j[k,j]] = (-1)**(k+1) *\\\n self.fe.horizontal_edge_mass_matrix[i,j]", "def updating_ghost_cells(self,uu):\n\n\n for i in range(1, self.Nx +1):\n uu[i,0] = uu[i,2]\n uu[i,self.Ny+1] = uu[i,self.Ny-1]\n\n\n for j in range(1, self.Ny +1):\n uu[0,j] = uu[2,j]\n uu[self.Nx+1,j] = uu[self.Nx-1,j]\n \"\"\"\n\n uu[1:-1,0] = 
uu[1:-1,2]\n uu[1:-1,self.Ny+1] = uu[1:-1,self.Ny-1]\n uu[0,1:-1] = uu[2,1:-1]\n uu[self.Nx+1,1:-1] = uu[self.Nx-1,1:-1]\n \"\"\"", "def _update_weighted_matrix(self) -> None:\n self.weighted_map = deepcopy(self.map)\n for connection in self.weighted_map:\n connections = self.weighted_map[connection]\n connections_count = sum(list(connections.values()))\n for key in self.weighted_map[connection]:\n self.weighted_map[connection][key] /= connections_count", "def normalise(self):\n if not self.inputs:\n self.auto_detect_inputs()\n max_r = self.depth() - 1\n if max_r <= 2: \n for o in self.outputs:\n self.set_row(o,4)\n max_r = self.depth() -1\n claimed = []\n for q,i in enumerate(sorted(self.inputs, key=self.qubit)):\n self.set_row(i,0)\n self.set_qubit(i,q)\n #q = self.qubit(i)\n n = list(self.neighbours(i))[0]\n if self.type(n) in (1,2):\n claimed.append(n)\n self.set_row(n,1)\n self.set_qubit(n, q)\n else: #directly connected to output\n e = self.edge(i, n)\n t = self.edge_type(e)\n self.remove_edge(e)\n v = self.add_vertex(1,q,1)\n self.add_edge((i,v),3-t)\n self.add_edge((v,n), 2)\n claimed.append(v)\n for q, o in enumerate(sorted(self.outputs,key=self.qubit)):\n #q = self.qubit(o)\n self.set_row(o,max_r+1)\n self.set_qubit(o,q)\n n = list(self.neighbours(o))[0]\n if n not in claimed:\n self.set_row(n,max_r)\n self.set_qubit(n, q)\n else:\n e = self.edge(o, n)\n t = self.edge_type(e)\n self.remove_edge(e)\n v = self.add_vertex(1,q,max_r)\n self.add_edge((o,v),3-t)\n self.add_edge((v,n), 2)\n\n self.pack_circuit_rows()", "def _dense_convolve_multi_uv(z_hat, uv):\n u, v = uv\n n_channels, = u.shape[1:]\n n_atoms, *valid_support = z_hat.shape\n n_atoms, *atom_support = v.shape\n\n Xi = np.zeros((n_channels, *get_full_support(valid_support, atom_support)))\n\n for zik, uk, vk in zip(z_hat, u, v):\n zik_vk = signal.fftconvolve(zik, vk)\n # Add a new dimension for each dimension in atom_support to uk\n uk = uk.reshape(*uk.shape, *(1,) * len(atom_support))\n Xi += zik_vk[None, :] * uk\n\n return Xi", "def normalize(self):\r\n next_idx = 3\r\n for x in range(self.h):\r\n for y in range(self.w):\r\n if self.board[x][y] == next_idx:\r\n next_idx += 1\r\n elif self.board[x][y] > next_idx:\r\n self.__swap_idx(next_idx, self.board[x][y])\r\n next_idx += 1", "def hessian_analytic(\n self,\n model\n ) -> tf.Tensor:\n\n def _aa_byobs_batched(model):\n \"\"\"\n Compute the mean model diagonal block of the\n closed form hessian of base_glm_all model by observation across features\n for a batch of observations.\n \"\"\"\n W = self._weight_hessian_aa( # [observations x features]\n X=model.X,\n loc=model.model_loc,\n scale=model.model_scale,\n )\n # The computation of the hessian block requires two outer products between\n # feature-wise constants and the coefficient wise design matrix entries, for each observation.\n # The resulting tensor is observations x features x coefficients x coefficients which\n # is too large too store in memory in most cases. 
However, the full 4D tensor is never\n # actually needed but only its marginal across features, the final hessian block shape.\n # Here, we use the einsum to efficiently perform the two outer products and the marginalisation.\n if self.constraints_loc is not None:\n XH = tf.matmul(model.design_loc, model.constraints_loc)\n else:\n XH = model.design_loc\n\n Hblock = tf.einsum('ofc,od->fcd',\n tf.einsum('of,oc->ofc', W, XH),\n XH)\n return Hblock\n\n def _bb_byobs_batched(model):\n \"\"\"\n Compute the dispersion model diagonal block of the\n closed form hessian of base_glm_all model by observation across features.\n \"\"\"\n W = self._weight_hessian_bb( # [observations=1 x features]\n X=model.X,\n loc=model.model_loc,\n scale=model.model_scale,\n )\n # The computation of the hessian block requires two outer products between\n # feature-wise constants and the coefficient wise design matrix entries, for each observation.\n # The resulting tensor is observations x features x coefficients x coefficients which\n # is too large too store in memory in most cases. However, the full 4D tensor is never\n # actually needed but only its marginal across features, the final hessian block shape.\n # Here, we use the Einstein summation to efficiently perform the two outer products and the marginalisation.\n if self.constraints_scale is not None:\n XH = tf.matmul(model.design_scale, model.constraints_scale)\n else:\n XH = model.design_scale\n\n Hblock = tf.einsum('ofc,od->fcd',\n tf.einsum('of,oc->ofc', W, XH),\n XH)\n return Hblock\n\n def _ab_byobs_batched(model):\n \"\"\"\n Compute the mean-dispersion model off-diagonal block of the\n closed form hessian of base_glm_all model by observastion across features.\n\n Note that there are two blocks of the same size which can\n be compute from each other with a transpose operation as\n the hessian is symmetric.\n \"\"\"\n W = self._weight_hessian_ab( # [observations=1 x features]\n X=model.X,\n loc=model.model_loc,\n scale=model.model_scale,\n )\n # The computation of the hessian block requires two outer products between\n # feature-wise constants and the coefficient wise design matrix entries, for each observation.\n # The resulting tensor is observations x features x coefficients x coefficients which\n # is too large too store in memory in most cases. However, the full 4D tensor is never\n # actually needed but only its marginal across features, the final hessian block shape.\n # Here, we use the Einstein summation to efficiently perform the two outer products and the marginalisation.\n if self.constraints_loc is not None:\n XHloc = tf.matmul(model.design_loc, model.constraints_loc)\n else:\n XHloc = model.design_loc\n\n if self.constraints_scale is not None:\n XHscale = tf.matmul(model.design_scale, model.constraints_scale)\n else:\n XHscale = model.design_scale\n\n Hblock = tf.einsum('ofc,od->fcd',\n tf.einsum('of,oc->ofc', W, XHloc),\n XHscale)\n return Hblock\n\n if self.compute_a and self.compute_b:\n H_aa = _aa_byobs_batched(model=model)\n H_bb = _bb_byobs_batched(model=model)\n H_ab = _ab_byobs_batched(model=model)\n H_ba = tf.transpose(H_ab, perm=[0, 2, 1])\n H = tf.concat(\n [tf.concat([H_aa, H_ab], axis=2),\n tf.concat([H_ba, H_bb], axis=2)],\n axis=1\n )\n elif self.compute_a and not self.compute_b:\n H = _aa_byobs_batched(model=model)\n elif not self.compute_a and self.compute_b:\n H = _bb_byobs_batched(model=model)\n else:\n H = tf.zeros((), dtype=self.dtype)\n\n return H" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the new values [A_hat_square]_u for every potential edge, where u is the target node. Cf. Theorem 5.1, Equation 17.
def compute_new_a_hat_uv(edge_ixs, node_nb_ixs, edges_set, twohop_ixs, values_before, degs, potential_edges, u):
    num_nodes = degs.shape[0]

    twohop_u = twohop_ixs[twohop_ixs[:, 0] == u, 1]
    nbs_u = edge_ixs[edge_ixs[:, 0] == u, 1]
    nbs_u_set = set(nbs_u)

    return_ixs = []
    return_values = []

    for ix in range(len(potential_edges)):
        edge = potential_edges[ix]
        edge_set = set(edge)
        degs_new = degs.copy()
        delta = -2 * ((edge[0], edge[1]) in edges_set) + 1
        degs_new[edge] += delta

        nbs_edge0 = edge_ixs[edge_ixs[:, 0] == edge[0], 1]
        nbs_edge1 = edge_ixs[edge_ixs[:, 0] == edge[1], 1]

        affected_nodes = set(np.concatenate((twohop_u, nbs_edge0, nbs_edge1)))
        affected_nodes = affected_nodes.union(edge_set)

        a_um = edge[0] in nbs_u_set
        a_un = edge[1] in nbs_u_set

        a_un_after = connected_after(u, edge[0], a_un, delta)
        a_um_after = connected_after(u, edge[1], a_um, delta)

        for v in affected_nodes:
            a_uv_before = v in nbs_u_set
            a_uv_before_sl = a_uv_before or v == u

            if v in edge_set and u in edge_set and u != v:
                if delta == -1:
                    a_uv_after = False
                else:
                    a_uv_after = True
            else:
                a_uv_after = a_uv_before
            a_uv_after_sl = a_uv_after or v == u

            from_ix = node_nb_ixs[v]
            to_ix = node_nb_ixs[v + 1] if v < num_nodes - 1 else len(edge_ixs)
            node_nbs = edge_ixs[from_ix:to_ix, 1]
            node_nbs_set = set(node_nbs)

            a_vm_before = edge[0] in node_nbs_set
            a_vn_before = edge[1] in node_nbs_set
            a_vn_after = connected_after(v, edge[0], a_vn_before, delta)
            a_vm_after = connected_after(v, edge[1], a_vm_before, delta)

            mult_term = 1 / np.sqrt(degs_new[u] * degs_new[v])
            sum_term1 = np.sqrt(degs[u] * degs[v]) * values_before[v] - a_uv_before_sl / degs[u] - a_uv_before / degs[v]
            sum_term2 = a_uv_after / degs_new[v] + a_uv_after_sl / degs_new[u]
            sum_term3 = -((a_um and a_vm_before) / degs[edge[0]]) + (a_um_after and a_vm_after) / degs_new[edge[0]]
            sum_term4 = -((a_un and a_vn_before) / degs[edge[1]]) + (a_un_after and a_vn_after) / degs_new[edge[1]]
            new_val = mult_term * (sum_term1 + sum_term2 + sum_term3 + sum_term4)

            return_ixs.append((ix, v))
            return_values.append(new_val)

    return return_ixs, return_values
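For context, the (edge index, node) pairs and values returned above are sparse entries of the updated rows of the squared normalized adjacency matrix, one row per candidate edge flip. A minimal assembly sketch, assuming NumPy and SciPy are available; the helper name and shapes are illustrative and not part of the dataset:

import numpy as np
import scipy.sparse as sp

def assemble_a_hat_uv(ixs, vals, n_potential_edges, num_nodes):
    # Pack the sparse (row=potential edge, col=node) entries into one matrix,
    # so row k holds the new [A_hat^2]_u values after flipping potential_edges[k].
    ixs_arr = np.array(ixs)
    return sp.coo_matrix((vals, (ixs_arr[:, 0], ixs_arr[:, 1])),
                         shape=(n_potential_edges, num_nodes))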
[ "def compute_new_a_hat_uv(self, potential_edges):\r\n\r\n edges = np.transpose(self.modified_adj.nonzero())\r\n edges_set = {tuple(e) for e in edges}\r\n A_hat_sq = self.adj_norm @ self.adj_norm\r\n values_before = A_hat_sq[self.target].toarray()[0]\r\n node_ixs = np.unique(edges[:, 0], return_index=True)[1].astype(\"int32\")\r\n twohop_ixs = np.transpose(A_hat_sq.nonzero())\r\n degrees = self.modified_adj.sum(0).A1 + 1\r\n\r\n # Ignore warnings:\r\n # NumbaPendingDeprecationWarning:\r\n # Encountered the use of a type that is scheduled for deprecation: type 'reflected set' found for argument 'edges_set' of function 'compute_new_a_hat_uv'.\r\n\r\n # For more information visit http://numba.pydata.org/numba-doc/latest/reference/deprecation.html#deprecation-of-reflection-for-list-and-set-types\r\n with warnings.catch_warnings(record=True):\r\n warnings.filterwarnings(\r\n 'ignore',\r\n '.*Encountered the use of a type that is scheduled for deprecation*'\r\n )\r\n ixs, vals = compute_new_a_hat_uv(edges, node_ixs, edges_set,\r\n twohop_ixs, values_before,\r\n degrees, potential_edges,\r\n self.target)\r\n ixs_arr = np.array(ixs)\r\n a_hat_uv = sp.coo_matrix((vals, (ixs_arr[:, 0], ixs_arr[:, 1])),\r\n shape=[len(potential_edges), self.num_nodes])\r\n\r\n return a_hat_uv", "def update_euclidean(self, U, V):\n X = self.X\n W = self.W\n lmbda = self.lmbda\n L = self.L\n D = self.D\n # update V\n V = V * np.divide(U.T @ X + lmbda * (V @ W), U.T @ U @ V + lmbda * (V @ D))\n # update U\n U = U * np.divide(X @ V.T, U @ V @ V.T)\n # calc objective func\n R = X - (U @ V)\n obj_val = np.sum(R * R) + lmbda * np.trace(V @ L @ V.T)\n return(U, V, obj_val)", "def get_Hu():\n \n ue = np.zeros((nx+1,ny)) \n uw = np.zeros((nx+1,ny))\n un = np.zeros((nx+1,ny))\n us = np.zeros((nx+1,ny))\n vn = np.zeros((nx+1,ny))\n vs = np.zeros((nx+1,ny))\n τxxe = np.zeros((nx+1,ny))\n τxxw = np.zeros((nx+1,ny))\n τxyn = np.zeros((nx+1,ny))\n τxys = np.zeros((nx+1,ny))\n Hu = np.zeros((nx+1,ny))\n \n i = np.arange(1,nx) # u-cell centers in domain interior\n \n ue[i,:] = (u[i+1,:] + u[i,:])/2\n uw[i,:] = (u[i,:] + u[i-1,:])/2\n \n j = np.arange(0,ny-1)\n un[IJ(i,j)] = (u[IJ(i,j+1)] + u[IJ(i,j)])/2\n un[i,ny-1] = ubc_t\n j = np.arange(1,ny)\n us[IJ(i,j)] = (u[IJ(i,j)] + u[IJ(i,j-1)])/2\n us[i,0] = ubc_b\n \n j = np.arange(0,ny)\n vn[IJ(i,j)] = (v[IJ(i-1,j+1)]+v[IJ(i,j+1)])/2\n vs[IJ(i,j)] = (v[IJ(i-1,j)] +v[IJ(i,j)]) /2\n \n τxxe[i,:] = -2*ν*(u[i+1,:] - u[i,:]) /Δx\n τxxw[i,:] = -2*ν*(u[i,:] - u[i-1,:])/Δx\n \n j = np.arange(0,ny-1)\n τxyn[IJ(i,j)] = -ν*(u[IJ(i,j+1)]-u[IJ(i,j)])/Δy - ν*(v[IJ(i,j+1)]-v[IJ(i-1,j+1)])/Δx\n τxyn[i,ny-1] = -ν*(ubc_t-u[i,ny-1])/(Δy/2) - ν*(v[i,ny]-v[i-1,ny])/Δx \n \n j = np.arange(1,ny)\n τxys[IJ(i,j)] = -ν*(u[IJ(i,j)]-u[IJ(i,j-1)])/Δy - ν*(v[IJ(i,j)]-v[IJ(i-1,j)])/Δx\n τxys[i,0] = -ν*(u[i,0]-ubc_b)/(Δy/2) - ν*(v[i,0]-v[i-1,0])/Δx\n \n Hu[i,:] = -((ue[i,:]*ue[i,:] - uw[i,:]*uw[i,:])/Δx + (un[i,:]*vn[i,:] - us[i,:]*vs[i,:])/Δy) \\\n -((τxxe[i,:] - τxxw[i,:])/Δx + (τxyn[i,:] - τxys[i,:])/Δy)\n \n return Hu", "def upwind_edge(self) :\n\n self.x_down= np.zeros((2,4,4))\n self.x_up = np.zeros((2,4,4))\n self.y_down = np.zeros((2,4,4))\n self.y_up = np.zeros((2,4,4))\n\n x_down_i = np.array([[2,3],[0,1]])\n x_up_i = np.array([[0,1],[2,3]])\n x_down_j = np.array([[2,3],[0,1]])\n x_up_j = np.array([[2,3],[0,1]])\n\n y_down_i = np.array([[1,3],[0,2]])\n y_up_i = np.array([[0,2],[1,3]])\n y_down_j = np.array([[1,3],[0,2]])\n y_up_j = np.array([[1,3],[0,2]])\n\n for k in xrange(0,2) :\n for i in xrange(0,2) :\n 
for j in xrange(0,2) :\n self.x_down[k,x_down_i[k,i],x_down_j[k,j]] = (-1)**k *\\\n self.fe.vertical_edge_mass_matrix[i,j]\n self.x_up[k,x_up_i[k,i],x_up_j[k,j]] = (-1)**(k+1) *\\\n self.fe.vertical_edge_mass_matrix[i,j]\n self.y_down[k,y_down_i[k,i],y_down_j[k,j]] = (-1)**k *\\\n self.fe.horizontal_edge_mass_matrix[i,j]\n self.y_up[k,y_up_i[k,i],y_up_j[k,j]] = (-1)**(k+1) *\\\n self.fe.horizontal_edge_mass_matrix[i,j]", "def shieldvalue(S, adj):\n\n\n eigvals, eigvecs = np.linalg.eig(adj)\n max_eig_ind = np.argmax(np.real(eigvals))\n\n lambd = np.real(eigvals[max_eig_ind])\n u = np.real(eigvecs[:, max_eig_ind])\n\n Sv_p = 0\n for i in S:\n Sv_p += 2*lambd*np.square(u[i])\n\n Sv_q = 0\n for i, j in itertools.combinations(S, 2):\n Sv_q += 2 * adj[i, j] * u[i] * u[j]\n\n return Sv_p - Sv_q", "def reconstruct_hankel(u, s, vh):\n return np.dot(u * s, vh)", "def updateFromAdjacentSquare(self, square):\r\n if square.mined():\r\n self.numberOfAdjacentMines += 1", "def __heuristic(self, game_map, square):\n if square.owner == 0 and square.strength > 0:\n return square.production / square.strength\n else:\n return sum(neighbor.strength\n for neighbor in game_map.neighbors(square)\n if neighbor.owner not in (0, self.bot_id))", "def acres_to_edge_of_square(acres):\n # sqft=acres*43560\n sqmtr=acres*4046.86\n return sqmtr", "def HamSN1D_Hamiltonian(t, u):\n x, y = u.T\n return 0.5*y*y + x**3/3 + 0.5*x*x", "def buildSquaresAndPeers():\n\t#A suduko board is numbered 1-9 and A-I\n\tcolumns = \"123456789\"\n\trows = \"ABCDEFGHI\"\n\n\t#List of all labeled \"squares\": 'A1', 'A2', ... ,'I9'\n\tsquares = cross(rows, columns)\n\n\t#List of \"units\", where a unit is a (column, row, box) that requires all \n\t# unique assignments to be avoid conflict.\n\tunitlist = ([cross(rows, c) for c in columns] +\n\t\t\t\t[cross(r, columns) for r in rows] +\n\t\t\t\t[cross(rs, cs) for rs in ('ABC','DEF','GHI') for cs in ('123','456','789')])\n\n\t#Dictionary to hold all units that a particular square lives in\n\tunits = dict((s, [u for u in unitlist if s in u]) for s in squares)\n\t\n\t#Dictionary maps squares to their respective peers\n\tpeers = dict((s, set(sum(units[s],[]))-set([s])) for s in squares)\n\n\treturn squares, peers", "def update_nodes(self):\n\n # get current level and problem description\n L = self.level\n P = L.prob\n\n # only if the level has been touched before\n assert L.status.unlocked\n\n # get number of collocation nodes for easier access\n M = self.coll.num_nodes\n\n # initialize integral terms with zeros, will add stuff later\n integral = [P.dtype_u(P.init, val=0.0) for l in range(M)]\n\n # gather all terms which are known already (e.g. 
from the previous iteration)\n # this corresponds to SF(u^k) - SdF(u^k) + tau (note: have integrals in pos and vel!)\n for m in range(M):\n for j in range(M + 1):\n # build RHS from f-terms (containing the E field) and the B field\n f = P.build_f(L.f[j], L.u[j], L.time + L.dt * self.coll.nodes[j - 1])\n # add SQF(u^k) - SxF(u^k) for the position\n integral[m].pos += L.dt * (L.dt * (self.SQ[m + 1, j] - self.Sx[m + 1, j]) * f)\n # add SF(u^k) - STF(u^k) for the velocity\n integral[m].vel += L.dt * (self.S[m + 1, j] - self.ST[m + 1, j]) * f\n # add tau if associated\n if L.tau[m] is not None:\n integral[m] += L.tau[m]\n # tau is 0-to-node, need to change it to node-to-node here\n if m > 0:\n integral[m] -= L.tau[m - 1]\n\n # do the sweep\n for m in range(0, M):\n # build rhs, consisting of the known values from above and new values from previous nodes (at k+1)\n tmp = P.dtype_u(integral[m])\n for j in range(m + 1):\n # build RHS from f-terms (containing the E field) and the B field\n f = P.build_f(L.f[j], L.u[j], L.time + L.dt * self.coll.nodes[j - 1])\n # add SxF(u^{k+1})\n tmp.pos += L.dt * (L.dt * self.Sx[m + 1, j] * f)\n # add pos at previous node + dt*v0\n tmp.pos += L.u[m].pos + L.dt * self.coll.delta_m[m] * L.u[0].vel\n # set new position, is explicit\n L.u[m + 1].pos = tmp.pos\n\n # get E field with new positions and compute mean\n L.f[m + 1] = P.eval_f(L.u[m + 1], L.time + L.dt * self.coll.nodes[m])\n\n ck = tmp.vel\n\n # do the boris scheme\n L.u[m + 1].vel = P.boris_solver(ck, L.dt * self.coll.delta_m[m], L.f[m], L.f[m + 1], L.u[m])\n\n # indicate presence of new values at this level\n L.status.updated = True\n\n return None", "def test_square_switch(self):\n self.problem = [[9, 0, 6, 0, 7, 0, 4, 0, 3],\n [0, 0, 0, 4, 0, 0, 2, 0, 0],\n [0, 7, 0, 0, 2, 3, 0, 1, 0],\n [5, 0, 0, 0, 0, 0, 1, 0, 0],\n [0, 4, 0, 2, 0, 8, 0, 6, 0],\n [0, 0, 3, 0, 0, 0, 0, 0, 5],\n [0, 3, 0, 7, 0, 0, 0, 5, 0],\n [0, 0, 7, 0, 0, 5, 0, 0, 0],\n [4, 0, 5, 0, 1, 0, 7, 0, 8]]\n\n self.solutions = [\n [[9, 2, 6, 5, 7, 1, 4, 8, 3],\n [3, 5, 1, 4, 8, 6, 2, 7, 9],\n [8, 7, 4, 9, 2, 3, 5, 1, 6],\n [5, 8, 2, 3, 6, 7, 1, 9, 4],\n [1, 4, 9, 2, 5, 8, 3, 6, 7],\n [7, 6, 3, 1, 9, 4, 8, 2, 5], # 9, 4\n [2, 3, 8, 7, 4, 9, 6, 5, 1], # 4, 9\n [6, 1, 7, 8, 3, 5, 9, 4, 2],\n [4, 9, 5, 6, 1, 2, 7, 3, 8]],\n\n [[9, 2, 6, 5, 7, 1, 4, 8, 3],\n [3, 5, 1, 4, 8, 6, 2, 7, 9],\n [8, 7, 4, 9, 2, 3, 5, 1, 6],\n [5, 8, 2, 3, 6, 7, 1, 9, 4],\n [1, 4, 9, 2, 5, 8, 3, 6, 7],\n [7, 6, 3, 1, 4, 9, 8, 2, 5], # 4, 9\n [2, 3, 8, 7, 9, 4, 6, 5, 1], # 9, 4\n [6, 1, 7, 8, 3, 5, 9, 4, 2],\n [4, 9, 5, 6, 1, 2, 7, 3, 8]]\n ]", "def get_attacker_nodes(self, n=5, add_additional_nodes=False):\r\n\r\n assert n < self.num_nodes - 1, \"number of influencers cannot be >= number of nodes in the graph!\"\r\n\r\n # neighbors = self.modified_adj[self.target].nonzero()[1]\r\n neighbors = self.modified_adj[self.target].indices\r\n # assert self.target not in neighbors\r\n\r\n potential_edges = np.column_stack((np.tile(self.target, len(neighbors)), neighbors)).astype(\"int32\")\r\n # The new A_hat_square_uv values that we would get if we removed the edge from u to each of the neighbors, respectively\r\n a_hat_uv = self.compute_new_a_hat_uv(potential_edges)\r\n\r\n XW = self.compute_XW()\r\n\r\n # compute the struct scores for all neighbors\r\n struct_scores = self.struct_score(a_hat_uv, XW)\r\n if len(neighbors) >= n: # do we have enough neighbors for the number of desired influencers?\r\n influence_nodes = neighbors[np.argsort(struct_scores)[:n]]\r\n if 
add_additional_nodes:\r\n return influence_nodes, np.array([])\r\n return influence_nodes\r\n else:\r\n influence_nodes = neighbors\r\n if add_additional_nodes: # Add additional influencers by connecting them to u first.\r\n # Compute the set of possible additional influencers, i.e. all nodes except the ones\r\n # that are already connected to u.\r\n poss_add_infl = np.setdiff1d(np.setdiff1d(np.arange(self.num_nodes), neighbors), self.target)\r\n n_possible_additional = len(poss_add_infl)\r\n n_additional_attackers = n - len(neighbors)\r\n possible_edges = np.column_stack((np.tile(self.target, n_possible_additional), poss_add_infl)).astype(\"int32\")\r\n\r\n # Compute the struct_scores for all possible additional influencers, and choose the one\r\n # with the best struct score.\r\n a_hat_uv_additional = self.compute_new_a_hat_uv(possible_edges)\r\n additional_struct_scores = self.struct_score(a_hat_uv_additional, XW)\r\n additional_influencers = poss_add_infl[np.argsort(additional_struct_scores)[-n_additional_attackers::]]\r\n\r\n return influence_nodes, additional_influencers\r\n else:\r\n return influence_nodes", "def _compute_q_uv(u, v, Graph, partition):\n\n num_edges = Graph.number_of_edges()\n degree_u = Graph.degree(u)\n degree_v = Graph.degree(v)\n community_u = partition.get(u)\n degree_community_u = _compute_degree_of_community(community_u,\n Graph,\n partition)\n community_v = partition.get(v)\n\n degree_community_v = _compute_degree_of_community(community_v,\n Graph,\n partition)\n\n # Compute the e values (Edges from node to community)\n e_u_Cu = _compute_e(u, community_u, Graph, partition)\n e_u_Cv = _compute_e(u, community_v, Graph, partition)\n e_v_Cu = _compute_e(v, community_u, Graph, partition)\n e_v_Cv = _compute_e(v, community_v, Graph, partition)\n\n A = 4 * (num_edges + 1) * (e_u_Cv + 1 - e_u_Cu)\n B = e_u_Cu\n C = (2 * degree_community_v) - (2 * degree_community_u) - e_u_Cu\n D = 2 * (degree_u + 1)\n E = degree_u + 1 + degree_community_v - degree_community_u\n\n delta_qu = A + (B * C) - (D * E)\n\n A = 4 * (num_edges + 1) * (e_v_Cu + 1 - e_v_Cv)\n B = e_v_Cv\n C = (2 * degree_community_u) - (2 * degree_community_v) - e_v_Cv\n D = 2 * (degree_v + 1)\n E = degree_v + 1 + degree_community_u - degree_community_v\n\n delta_qv = A + (B * C) - (D * E)\n\n return delta_qu, delta_qv", "def auto_square_edges(containerSize): \n\n # initializing/reseting all the lists\n \n leftEdge = []\n rightEdge = []\n topEdge = []\n bottomEdge = []\n\n # the following for loop defines the edges (without corners) as lists of\n # tuples denoting their (columnID, rowID)\n\n for i in range(1,containerSize-1):\n \n topEdge.append((i, 0))\n \n bottomEdge.append((i, containerSize-1))\n \n leftEdge.append((0, i))\n\n rightEdge.append((containerSize - i % 2 - 1, i))\n \n \n return topEdge, bottomEdge, leftEdge, rightEdge", "def evaluate_euclidean_cell_utilities(self):\n for row in self.grid:\n for cell in row:\n cell.distance_utility = get_euclidean_distance(cell, self.target)", "def calculate(self):\n # print(self.raw_data)\n mygen = self.my_gen()\n\n for _ in range(self.n):\n\n rows, cols = next(mygen)\n\n m_sum, square_weight, j_count = self.get_neigbours(rows, cols)\n\n numerator = m_sum - (self.mean * j_count)\n\n S = math.sqrt( (self.square_sum / self.n) - (self.mean**2) )\n denominator = S * math.sqrt( ( (self.n * j_count) - square_weight**2) / self.n )\n self.gi_matrix[rows][cols] = numerator / denominator", "def neighbors(self, u):\r\n if not u in self.vertices:\r\n return\r\n for v in 
self.vertices[u]:\r\n yield (v, self.vertices[u][v])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the text between the first occurrence of begin and the next occurrence of end
def between(text, begin, end):
    idx1 = text.find(begin)
    idx2 = text.find(end, idx1)
    if idx1 == -1 or idx2 == -1:
        return ''
    return text[idx1 + len(begin):idx2].strip()
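A quick usage sketch of the helper above (the input strings are illustrative):

# begin and end are plain substrings, not regexes; the extracted text is stripped.
assert between("name: [Alice] end", "[", "]") == "Alice"
assert between("key = value ;", "=", ";") == "value"
# Missing markers fall back to an empty string.
assert between("no markers here", "[", "]") == ""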
[ "def extract(text, start, end, end_is_optional=True, inclusive=False):\n if start:\n try:\n s = text.split(start, 1)[1]\n except IndexError:\n raise LookupError\n if inclusive:\n s = start + s\n else:\n s = text\n\n if not end:\n return s\n\n try:\n s, _ = s.split(end, 1)\n except ValueError:\n if not end_is_optional:\n raise EOFError\n if inclusive:\n s += end\n return s", "def get_content_between(s, start_flag, end_flag):\n start_i = s.find(start_flag)\n end_i = s.find(end_flag, start_i)\n return s[start_i + len(start_flag): end_i]", "def extract_between_res(html_txt, start_re, end_re):\n start_match = start_re.search(html_txt)\n if start_match != None:\n start_index = start_match.end()\n \n end_match = end_re.search(html_txt, start_index)\n if end_match != None:\n end_index = end_match.start()\n \n else:\n return None\n else:\n return None\n\n return html_txt[start_index:end_index]", "def extract_token(text, tag_start, tag_end):\n start = text.find(tag_start) + len(tag_start)\n end = text.find(tag_end, start)\n if start < 0 or end < 0:\n return None\n return text[start:end]", "def text_fragment(self):\n\n\t\treturn self.view.substr(sublime.Region(self.start, self.end))", "def GetMiddleStr(self, content, startStr, endStr):\r\n if '[' in startStr:\r\n startStr = startStr.replace('[', '\\[')\r\n if ']' in endStr:\r\n endStr = endStr.replace(']', '\\]')\r\n patternStr = r'%s(.+?)%s' % (startStr, endStr)\r\n p = re.compile(patternStr)\r\n res = p.search(content).groups()\r\n return res[0]", "def get_substring_from_offsets(\n text: str, start: Optional[int], end: Optional[int], byte_offset: bool = True\n) -> str:\n text = text.encode(\"utf8\") if byte_offset else text\n out = text[start:end]\n return out.decode(\"utf8\") if byte_offset else out", "def find_between(string, first, last):\n try:\n start = string.index(first) + len(first)\n end = string.index(last, start)\n return string[start:end]\n except ValueError:\n return \"\"", "def get_selection(self):\n (sl, si), (el, ei) = sorted([self.selection_start, self.selection_end])\n sublines = self.sublines[sl:el+1]\n if not sublines:\n raise IndexError(\"selection out of range\")\n if len(sublines) == 1:\n return sublines[0][1][si:ei]\n # more than one line\n text = sublines[0][1][si:] + \" \" * sublines[0][0][\"offset\"] # first\n for subline in sublines[1:-1]: # middle\n text += \"\\n\" * subline[0][\"first_subline\"] + subline[1] + \" \" * subline[0][\"offset\"]\n if el >= len(self.sublines):\n text += \"\\n\" * sublines[-1][0][\"first_subline\"] + sublines[-1][1] # last, full line\n else:\n text += \"\\n\" * sublines[-1][0][\"first_subline\"] + sublines[-1][1][:ei] # last, end contained\n return strip_attributes(text)", "def get_substring(self, start_index, end_index):\n return self._input[start_index:end_index]", "def __reverse_select_text_portion_from_offset(self, begin, end):\r\n cursor = self.textCursor()\r\n cursor_position = cursor.position()\r\n cursor.setPosition(cursor_position + begin)\r\n #QT silently fails on invalid position, ergo breaks when EOF < begin\r\n while (cursor.position() == cursor_position) and begin > 0:\r\n begin -= 1\r\n cursor.setPosition(cursor_position + begin)\r\n cursor.setPosition(cursor_position - end, QTextCursor.KeepAnchor)\r\n selected_text = cursor.selectedText()\r\n return selected_text", "def get_text(self, raw_file):\n start_flag = ['TEXT:']\n section_flag = ['UNCLAS', '(MORE)']\n end_flag = ['(ENDALL)']\n\n started = False\n text = ''\n offset_list = []\n curr_pos = 0\n for ori_line in 
open(raw_file, 'r'):\n prev_pos = curr_pos\n curr_pos += len(ori_line)\n line = ori_line.rstrip()\n if not started:\n if line in start_flag:\n started = True\n else:\n if line in start_flag:\n continue\n elif line in section_flag:\n started = False\n elif line in end_flag:\n break\n else:\n offset_list.extend(range(prev_pos, prev_pos+len(line)+1))\n text += (line + ' ')\n return (text, offset_list)", "def _split_preamble(self, text):\n for i, line in enumerate(text):\n if r'\\begin{document}' in line:\n begin_document_line = i\n break\n return text[:begin_document_line], text[begin_document_line:]", "def substring(s, start, end):\n startless = start is None\n endless = end is None\n if startless and endless:\n return s\n if endless:\n return s[start:]\n if startless:\n return s[:end]\n return s[start:end]", "def string_between(s: str, before: str, after: str):\n\tindexes = find_between(s, before, after)\n\tif indexes == (0, 0): return None\n\treturn s[indexes[0]:indexes[1]]", "def styleText(self, start: Any, end: Any) -> None:", "def get_stripped_offsets(text: str, tag: str) -> Offset:\n stripped_text = text.strip()\n start = text.find(stripped_text)\n end = start + len(stripped_text)\n return Offset(start, end, tag)", "def extract_substrings(self, text, start='^', end='$'):\n start = self.add_escape_chars(start)\n end = self.add_escape_chars(end)\n\n substring_regex = '.*' + start + '(.*?)' + end\n matches = re.findall(substring_regex, text)\n\n return matches", "def find_between(s: str, before: str, after: str):\n\tstart = s.find(before) + len(before)\n\tif start < 0: return 0, 0\n\n\tend = s[start:].find(after)\n\tif end < 0: return 0, 0\n\n\treturn start, start + end" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the sha256 hexdigest of the given data.
def sha256_hex(data):
    if isinstance(data, unicode):
        return hashlib.sha256(data.encode('utf-8')).hexdigest()
    else:
        return hashlib.sha256(data).hexdigest()
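The isinstance(data, unicode) branch marks this as Python 2 code. A rough Python 3 equivalent plus a sanity check against hashlib directly; the function name here is illustrative, not part of the dataset:

import hashlib

def sha256_hex_py3(data):
    # In Python 3, str plays the role of unicode; bytes pass through unchanged.
    if isinstance(data, str):
        data = data.encode('utf-8')
    return hashlib.sha256(data).hexdigest()

assert sha256_hex_py3("abc") == hashlib.sha256(b"abc").hexdigest()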
[ "async def __get_sha256(self, data):\n\n m = hashlib.sha256()\n m.update(data)\n return m.hexdigest()", "def doubleHashHex (data):\n\n hasher = hashlib.sha256 ()\n hasher.update (binascii.unhexlify (data))\n data = hasher.digest ()\n\n hasher = hashlib.sha256 ()\n hasher.update (data)\n\n return reverseHex (hasher.hexdigest ())", "def hash_data(data):\n return hashlib.md5(data).hexdigest()", "def get_checksum(data):\n return hashlib.sha1(data).hexdigest()", "def hash(bytes):\n return unpack(sha256(bytes).digest())", "def md5hex(data):\n return md5(data).hexdigest()", "def human_hash(data):\n return human_bytes(hashlib.sha1(data).digest())", "def bytes_to_hex(data):\n\n #from binascii import hexlify\n #return hex_string\n #hex_string = hexlify(data)\n return ''.join([\"%02X \" % ord(x) for x in data]).strip()", "def hexdigest(self, msg):\n\t\tcommitment = SHA256.new()\n\t\tcommitment.update(msg.encode())\n\t\treturn commitment.hexdigest()", "def get_hash(self, data, secret):\n _hash = HMAC.new(secret, msg=data, digestmod=SHA256).hexdigest()\n return _hash", "def crypto_hash(*args):\n \n # convert every arg into a string\n stringfiedargs = sorted(map(lambda data: json.dumps(data), args))\n\n #generate a single string with all args\n joined_data = ''.join(stringfiedargs)\n\n return hashlib.sha256(joined_data.encode('utf-8')).hexdigest()", "def hash_input(self, input_data: Union[bytes, str]) -> bytes:\n return cast(bytes, self.hash_method(self.bytes_from_input(input_data)).digest()) # We know this is always a hashlib hash that returns bytes", "def hash_bytes(buf: _binary_type) -> str:\n sha256 = hashlib.sha256()\n sha256.update(buf)\n return 'sha256:' + sha256.hexdigest()", "def bytes_hex(data):\n res = \"0x\"\n for byte in data:\n res += \"%02X\" % byte\n\n return res", "def crypto_hash(*args):\n string_args = sorted(map(json.dumps, args))\n joined_args = ''.join(string_args)\n return hashlib.sha256(joined_args.encode('utf-8')).hexdigest()", "def get_digest(data: bytes) -> str:\n return hmac.new(settings.GITHUB_WEBHOOK_KEY.encode(), data, hashlib.sha1).hexdigest()", "def hashhex(s):\r\n h = hashlib.sha1()\r\n h.update(s.encode('utf-8'))\r\n return h.hexdigest()", "def _hash_it(self, _data):\n m = hashlib.sha256(_data)\n if m.hexdigest() not in self.hashed_graphics:\n self.hashed_graphics[m.hexdigest()] = _data\n length = int(len(_data)/1024 + 0.5)\n return m.hexdigest(), \"b'{}Kb'\".format(length)", "async def sha256cmd(self, message):\r\n\t\tawait hashing(message, 3)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read the TraceAttributes for a specific trace in the file
def get_trace_attrs(self, idx: int) -> TraceAttributes:
    return read_trace(self, idx=idx, what="attrs")
[ "def read_trace(\n cf: CacheFile, idx: int, what: str = \"attrs\"\n) -> Union[TraceData, TraceAttributes]:\n if type(idx) != int:\n raise ValueError(\"Index must be an integer\")\n if idx >= 0:\n cnt = -1\n with read_file(cf.fname) as f:\n for origin in f.keys():\n for ix, key in enumerate(f[origin][\"traces\"], start=cnt + 1):\n if idx == ix:\n dset = f[origin][\"traces\"][key]\n dset.id.refresh() # load fresh from file\n if what == \"attrs\":\n attrs = parse_traceattrs(dset.attrs)\n attrs[\"original_file\"] = str(cf.fname)\n attrs[\"original_index\"] = idx\n check_metadata(str(attrs[\"readout\"]), attrs)\n return attrs\n elif what == \"data\":\n data = parse_tracedata(dset)\n return data\n else:\n raise NotImplementedError(f\"{what} can not be loaded\")\n cnt = idx\n\n raise IndexError(f\"{idx} not in cachefile\")", "def read_args(self,filename,varnames):\n for name in varnames:\n self.args[name]=ebf.read(filename,'/'+name)", "def load(trace_file: str, args: Dict ={}) -> trace.Trace:\n if trace_file.endswith('.prv'):\n trace_hdf5_file = file_parser(trace_file, args)\n if not trace_hdf5_file:\n print(f'Could not completely parse the file.')\n return ''\n elif trace_file.endswith('.h5'):\n trace_hdf5_file = trace_file\n else:\n print(f'==ERROR== File {trace_file} has not a valid extension.')\n return ''\n\n trace_metadata = trace.TraceMetaData(hdf5_file=trace_hdf5_file)\n\n df_states = try_read_hdf5(trace_hdf5_file, key=trace.HDF5_RECORDS + \"/\" + trace.HDF5_STATES_DF)\n df_events = try_read_hdf5(trace_hdf5_file, key=trace.HDF5_RECORDS + \"/\" + trace.HDF5_EVENTS_DF)\n df_comms = try_read_hdf5(trace_hdf5_file, key=trace.HDF5_RECORDS + \"/\" + trace.HDF5_COMMS_DF)\n\n return trace.Trace(trace_metadata, df_states, df_events, df_comms)", "def get_trace_flags(config):\n flags = {}\n user_flags = {}\n if \"trace\" in config.keys():\n if \"flags\" in config[\"trace\"].keys():\n user_flags = config[\"trace\"][\"flags\"]\n fn = os.path.join(os.path.dirname(__file__), TRACE_DEFAULT_FLAG_FILE)\n\n default_flags = json.load(open(fn, \"r\"))\n\n #Setup some default values\n default_flags[\"-ucf\"][\"value\"] = ngd_utils.get_ucf_filename(config)\n default_flags[\"-xml\"][\"value\"] = get_trace_xml_filename(config)\n default_flags[\"-o\"][\"value\"] = get_trace_filename(config)\n\n for key in default_flags:\n flags[key] = default_flags[key]\n if key in user_flags.keys():\n flags[key][\"value\"] = user_flags[key]\n return flags", "def read_trace_data(filename):\n\n global current_max_cpu\n global sample_num, last_sec_cpu, last_usec_cpu, start_time\n\n try:\n data = open(filename, 'r').read()\n except:\n print('Error opening ', filename)\n sys.exit(2)\n\n for line in data.splitlines():\n search_obj = \\\n re.search(r'(^(.*?)\\[)((\\d+)[^\\]])(.*?)(\\d+)([.])(\\d+)(.*?core_busy=)(\\d+)(.*?scaled=)(\\d+)(.*?from=)(\\d+)(.*?to=)(\\d+)(.*?mperf=)(\\d+)(.*?aperf=)(\\d+)(.*?tsc=)(\\d+)(.*?freq=)(\\d+)'\n , line)\n\n if search_obj:\n cpu = search_obj.group(3)\n cpu_int = int(cpu)\n cpu = str(cpu_int)\n\n time_pre_dec = search_obj.group(6)\n time_post_dec = search_obj.group(8)\n core_busy = search_obj.group(10)\n scaled = search_obj.group(12)\n _from = search_obj.group(14)\n _to = search_obj.group(16)\n mperf = search_obj.group(18)\n aperf = search_obj.group(20)\n tsc = search_obj.group(22)\n freq = search_obj.group(24)\n common_comm = search_obj.group(2).replace(' ', '')\n\n # Not all kernel versions have io_boost field\n io_boost = '0'\n search_obj = re.search(r'.*?io_boost=(\\d+)', line)\n if search_obj:\n 
io_boost = search_obj.group(1)\n\n if sample_num == 0 :\n start_time = Decimal(time_pre_dec) + Decimal(time_post_dec) / Decimal(1000000)\n sample_num += 1\n\n if last_sec_cpu[cpu_int] == 0 :\n last_sec_cpu[cpu_int] = time_pre_dec\n last_usec_cpu[cpu_int] = time_post_dec\n else :\n duration_us = (int(time_pre_dec) - int(last_sec_cpu[cpu_int])) * 1000000 + (int(time_post_dec) - int(last_usec_cpu[cpu_int]))\n duration_ms = Decimal(duration_us) / Decimal(1000)\n last_sec_cpu[cpu_int] = time_pre_dec\n last_usec_cpu[cpu_int] = time_post_dec\n elapsed_time = Decimal(time_pre_dec) + Decimal(time_post_dec) / Decimal(1000000) - start_time\n load = Decimal(int(mperf)*100)/ Decimal(tsc)\n freq_ghz = Decimal(freq)/Decimal(1000000)\n# Sanity check calculation, typically anomalies indicate missed samples\n# However, check for 0 (should never occur)\n tsc_ghz = Decimal(0)\n if duration_ms != Decimal(0) :\n tsc_ghz = Decimal(tsc)/duration_ms/Decimal(1000000)\n store_csv(cpu_int, time_pre_dec, time_post_dec, core_busy, scaled, _from, _to, mperf, aperf, tsc, freq_ghz, io_boost, common_comm, load, duration_ms, sample_num, elapsed_time, tsc_ghz)\n\n if cpu_int > current_max_cpu:\n current_max_cpu = cpu_int\n# End of for each trace line loop\n# Now seperate the main overall csv file into per CPU csv files.\n split_csv()", "def _read_attributes (self, log_entry):\n\n\t\t# regex to extract \"normally formatted attributes\"\n\t\tinfoRE = \"WorkFlowServices:[ \\t]?(?P<attr>.*?):[ \\t]+?(?P<val>.*)\"\n\n\t\t# kludge for attributes that didn't include a \" :\" separator for current status\n\t\tcurrentStatusRE = \"WorkFlowServices:[ \\t]?(?P<attr2>current status)(?P<val2>.*)\"\n\n\t\tpat = re.compile (\"%s|%s\" % (infoRE, currentStatusRE))\n\t\tfor line in string.split (log_entry, '\\n'):\n\t\t\tm = pat.match (line)\n\t\t\tattr = val = \"\"\n\t\t\tif m:\n\t\t\t\tg_dict = m.groupdict()\n\t\t\t\tfor key in g_dict.keys():\n\t\t\t\t\tif g_dict[key] is not None and key[:3] == 'val':\n\t\t\t\t\t\tval = g_dict[key]\n\t\t\t\t\tif g_dict[key] is not None and key[:4] == 'attr':\n\t\t\t\t\t\tattr = g_dict[key]\n\t\t\t\tif attr and key:\n\t\t\t\t\t# print (\"adding %s: %s\" % (attr, val))\n\t\t\t\t\tself[attr]=val", "def parse_trace(trace_file, families):\n print(\"\\n- Parsing trace file... %s\" % trace_file)\n with open(trace_file, \"r\") as fd:\n trace = fd.readlines()\n parsed_trace = []\n for line in trace:\n if not line.startswith(COMMENT_LABEL) and not line.startswith(\"1:\"):\n elements = line.split(\":\")\n event_elements = elements[6:] # remove the headers\n # Parse the rest by pairs\n for event_type, event_number in __pairwise__(event_elements):\n parsed_trace.append(\n (int(event_type.strip()), int(event_number.strip()))\n )\n print(\"\\t- Filtering families... 
%s\" % str(families))\n filtered_trace = []\n for line in parsed_trace:\n for family in families:\n if line[0] - family[0] >= 0 and line[0] - family[0] < 1000000:\n # belongs to the million\n filtered_trace.append(line)\n return filtered_trace", "def recover_parts(cf: CacheFile) -> Tuple[List[Annotations], List[List[TraceData]]]:\n with read_file(cf.fname) as f:\n events, traces = [], []\n for origin in f.keys():\n yml = dict()\n yml[\"origin\"] = origin\n yml[\"attrs\"] = parse_traceattrs(f[origin].attrs)\n\n trace_attrs = []\n trace_data = []\n for idx in f[origin][\"traces\"]:\n dset = f[origin][\"traces\"][idx]\n dset.id.refresh() # load fresh from file\n trace_attrs.append(parse_traceattrs(dset.attrs))\n trace_data.append(parse_tracedata(dset))\n yml[\"traces\"] = trace_attrs\n events.append(yml)\n traces.append(trace_data)\n return events, traces", "def load_rokdoc2d_attr(infile):\r\n \r\n # read in raw ascii file\r\n with open(infile, 'r') as fd:\r\n buf = fd.readlines()\r\n \r\n # The below for loop iterates over each line in the file until it finds\r\n # the attribute data block indicated by the line \"START OF SECTION\". Once\r\n # it files that line, the following lines are parsed into trace and attr\r\n # columns, converted to floats or np.nan values, and added to trc, and attr\r\n # lists\r\n in_data_block = False\r\n trc = []\r\n attr = []\r\n for line in buf:\r\n \r\n # remove leading/trailing whitespace etc.\r\n line = line.strip()\r\n \r\n # test for start of data block\r\n if line[0:16] == 'START OF SECTION':\r\n in_data_block = True\r\n next\r\n \r\n # skip blank lines\r\n elif len(line)==0:\r\n next\r\n \r\n # test for end of data block\r\n elif line == 'END OF SECTION: Default':\r\n break\r\n \r\n # start parsing data values\r\n elif in_data_block:\r\n \r\n line = line.split()\r\n trc.append(float(line[0])) # trace number in 2D model\r\n \r\n # attribute value from 2D model\r\n if line[1] == 'NULL': \r\n # deal with nulls properly\r\n cur_attr = np.nan\r\n else:\r\n cur_attr = float(line[1])\r\n \r\n attr.append(cur_attr)\r\n \r\n # convert to numpy arrays\r\n trc = np.array(trc)\r\n attr = np.array(attr)\r\n \r\n return trc, attr", "def get_tracefile_data(filename):\n\n tracefile_fd = open(filename, 'r')\n tracefile_lines = tracefile_fd.readlines()\n return tracefile_lines", "def __parse_uniforms_and_attributes(self, filename):\n uniforms = set()\n attributes = set()\n attribute_types = {}\n stream = open(filename, 'r')\n for line in stream:\n # NOTE: Here we assume a simple subset of the syntax for glsl\n # declarations, this is all I am using at the moment and we can\n # handle more cases as needed. We're also using the old 'attribute'\n # form, not 'in'. 
This is because we're targetting glsl 130 (opengl\n # 3.0) since that's what my laptop supports!\n pattern = \"(attribute|uniform) ([a-zA-Z0-9_]+) ([a-zA-Z0-9_]+)\"\n match = re.match(pattern, line)\n if match:\n storage_type = match.group(1)\n data_type = match.group(2)\n variable_name = match.group(3)\n if storage_type == \"attribute\":\n attributes.add(variable_name)\n data_dims = 0\n data_array_type = None\n if data_type == \"float\":\n data_dims = 1\n data_array_type = \"f\"\n elif data_type == \"vec2\":\n data_dims = 2\n data_array_type = \"f\"\n elif data_type == \"vec3\":\n data_dims = 3\n data_array_type = \"f\"\n else:\n raise Exception(\"Unknown attribute data type: %s\" % data_type)\n attribute_types[variable_name] = (data_dims, data_array_type)\n elif storage_type == \"uniform\":\n uniforms.add(variable_name)\n return (uniforms, attributes, attribute_types)", "def update_trace_attributes(attrs: TraceAttributes):\n index: int\n index = attrs[\"original_index\"] # type: ignore\n if type(index) != int:\n raise ValueError(\"Index must be an integer\")\n fname = attrs[\"original_file\"]\n attrs = filter_trace_attrs(attrs)\n\n if index >= 0:\n cnt = -1\n with write_file(fname) as f:\n for origin in f.keys():\n for idx, key in enumerate(f[origin][\"traces\"], start=cnt + 1):\n if idx == index:\n dset = f[origin][\"traces\"][key]\n for key in attrs.keys():\n dset.attrs[str(key)] = str(attrs[key])\n return\n cnt = idx\n\n raise IndexError(f\"{index} not in cachefile\")", "def test_header_to_trace_set_params(self):\n trace_count = 100\n sample_count = 1000\n\n try:\n with trsfile.open(self.tmp_path, 'w', headers={\n Header.LABEL_X: \"s\",\n Header.LABEL_Y: \"V\",\n Header.OFFSET_X: 100,\n Header.SCALE_X: 1.1,\n Header.SCALE_Y: 0.9,\n Header.TRACE_OFFSET: 200,\n Header.LOGARITHMIC_SCALE: False,\n Header.ACQUISITION_RANGE_OF_SCOPE: 1.0,\n Header.ACQUISITION_COUPLING_OF_SCOPE: 2,\n Header.ACQUISITION_OFFSET_OF_SCOPE: 3.0,\n Header.ACQUISITION_INPUT_IMPEDANCE: 4.0,\n Header.ACQUISITION_DEVICE_ID: '5',\n Header.ACQUISITION_TYPE_FILTER: 6,\n Header.ACQUISITION_FREQUENCY_FILTER: 7.0,\n Header.ACQUISITION_RANGE_FILTER: 8.0,\n Header.EXTERNAL_CLOCK_USED: True,\n Header.EXTERNAL_CLOCK_THRESHOLD: 9.0,\n Header.EXTERNAL_CLOCK_MULTIPLIER: 10,\n Header.EXTERNAL_CLOCK_PHASE_SHIFT: 11,\n Header.EXTERNAL_CLOCK_RESAMPLER_MASK: 12,\n Header.EXTERNAL_CLOCK_RESAMPLER_ENABLED: False,\n Header.EXTERNAL_CLOCK_FREQUENCY: 13.0,\n Header.EXTERNAL_CLOCK_BASE: 14,\n Header.NUMBER_VIEW: 15,\n Header.TRACE_OVERLAP: True,\n Header.NUMBER_OF_ENABLED_CHANNELS: 16,\n Header.NUMBER_OF_USED_OSCILLOSCOPES: 17,\n Header.XY_SCAN_WIDTH: 18,\n Header.XY_SCAN_HEIGHT: 19,\n Header.XY_MEASUREMENTS_PER_SPOT: 20,\n }) as trs_traces:\n trs_traces.extend([\n Trace(\n SampleCoding.FLOAT,\n [0] * sample_count,\n TraceParameterMap({'LEGACY_DATA': ByteArrayParameter(i.to_bytes(8, byteorder='big'))})\n )\n for i in range(0, trace_count)]\n )\n expected_trace_set_parameters = TraceSetParameterMap()\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.DISPLAY_HINT_X_LABEL, \"s\")\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.DISPLAY_HINT_Y_LABEL, \"V\")\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.X_OFFSET, 100)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.X_SCALE, 1.1)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.Y_SCALE, 0.9)\n 
expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.TRACE_OFFSET, 200)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.DISPLAY_HINT_USE_LOG_SCALE, False)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_OSCILLOSCOPE_RANGE, 1.0)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_OSCILLOSCOPE_COUPLING, 2)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_OSCILLOSCOPE_OFFSET, 3.0)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_OSCILLOSCOPE_INPUT_IMPEDANCE, 4.0)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_OSCILLOSCOPE_DEVICE_IDENTIFIER, '5')\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_ICWAVES_FILTER_TYPE, 6)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_ICWAVES_FILTER_FREQUENCY, 7.0)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_ICWAVES_FILTER_RANGE, 8.0)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_ICWAVES_EXT_CLK_ENABLED, True)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_ICWAVES_EXT_CLK_THRESHOLD, 9.0)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_ICWAVES_EXT_CLK_MULTIPLIER, 10)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_ICWAVES_EXT_CLK_PHASESHIFT, 11)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_ICWAVES_EXT_CLK_RESAMPLER_MASK, 12)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_ICWAVES_EXT_CLK_RESAMPLER_MASK_ENABLED, False)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_ICWAVES_EXT_CLK_FREQUENCY, 13.0)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_ICWAVES_EXT_CLK_TIMEBASE, 14)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.DISPLAY_HINT_NUM_TRACES_SHOWN, 15)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.DISPLAY_HINT_TRACES_OVERLAP, True)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_OSCILLOSCOPE_ACTIVE_CHANNEL_COUNT, 16)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_OSCILLOSCOPE_COUNT, 17)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_XYZ_GRID_COUNT_X, 18)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_XYZ_GRID_COUNT_Y, 19)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_XYZ_MEASUREMENTS_PER_SPOT, 20)\n self.assertDictEqual(trs_traces.get_headers()[Header.TRACE_SET_PARAMETERS], expected_trace_set_parameters)\n except Exception as e:\n self.fail('Exception occurred: ' + str(e))", "def _parse_tracefile(self, trace_file):\n with open(trace_file, 'r') as f:\n num_workers = int(f.readline().strip())\n for line in f:\n self._cmds.append(line.split())\n return num_workers", "def read_patient_traces(filename):\r\n trace_data = []\r\n patient_ids = []\r\n first_line = True\r\n with open(filename) as f:\r\n for line in f:\r\n if first_line:\r\n patient_ids = line.split()\r\n patient_ids = map(int, patient_ids)\r\n 
first_line = False\r\n elif len(line) > 5:\r\n trace_data.append(line.rstrip('\\n'))\r\n return compute_pairwise_epi_distances(trace_data, patient_ids)", "def trace_file(self):\n ret = self._get_attr(\"traceFile\")\n return ret", "def get_trace(file_path, run_name):\n parsed_file = []\n file_path = file_path + \"/out/nextflow_reports\"\n fn = os.path.join(file_path, run_name + \"_execution_trace.txt\")\n\n if pathlib.Path(fn).exists(): \n\n fh = open(fn, 'r')\n\n for line in fh:\n record = line.strip().split(\"\\t\")\n\n if record[0] == \"task_id\":\n parsed_file.append(record)\n continue\n\n record[1] = record[1].split(\":\")[-1].replace(\"__\",\"-\")\n record[3] = record[3][0] \n\n parsed_file.append(record) \n\n return parsed_file\n\n else:\n return None", "def _read_structure_attributes(f):\n\n line = ''\n variogram_info = {}\n while \"end structure\" not in line:\n line = f.readline()\n if line == '':\n raise Exception(\"EOF while reading structure\")\n line = line.strip().lower().split()\n if line[0].startswith('#'):\n continue\n if line[0] == \"nugget\":\n nugget = float(line[1])\n elif line[0] == \"transform\":\n transform = line[1]\n elif line[0] == \"numvariogram\":\n numvariograms = int(line[1])\n elif line[0] == \"variogram\":\n variogram_info[line[1]] = float(line[2])\n elif line[0] == \"end\":\n break\n elif line[0] == \"mean\":\n warning.warn(\"'mean' attribute not supported, skipping\",PyemuWarningF)\n else:\n raise Exception(\"unrecognized line in structure definition:{0}\".\\\n format(line[0]))\n assert numvariograms == len(variogram_info)\n return nugget,transform,variogram_info", "def _get_read_args(self, file_name, *args, **kwargs):\n options = dict(self.read_options)\n if self._ggpk:\n options['file_path_or_raw'] = self._ggpk[file_name].record.extract()\n elif self._path:\n options['file_path_or_raw'] = os.path.join(self._path, file_name)\n\n return options" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Overwrite the TraceAttributes for a trace; the original file and the index of the trace are specified as fields within the TraceAttributes
def update_trace_attributes(attrs: TraceAttributes):
    index: int
    index = attrs["original_index"]  # type: ignore
    if type(index) != int:
        raise ValueError("Index must be an integer")
    fname = attrs["original_file"]
    attrs = filter_trace_attrs(attrs)

    if index >= 0:
        cnt = -1
        with write_file(fname) as f:
            for origin in f.keys():
                for idx, key in enumerate(f[origin]["traces"], start=cnt + 1):
                    if idx == index:
                        dset = f[origin]["traces"][key]
                        for key in attrs.keys():
                            dset.attrs[str(key)] = str(attrs[key])
                        return
                cnt = idx

    raise IndexError(f"{index} not in cachefile")
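Read together with get_trace_attrs above, the intended pattern appears to be a read-modify-write round trip on a single trace. A hedged sketch, assuming an open CacheFile instance cf and that extra string-valued keys are acceptable; the 'comment' field is illustrative only:

attrs = cf.get_trace_attrs(0)                  # carries original_file and original_index
attrs["comment"] = "rejected: movement artifact"
update_trace_attributes(attrs)                 # writes the attributes back to the same trace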
[ "def get_trace_attrs(self, idx: int) -> TraceAttributes:\n return read_trace(self, idx=idx, what=\"attrs\")", "def reset_trace_record():\n global _TRACE_RECORD\n _TRACE_RECORD = {}", "def test_header_to_trace_set_params(self):\n trace_count = 100\n sample_count = 1000\n\n try:\n with trsfile.open(self.tmp_path, 'w', headers={\n Header.LABEL_X: \"s\",\n Header.LABEL_Y: \"V\",\n Header.OFFSET_X: 100,\n Header.SCALE_X: 1.1,\n Header.SCALE_Y: 0.9,\n Header.TRACE_OFFSET: 200,\n Header.LOGARITHMIC_SCALE: False,\n Header.ACQUISITION_RANGE_OF_SCOPE: 1.0,\n Header.ACQUISITION_COUPLING_OF_SCOPE: 2,\n Header.ACQUISITION_OFFSET_OF_SCOPE: 3.0,\n Header.ACQUISITION_INPUT_IMPEDANCE: 4.0,\n Header.ACQUISITION_DEVICE_ID: '5',\n Header.ACQUISITION_TYPE_FILTER: 6,\n Header.ACQUISITION_FREQUENCY_FILTER: 7.0,\n Header.ACQUISITION_RANGE_FILTER: 8.0,\n Header.EXTERNAL_CLOCK_USED: True,\n Header.EXTERNAL_CLOCK_THRESHOLD: 9.0,\n Header.EXTERNAL_CLOCK_MULTIPLIER: 10,\n Header.EXTERNAL_CLOCK_PHASE_SHIFT: 11,\n Header.EXTERNAL_CLOCK_RESAMPLER_MASK: 12,\n Header.EXTERNAL_CLOCK_RESAMPLER_ENABLED: False,\n Header.EXTERNAL_CLOCK_FREQUENCY: 13.0,\n Header.EXTERNAL_CLOCK_BASE: 14,\n Header.NUMBER_VIEW: 15,\n Header.TRACE_OVERLAP: True,\n Header.NUMBER_OF_ENABLED_CHANNELS: 16,\n Header.NUMBER_OF_USED_OSCILLOSCOPES: 17,\n Header.XY_SCAN_WIDTH: 18,\n Header.XY_SCAN_HEIGHT: 19,\n Header.XY_MEASUREMENTS_PER_SPOT: 20,\n }) as trs_traces:\n trs_traces.extend([\n Trace(\n SampleCoding.FLOAT,\n [0] * sample_count,\n TraceParameterMap({'LEGACY_DATA': ByteArrayParameter(i.to_bytes(8, byteorder='big'))})\n )\n for i in range(0, trace_count)]\n )\n expected_trace_set_parameters = TraceSetParameterMap()\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.DISPLAY_HINT_X_LABEL, \"s\")\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.DISPLAY_HINT_Y_LABEL, \"V\")\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.X_OFFSET, 100)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.X_SCALE, 1.1)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.Y_SCALE, 0.9)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.TRACE_OFFSET, 200)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.DISPLAY_HINT_USE_LOG_SCALE, False)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_OSCILLOSCOPE_RANGE, 1.0)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_OSCILLOSCOPE_COUPLING, 2)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_OSCILLOSCOPE_OFFSET, 3.0)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_OSCILLOSCOPE_INPUT_IMPEDANCE, 4.0)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_OSCILLOSCOPE_DEVICE_IDENTIFIER, '5')\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_ICWAVES_FILTER_TYPE, 6)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_ICWAVES_FILTER_FREQUENCY, 7.0)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_ICWAVES_FILTER_RANGE, 8.0)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_ICWAVES_EXT_CLK_ENABLED, True)\n 
expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_ICWAVES_EXT_CLK_THRESHOLD, 9.0)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_ICWAVES_EXT_CLK_MULTIPLIER, 10)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_ICWAVES_EXT_CLK_PHASESHIFT, 11)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_ICWAVES_EXT_CLK_RESAMPLER_MASK, 12)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_ICWAVES_EXT_CLK_RESAMPLER_MASK_ENABLED, False)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_ICWAVES_EXT_CLK_FREQUENCY, 13.0)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_ICWAVES_EXT_CLK_TIMEBASE, 14)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.DISPLAY_HINT_NUM_TRACES_SHOWN, 15)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.DISPLAY_HINT_TRACES_OVERLAP, True)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_OSCILLOSCOPE_ACTIVE_CHANNEL_COUNT, 16)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_OSCILLOSCOPE_COUNT, 17)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_XYZ_GRID_COUNT_X, 18)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_XYZ_GRID_COUNT_Y, 19)\n expected_trace_set_parameters.add_standard_parameter(StandardTraceSetParameters.SETUP_XYZ_MEASUREMENTS_PER_SPOT, 20)\n self.assertDictEqual(trs_traces.get_headers()[Header.TRACE_SET_PARAMETERS], expected_trace_set_parameters)\n except Exception as e:\n self.fail('Exception occurred: ' + str(e))", "def setTraceHeader(self, theader): \n self.traceHeader = theader", "def update_attributes(self, override: Dict):\n self.additional_attributes.update(override)", "def _fix_error_trace(self, trace):\n trace.file = trace.file.replace('<', '{').replace('>', '}')\n trace.routine = trace.routine.replace('<', '{').replace('>', '}')\n for index in range(len(trace.previousError)):\n previousTrace = trace.previousError[index]\n previousTrace = self._fix_error_trace(previousTrace)\n trace.previousError[index] = previousTrace\n return trace", "def renameAttr():\n pass", "def read_trace(\n cf: CacheFile, idx: int, what: str = \"attrs\"\n) -> Union[TraceData, TraceAttributes]:\n if type(idx) != int:\n raise ValueError(\"Index must be an integer\")\n if idx >= 0:\n cnt = -1\n with read_file(cf.fname) as f:\n for origin in f.keys():\n for ix, key in enumerate(f[origin][\"traces\"], start=cnt + 1):\n if idx == ix:\n dset = f[origin][\"traces\"][key]\n dset.id.refresh() # load fresh from file\n if what == \"attrs\":\n attrs = parse_traceattrs(dset.attrs)\n attrs[\"original_file\"] = str(cf.fname)\n attrs[\"original_index\"] = idx\n check_metadata(str(attrs[\"readout\"]), attrs)\n return attrs\n elif what == \"data\":\n data = parse_tracedata(dset)\n return data\n else:\n raise NotImplementedError(f\"{what} can not be loaded\")\n cnt = idx\n\n raise IndexError(f\"{idx} not in cachefile\")", "def reindex(self):\n for idx, line in enumerate(self.line_map):\n line.index = idx\n if line.annotations:\n for x in line.annotations:\n x.line_num = idx", "def setTFETraceFlag(trace: bool = False) -> None:\n global __TFE_TRACE__\n if trace is True:\n logger.info(\"Writing trace files for every 
session.run() call with a tag\")\n\n __TFE_TRACE__ = trace", "def setTraceData(self, data): \n\n self.traceData = data", "def apply_attributes_to_stops(self, new_attributes: dict):\n self._verify_no_id_change(new_attributes)\n stops = list(new_attributes.keys())\n old_attribs = [deepcopy(self._graph.nodes[stop]) for stop in stops]\n new_attribs = [{**self._graph.nodes[stop], **new_attributes[stop]} for stop in stops]\n\n self._graph.graph['change_log'] = self.change_log().modify_bunch('stop', stops, old_attribs, stops, new_attribs)\n\n nx.set_node_attributes(self._graph, dict(zip(stops, new_attribs)))\n logging.info(f'Changed Stop attributes for {len(stops)} stops')", "def reset_time_trace(self):\n self.__time_trace_data = []\n self.time_trace_level = -2", "def clone_attributes():\n _clone_attributes(utils.get_sentiwordnet_groups(SENTIWORDNET_FILE))\n _clone_attributes(utils.get_e_lemma_groups(E_LEMMA_FILE))", "def renameAttribute(*args, **kwargs):\n \n pass", "def apply_attributes_to_routes(self, new_attributes: dict):\n self._verify_no_id_change(new_attributes)\n routes = list(new_attributes.keys())\n old_attribs = [deepcopy(self._graph.graph['routes'][route]) for route in routes]\n new_attribs = [{**self._graph.graph['routes'][route], **new_attributes[route]} for route in routes]\n\n self._graph.graph['change_log'] = self.change_log().modify_bunch('route', routes, old_attribs, routes,\n new_attribs)\n\n for route, new_route_attribs in zip(routes, new_attribs):\n self._graph.graph['routes'][route] = new_route_attribs\n logging.info(f'Changed Route attributes for {len(routes)} routes')", "def setRaytrace(self, raytrace):\n \n self.raytrace = raytrace", "def PatchTraces(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "def update_traceparent(self, traceparent, in_trace_id, in_span_id, level):\n if traceparent is None: # modify the trace_id part only when it was not present at all\n trace_id = in_trace_id.zfill(32)\n else:\n # - We do not need the incoming upstream parent span ID for the header we sent downstream.\n # - We also do not care about the incoming version: The version field we sent downstream needs to match the\n # format of the traceparent header we produce here, so we always send the version _we_ support downstream,\n # even if the header coming from upstream supported a different version.\n # - Finally, we also do not care about the incoming sampled flag , we only need to communicate our own\n # sampling decision downstream. The sampling decisions from our upstream is irrelevant for what we send\n # downstream.\n _, trace_id, _, _ = self.get_traceparent_fields(traceparent)\n\n parent_id = in_span_id.zfill(16)\n flags = level & SAMPLED_BITMASK\n flags = format(flags, '0>2x')\n\n traceparent = \"{version}-{traceid}-{parentid}-{flags}\".format(version=self.SPECIFICATION_VERSION,\n traceid=trace_id,\n parentid=parent_id,\n flags=flags)\n return traceparent" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
read either metadata or attributes for a specific trace
def read_trace( cf: CacheFile, idx: int, what: str = "attrs" ) -> Union[TraceData, TraceAttributes]: if type(idx) != int: raise ValueError("Index must be an integer") if idx >= 0: cnt = -1 with read_file(cf.fname) as f: for origin in f.keys(): for ix, key in enumerate(f[origin]["traces"], start=cnt + 1): if idx == ix: dset = f[origin]["traces"][key] dset.id.refresh() # load fresh from file if what == "attrs": attrs = parse_traceattrs(dset.attrs) attrs["original_file"] = str(cf.fname) attrs["original_index"] = idx check_metadata(str(attrs["readout"]), attrs) return attrs elif what == "data": data = parse_tracedata(dset) return data else: raise NotImplementedError(f"{what} can not be loaded") cnt = idx raise IndexError(f"{idx} not in cachefile")
[ "def read_args(self,filename,varnames):\n for name in varnames:\n self.args[name]=ebf.read(filename,'/'+name)", "def _read_attributes (self, log_entry):\n\n\t\t# regex to extract \"normally formatted attributes\"\n\t\tinfoRE = \"WorkFlowServices:[ \\t]?(?P<attr>.*?):[ \\t]+?(?P<val>.*)\"\n\n\t\t# kludge for attributes that didn't include a \" :\" separator for current status\n\t\tcurrentStatusRE = \"WorkFlowServices:[ \\t]?(?P<attr2>current status)(?P<val2>.*)\"\n\n\t\tpat = re.compile (\"%s|%s\" % (infoRE, currentStatusRE))\n\t\tfor line in string.split (log_entry, '\\n'):\n\t\t\tm = pat.match (line)\n\t\t\tattr = val = \"\"\n\t\t\tif m:\n\t\t\t\tg_dict = m.groupdict()\n\t\t\t\tfor key in g_dict.keys():\n\t\t\t\t\tif g_dict[key] is not None and key[:3] == 'val':\n\t\t\t\t\t\tval = g_dict[key]\n\t\t\t\t\tif g_dict[key] is not None and key[:4] == 'attr':\n\t\t\t\t\t\tattr = g_dict[key]\n\t\t\t\tif attr and key:\n\t\t\t\t\t# print (\"adding %s: %s\" % (attr, val))\n\t\t\t\t\tself[attr]=val", "def artAttrCtx(*args, **kwargs):\n\n pass", "def _get_read_args(self, file_name, *args, **kwargs):\n options = dict(self.read_options)\n if self._ggpk:\n options['file_path_or_raw'] = self._ggpk[file_name].record.extract()\n elif self._path:\n options['file_path_or_raw'] = os.path.join(self._path, file_name)\n\n return options", "def get_args (args):\n\n args = args[1:]\n args_list = ''.join(args).strip().replace(']][[',']]|[[').split('|')\n\n adjM = ast.literal_eval(args_list[0])\n samples = ast.literal_eval(args_list[1])\n return adjM, samples", "def readMetadata(hdf_file, attr=None, as_list=True):\n metadata = hdf_file.attributes() # reads metadata into a dictionary\n # If no attribute is stated, print out a list of the available types\n if not attr:\n # If no attribute given, enumerate tuples returned by metadata items \n for ind, key_val_tpl in enumerate(sorted(metadata.items())):\n print('% 3d %s' % (ind, key_val_tpl[0])) # key is first in tuple\n return \n else:\n if as_list == True:\n return metadata[attr].split('\\n') \n else:\n return metadata[attr] # or metadata.get(attr)", "def read_args():\n rtype_dict = {'P':'PP', 'R':'BR', 'S':'SP'}\n\n rindex = int(sys.argv[1])\n try:\n request_type = rtype_dict[sys.argv[2]]\n except (IndexError, KeyError):\n logging.warning(\"Missing or invalid request type. 
Default request type SP assumed.\")\n request_type = 'SP'\n\n return rindex, request_type", "def get_attr(self, arg):\n if not arg:\n pass\n else:\n if arg == 'log_path':\n return self.__dict__['log_path']\n elif arg == 'logfile':\n return self.__dict__['logfile']\n elif arg == 'ini_path':\n return self.__dict__['ini_path']", "def get_trace_attrs(self, idx: int) -> TraceAttributes:\n return read_trace(self, idx=idx, what=\"attrs\")", "def __jpg_file_info(*args):\n attrs = [\"name\", \"location\", \"attribute\"]\n return dict(zip(attrs, args))", "def process_func_args(func_name, in_args):\n\n\tdebug(2, \"reading arguments for function \" + func_name)\n\n\t# initialize the structure for the function.\n\tg.args[func_name] = {}\n\tg.args[func_name]['dict'] = {}\n\tg.args[func_name]['list'] = []\n\n\tif not in_args:\n\t\treturn\n\n\t# split in_args into each argument and its type\n\t# split on commas\n\tags = split_args(in_args)\n\n\tsplit_in_args = []\n\tfor a in ags:\n\t\t#arg = re.match(\"(\\w+)\\s*([&*]{0,1})*\\s*(\\w+)\", a)\n\t\targ = re.match(\"([&]{0,1})\\s*[\\(]{0,1}\\s*(\\w+)\\s*[\\)]{0,1}\", a)\n\t\tif arg:\n\t\t\t#arg_type = arg.group(1)\n\t\t\targ_modifier = arg.group(1)\n\t\t\targ_name = arg.group(2)\n\t\t\t#split_in_args.append({'name': arg_name, 'modifier': arg_modifier, 'type': arg_type})\n\t\t\tsplit_in_args.append({'name': arg_name, 'modifier': arg_modifier})\n\t\telse:\n\t\t\terror(\"couldn't match the arg in process_func_args: '\" + a + \"' from in_args: '\" + in_args + \"', ags: \" + str(ags))\n\n\t# set the type for each arg from the spit information\n\tfor sia in split_in_args:\n\t\t# output debug info\n\t\tdebug_string = \"\"\n\t\tif sia['modifier']:\n\t\t\tdebug_string += \" \" + sia['modifier']\n\t\tdebug_string += \" \" + sia['name'] + \"\\n\"\n\t\tdebug(3, debug_string)\n\n\t\t# save info\n\t\tg.args[func_name]['list'].append(sia['name'])\n\t\tg.args[func_name]['dict'][sia['name']] = {}\n\t\tif sia['modifier']:\n\t\t\tg.args[func_name]['dict'][sia['name']]['modifier'] = sia['modifier']\n\t\telse:\n\t\t\tg.args[func_name]['dict'][sia['name']]['modifier'] = None", "def get_line_properties(self, *args, **kwargs): # real signature unknown; restored from __doc__\n pass", "def handle_info(self, args: \"List[str]\") -> None:\n if args:\n try:\n fullargs = [self.expand_info_arg(arg) for arg in args]\n except HERAError as e:\n print(\"Error: \" + str(e) + \".\")\n return\n else:\n fullargs = [\"registers\", \"flags\", \"stack\"]\n\n for i, fullarg in enumerate(fullargs):\n if fullarg == \"stack\":\n self.info_stack()\n elif fullarg == \"symbols\":\n self.info_symbols()\n elif fullarg == \"registers\":\n self.info_registers()\n elif fullarg == \"flags\":\n self.info_flags()\n else:\n raise RuntimeError(\"this should never happen!\")\n\n if i != len(fullargs) - 1:\n print()", "def getargvalues(frame):\n if not isframe(frame):\n raise TypeError('{0!r} is not a frame object'.format(frame))\n\n args, varargs, kwonlyargs, varkw = _getfullargs(frame.f_code)\n return ArgInfo(args + kwonlyargs, varargs, varkw, frame.f_locals)", "def read_arguments(self, args, name):\n ba = CliBoundArguments(self, args, name)\n ba.process_arguments()\n return ba", "def parse_extended_attributes(self, issue, extended_attributes): # pragma: no cover", "def get_line(self, *args, **kwargs): # real signature unknown; restored from __doc__\n pass", "def recover_parts(cf: CacheFile) -> Tuple[List[Annotations], List[List[TraceData]]]:\n with read_file(cf.fname) as f:\n events, traces = [], []\n for origin in 
f.keys():\n yml = dict()\n yml[\"origin\"] = origin\n yml[\"attrs\"] = parse_traceattrs(f[origin].attrs)\n\n trace_attrs = []\n trace_data = []\n for idx in f[origin][\"traces\"]:\n dset = f[origin][\"traces\"][idx]\n dset.id.refresh() # load fresh from file\n trace_attrs.append(parse_traceattrs(dset.attrs))\n trace_data.append(parse_tracedata(dset))\n yml[\"traces\"] = trace_attrs\n events.append(yml)\n traces.append(trace_data)\n return events, traces", "def GetEVPNLearnedInfo(self, *args, **kwargs):\n payload = { \"Arg1\": self }\n for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]\n for item in kwargs.items(): payload[item[0]] = item[1]\n return self._execute('getEVPNLearnedInfo', payload=payload, response_object=None)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
recover the two parts of a cachefile, i.e. annotations and traces
def recover_parts(cf: CacheFile) -> Tuple[List[Annotations], List[List[TraceData]]]: with read_file(cf.fname) as f: events, traces = [], [] for origin in f.keys(): yml = dict() yml["origin"] = origin yml["attrs"] = parse_traceattrs(f[origin].attrs) trace_attrs = [] trace_data = [] for idx in f[origin]["traces"]: dset = f[origin]["traces"][idx] dset.id.refresh() # load fresh from file trace_attrs.append(parse_traceattrs(dset.attrs)) trace_data.append(parse_tracedata(dset)) yml["traces"] = trace_attrs events.append(yml) traces.append(trace_data) return events, traces
[ "def cacheFile(cacheInfo=\"string\", noBackup=bool, prefix=bool, pointCount=bool, fileName=\"string\", refresh=bool, geometry=bool, runupFrames=int, interpEndTime=(), format=\"string\", inTangent=\"string\", worldSpace=bool, doubleToFloat=bool, sampleMultiplier=int, cacheFileNode=\"string\", outAttr=\"string\", pointsAndNormals=\"string\", startTime=(), inAttr=\"string\", replaceCachedFrame=bool, staticCache=bool, deleteCachedFrame=bool, simulationRate=(), channelName=\"string\", createCacheNode=bool, attachFile=bool, pc2File=\"string\", replaceWithoutSimulating=bool, channelIndex=bool, points=\"string\", dataSize=bool, singleCache=bool, cacheFormat=\"string\", convertPc2=bool, cacheableNode=\"string\", directory=\"string\", outTangent=\"string\", interpStartTime=(), appendFrame=bool, endTime=(), creationChannelName=\"string\", cacheableAttrs=\"string\", descriptionFileName=bool):\n pass", "def getFileCache(self, *args):\r\n return _osgDB.Registry_getFileCache(self, *args)", "def _arg_cache(name):\n return wf().cachefile(name + '.argcache')", "def extract_cache(self):\n try:\n with open(self.output_filename+\"_cache\"+\".txt\", \"r\") as cache:\n cache_data = cache.read()\n cache_dict={}\n for e in cache_data.split(\"\\r\\n\"):\n if \":\" in e:\n key, value = e.split(\": \")\n cache_dict[key]=value\n if cache_dict.has_key(\"ETag\"):\n self.ETag = cache_dict[\"ETag\"]\n if cache_dict.has_key(\"Last-Modified\"):\n self.Last_Modified = cache_dict[\"Last-Modified\"]\n if cache_dict.has_key(\"Content-Length\"):\n self.content_length = int(cache_dict[\"Content-Length\"])\n if cache_dict.has_key(\"byte_count\"):\n self.byte_count = int(cache_dict[\"byte_count\"])\n except IOError, KeyboardInterrupt:\n print \"There is a lost of connenction\"", "def setFileCache(self, *args):\r\n return _osgDB.Registry_setFileCache(self, *args)", "def __init__(self, cachefile, srcfile, *args, **kwargs):\n self._dump_mutations = False\n self.cachefile = cachefile\n self._srcfile = self.srchash = None\n # the cache maps md5 sums of keys to a 2-list of ['filename', {'srcfile': 'srchash'}]\n self.cache = {}\n # maps the sources to the current MD5 hash\n self.sources = {}\n # cache keys, not stored\n self._hashes = {}\n self.load()\n self.srcfile = srcfile\n self.update(*args, **kwargs)\n self._dump_mutations = True\n self.dump()", "def cache_extract_raw():\n\n def use_cache_decorator(function):\n \"\"\"Function result caching wrapper.\"\"\"\n\n @wraps(function)\n def wrapper(*args):\n cache_path = args[0].cache_path\n cache_read = args[0].read_cache\n hashed_name = sha256((args[1]).encode()).hexdigest()\n filepath = Path(cache_path) / f\"{hashed_name}.pkl\"\n\n if cache_read and filepath.exists():\n result = from_pickle(filepath)\n logger.info(\"Using cached model: %s.\", filepath)\n else:\n result = function(*args)\n Path.mkdir(Path(cache_path), exist_ok=True, parents=True)\n to_pickle(filepath, result)\n logger.info(\"Cached model to: %s.\", filepath)\n return result\n\n return wrapper\n\n return use_cache_decorator", "def load_data(cache_file_h5py,cache_file_pickle):\n if not os.path.exists(cache_file_h5py) or not os.path.exists(cache_file_pickle):\n raise RuntimeError(\"############################ERROR##############################\\n. \"\n \"please download cache file, it include training data and vocabulary & labels. \"\n \"link can be found in README.md\\n download zip file, unzip it, then put cache files as FLAGS.\"\n \"cache_file_h5py and FLAGS.cache_file_pickle suggested location.\")\n print(\"INFO. 
cache file exists. going to load cache file\")\n f_data = h5py.File(cache_file_h5py, 'r')\n print(\"f_data.keys:\",list(f_data.keys()))\n train_X=f_data['train_X'] # np.array(\n print(\"train_X.shape:\",train_X.shape)\n train_Y=f_data['train_Y'] # np.array(\n print(\"train_Y.shape:\",train_Y.shape,\";\")\n vaild_X=f_data['vaild_X'] # np.array(\n valid_Y=f_data['valid_Y'] # np.array(\n test_X=f_data['test_X'] # np.array(\n test_Y=f_data['test_Y'] # np.array(\n\n\n word2index, label2index=None,None\n with open(cache_file_pickle, 'rb') as data_f_pickle:\n word2index, label2index=pickle.load(data_f_pickle)\n print(\"INFO. cache file load successful...\")\n return word2index, label2index,train_X,train_Y,vaild_X,valid_Y,test_X,test_Y", "def sculptMeshCacheCtx(*args, **kwargs):\n\n pass", "def invalidate_caches():", "def cache_data(file_suffix_or_index):\n def actual_decorator(fun):\n @functools.wraps(fun)\n def func_wrapper(*args, **kwargs):\n course = args[1]\n file_suffix = file_suffix_or_index\n if isinstance(file_suffix, int):\n file_suffix = args[file_suffix]\n cache_file_path = '{}-{}.xml'.format(\n path.join(XML_CACHE_PATH, course['@courseid']),\n file_suffix)\n should_download = True\n if path.exists(cache_file_path):\n should_download = False\n with open(cache_file_path, 'r', encoding='utf-8') as cache:\n try:\n return xmltodict.parse(cache.read())\n except ExpatError:\n # error when parsing, redownload\n should_download = True\n if should_download:\n course_data = fun(*args, **kwargs)\n with open(cache_file_path, 'w', encoding='utf-8') as cache:\n cache.write(course_data)\n return xmltodict.parse(course_data)\n return func_wrapper\n return actual_decorator", "def read_trace(\n cf: CacheFile, idx: int, what: str = \"attrs\"\n) -> Union[TraceData, TraceAttributes]:\n if type(idx) != int:\n raise ValueError(\"Index must be an integer\")\n if idx >= 0:\n cnt = -1\n with read_file(cf.fname) as f:\n for origin in f.keys():\n for ix, key in enumerate(f[origin][\"traces\"], start=cnt + 1):\n if idx == ix:\n dset = f[origin][\"traces\"][key]\n dset.id.refresh() # load fresh from file\n if what == \"attrs\":\n attrs = parse_traceattrs(dset.attrs)\n attrs[\"original_file\"] = str(cf.fname)\n attrs[\"original_index\"] = idx\n check_metadata(str(attrs[\"readout\"]), attrs)\n return attrs\n elif what == \"data\":\n data = parse_tracedata(dset)\n return data\n else:\n raise NotImplementedError(f\"{what} can not be loaded\")\n cnt = idx\n\n raise IndexError(f\"{idx} not in cachefile\")", "def extract_file_from_cache(shared_cache_map):\r\n\r\n def process_index_array(scm, array_pointer, level, limit, vacbary=[]):\r\n\r\n \"\"\" Recursively process the sparse multilevel VACB index array\r\n\r\n Args:\r\n array_pointer: The address of a possible index array\r\n shared_cache_map: The associated SHARED_CACHE_MAP object\r\n level: The current level\r\n limit: The level where we abandon all hope. 
Ideally this is 7\r\n vacbary: An array of collected VACBs\r\n\r\n Returns:\r\n vacbary: Collected VACBs\r\n \"\"\"\r\n\r\n if level > limit:\r\n return []\r\n\r\n ArrayHead = array_pointer\r\n\r\n\r\n # Iterate through the entries\r\n for _i in range(0, 128):\r\n vacb_addr = ArrayHead + (_i * size_of_pointer)\r\n vacb_entry = int(_pointer_struct.unpack(self.context.layers['primary'].read(vacb_addr, size_of_pointer))[0])\r\n\r\n # Check if the VACB entry is in use\r\n if vacb_entry == 0x0:\r\n continue\r\n\r\n Vacb = def_conf.object(def_conf.symbols + constants.BANG + '_VACB',\r\n offset=vacb_entry, layer_name='primary')\r\n\r\n if Vacb.SharedCacheMap.real == scm.vol.offset:\r\n vacbinfo = extract_vacb(Vacb, 0x40000, scm)\r\n if vacbinfo:\r\n vacbary.append(vacbinfo)\r\n else:\r\n # Process the next level of the multi-level array\r\n vacbary = process_index_array(scm, vacb_entry, level + 1, limit, vacbary)\r\n\r\n return vacbary\r\n\r\n def extract_vacb(vacbs, size, shared_cache_map):\r\n vacbinfo = {}\r\n # Validata vacbs\r\n try:\r\n if not vacbs.Overlay:\r\n return vacbinfo\r\n if vacbs.SharedCacheMap.real != shared_cache_map.vol.offset:\r\n return vacbinfo\r\n if vacbs.Overlay.FileOffset.QuadPart == \"ATZ\": # just need to check that the member is valid\r\n return vacbinfo\r\n except exceptions.InvalidAddressException:\r\n return vacbinfo\r\n\r\n ActiveCount = vacbs.Overlay.ActiveCount\r\n FileOffset = vacbs.Overlay.FileOffset.QuadPart & 0xFFFFFFFFFFFF0000\r\n BaseAddress = vacbs.BaseAddress.real\r\n\r\n vacbinfo['foffset'] = int(FileOffset)\r\n vacbinfo['acount'] = int(ActiveCount)\r\n vacbinfo['voffset'] = int(vacbs.vol.offset)\r\n vacbinfo['baseaddr'] = int(BaseAddress)\r\n vacbinfo['size'] = int(size)\r\n\r\n return vacbinfo\r\n\r\n # https://www.youtube.com/watch?v=U6JJC7TmY-U\r\n scm = shared_cache_map\r\n valid = True\r\n try:\r\n\r\n file_size = scm.FileSize.QuadPart\r\n valid_data_len = scm.ValidDataLength.QuadPart\r\n section_size = scm.SectionSize.QuadPart\r\n if file_size <= 0 or valid_data_len <= 0:\r\n valid = False\r\n\r\n elif section_size <= 0 or ((file_size < valid_data_len) and valid_data_len != 0x7fffffffffffffff):\r\n valid = False\r\n except exceptions.InvalidAddressException:\r\n valid = False\r\n\r\n if valid:\r\n vacbary = []\r\n section_size = scm.SectionSize.QuadPart\r\n full_blocks = section_size // 0x40000 # VACB block size\r\n left_over = section_size % 0x40000\r\n\r\n iterval = 0\r\n while (iterval < full_blocks) and (full_blocks <= 4):\r\n Vacbs = self.InitialVacbs[iterval]\r\n vacbinfo = extract_vacb(Vacbs, 0x40000, scm)\r\n if vacbinfo:\r\n vacbary.append(vacbinfo)\r\n iterval += 1\r\n\r\n if (left_over > 0) and (full_blocks < 4):\r\n Vacbs = self.InitialVacbs[iterval]\r\n vacbinfo = extract_vacb(Vacbs, left_over, scm)\r\n if vacbinfo:\r\n vacbary.append(vacbinfo)\r\n\r\n try:\r\n Vacbs = scm.Vacbs\r\n if Vacbs.real == 0 or scm.InitialVacbs[0].real == Vacbs.real:\r\n return vacbary\r\n except exceptions.InvalidAddressException:\r\n return vacbary\r\n\r\n size_of_pointer = nt.get_type('pointer').size\r\n _pointer_struct = struct.Struct(\"<Q\") if size_of_pointer == 8 else struct.Struct('I')\r\n\r\n # < 2 ** (VACB_OFFSET_SHIFT + VACB_LEVEL_SHIFT) (first level -> 32 MB)\r\n if section_size < 0x2000000:\r\n\r\n ArrayHead = Vacbs.real\r\n _i = 0\r\n for _i in range(0, full_blocks):\r\n vacb_addr = ArrayHead + (_i * size_of_pointer)\r\n vacb_entry = int(_pointer_struct.unpack(self.context.layers['primary'].read(vacb_addr, size_of_pointer))[0])\r\n if (not 
vacb_entry) or (not def_conf.layers['primary'].is_valid(vacb_entry)):\r\n continue\r\n Vacb = def_conf.object(def_conf.symbols + constants.BANG + '_VACB',\r\n offset = vacb_entry, layer_name = 'primary')\r\n vacbinfo = extract_vacb(Vacb, 0x40000, scm)\r\n if vacbinfo:\r\n vacbary.append(vacbinfo)\r\n if left_over > 0:\r\n vacb_addr = ArrayHead + ((_i + 1) * size_of_pointer)\r\n vacb_entry = int(_pointer_struct.unpack(self.context.layers['primary'].read(vacb_addr, size_of_pointer))[0])\r\n\r\n if (not vacb_entry) or (not def_conf.layers['primary'].is_valid(vacb_entry)):\r\n return vacbary\r\n\r\n Vacb = def_conf.object(def_conf.symbols + constants.BANG + '_VACB',\r\n offset=vacb_entry, layer_name='primary')\r\n vacbinfo = extract_vacb(Vacb, left_over, scm)\r\n if vacbinfo:\r\n vacbary.append(vacbinfo)\r\n\r\n return vacbary\r\n\r\n\r\n else:\r\n # Greater than 32 MB\r\n level_depth = math.ceil(math.log(section_size, 2))\r\n level_depth = (level_depth - 18) // 7\r\n level_depth = math.ceil(level_depth)\r\n limit_depth = level_depth\r\n\r\n ArrayHead = Vacbs.real\r\n for _i in range(0, 128):\r\n vacb_addr = ArrayHead + (_i * size_of_pointer)\r\n vacb_entry = int(_pointer_struct.unpack(self.context.layers['primary'].read(vacb_addr, size_of_pointer))[0])\r\n\r\n if vacb_entry == 0x0:\r\n continue\r\n Vacb = def_conf.object(def_conf.symbols + constants.BANG + '_VACB',\r\n offset=vacb_entry, layer_name='primary')\r\n if Vacb.SharedCacheMap.real == scm.vol.offset:\r\n vacbinfo = extract_vacb(Vacb, 0x40000, scm)\r\n if vacbinfo:\r\n vacbary.append(vacbinfo)\r\n else:\r\n vacbary = process_index_array(scm, vacb_entry, 2, limit_depth, vacbary)\r\n\r\n return vacbary", "def cache_args(f):\n use_cache = app.cmd_arg('-C', '--cache', dest='use_cache', action=\"store_true\", help=\"Use cache directory\", default=None)\n no_use_cache = app.cmd_arg('--no-cache', dest='use_cache', action=\"store_false\", help=\"Don't use cache directory\")\n return no_use_cache(use_cache(f))", "def GenerateCacheHitTests(\n self, cache_args: Optional[dict]\n ) -> Generator[Tuple[str, _TraceTestArguments], None, None]:\n if self.test_renavigation:\n for cache_hit_page in self.cache_pages:\n yield (posixpath.join(gpu_data_relative_path, cache_hit_page),\n _TraceTestArguments(browser_args=self.browser_args,\n category=self.category,\n test_harness_script=self.test_harness_script,\n finish_js_condition=self.finish_js_condition,\n success_eval_func=self.cache_eval_func,\n other_args=cache_args,\n restart_browser=False,\n origin=self.cache_page_origin))\n for cache_hit_page in self.cache_pages:\n yield (posixpath.join(gpu_data_relative_path, cache_hit_page),\n _TraceTestArguments(browser_args=self.browser_args,\n category=self.category,\n test_harness_script=self.test_harness_script,\n finish_js_condition=self.finish_js_condition,\n success_eval_func=self.cache_eval_func,\n other_args=cache_args,\n restart_browser=True,\n origin=self.cache_page_origin))", "def cache(sub_args):\n import subprocess\n\n sif_cache = sub_args.sif_cache\n # Get absolute PATH to templates in rna-seek git repo\n repo_path = os.path.dirname(os.path.abspath(__file__))\n images = os.path.join(repo_path, 'config','containers', 'images.json')\n\n\n # Create image cache\n if not exists(sif_cache):\n # Pipeline output directory does not exist on filesystem\n os.makedirs(sif_cache)\n elif exists(sif_cache) and os.path.isfile(sif_cache):\n # Provided Path for pipeline output directory exists as file\n raise OSError(\"\"\"\\n\\tFatal: Failed to create provided sif 
cache directory!\n User provided --sif-cache PATH already exists on the filesystem as a file.\n Please {} cache again with a different --sif-cache PATH.\n \"\"\".format(sys.argv[0])\n )\n\n # Check if local SIFs already exist on the filesystem\n with open(images, 'r') as fh:\n data = json.load(fh)\n\n pull = []\n for image, uri in data['images'].items():\n sif = os.path.join(sif_cache, '{}.sif'.format(os.path.basename(uri).replace(':', '_')))\n if not exists(sif):\n # If local sif does not exist on in cache, print warning\n # and default to pulling from URI in config/containers/images.json\n print('Image will be pulled from \"{}\".'.format(uri), file=sys.stderr)\n pull.append(uri)\n\n if not pull:\n # Nothing to do!\n print('Singularity image cache is already up to update!')\n else:\n # There are image(s) that need to be pulled \n if not sub_args.dry_run:\n # submission_script for rna-seek cache is /path/to/output/resources/cacher\n # Quote user provided values to avoid shell injections\n masterjob = subprocess.Popen(\n 'sbatch --parsable -J pl:cache --gres=lscratch:200 --time=10:00:00 --mail-type=BEGIN,END,FAIL ' +\n str(os.path.join(repo_path, 'resources', 'cacher')) + ' slurm ' +\n \" -s '{}' \".format(sif_cache) +\n \" -i '{}' \".format(','.join(pull)) + \n \" -t '/lscratch/${SLURM_JOB_ID}/.singularity/' \", \n cwd = sif_cache, shell=True, stderr = subprocess.STDOUT, stdout = subprocess.PIPE)\n\n masterjob.communicate()\n print('RNA-seek reference cacher submitted master job with exit-code: {}'.format(masterjob.returncode))", "def _run_with_cache(self, filename, **kwargs):\n kwargs[\"spellcheck_cache\"] = self.__class__.cache_dir\n return run_lint_spelling_only_main(filename, **kwargs)", "def read_from_cache(params):\n\tdir_name = get_dir_name(params)\n\tf = open(cache_folder+dir_name+'/data.csv',\"r\")\n\tcontent = f.read()\n\treturn content", "def prepareFrameCache(self, frame, cacheType) -> retval:\n ..." ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
create a new cachefile from annotations and traces
def populate( tf: FileName, annotations: List[Annotations], traceslist: List[List[TraceData]] ) -> FileName: tf = Path(tf).expanduser().absolute() # populate the cachefile with h5py.File(tf, "w") as f: print(f"Merging into {tf.name} from:") for settings, traces in zip(annotations, traceslist): print(" -", settings["origin"]) ofile = f.create_group(settings["origin"]) # fill with ofile-attributes attrs = settings["attrs"] for key, val in attrs.items(): ofile.attrs.modify(str(key), str(val)) # fill with trace-data and trace-attributes tracegrp = ofile.create_group("traces") for tattr, trace in zip(settings["traces"], traces): tattr.update(**attrs) idx = str(tattr["id"]) trace = tracegrp.create_dataset(idx, data=trace) for k, v in tattr.items(): trace.attrs.modify(str(k), str(v)) return tf
[ "def cacheFile(cacheInfo=\"string\", noBackup=bool, prefix=bool, pointCount=bool, fileName=\"string\", refresh=bool, geometry=bool, runupFrames=int, interpEndTime=(), format=\"string\", inTangent=\"string\", worldSpace=bool, doubleToFloat=bool, sampleMultiplier=int, cacheFileNode=\"string\", outAttr=\"string\", pointsAndNormals=\"string\", startTime=(), inAttr=\"string\", replaceCachedFrame=bool, staticCache=bool, deleteCachedFrame=bool, simulationRate=(), channelName=\"string\", createCacheNode=bool, attachFile=bool, pc2File=\"string\", replaceWithoutSimulating=bool, channelIndex=bool, points=\"string\", dataSize=bool, singleCache=bool, cacheFormat=\"string\", convertPc2=bool, cacheableNode=\"string\", directory=\"string\", outTangent=\"string\", interpStartTime=(), appendFrame=bool, endTime=(), creationChannelName=\"string\", cacheableAttrs=\"string\", descriptionFileName=bool):\n pass", "def __init__(self, cachefile, srcfile, *args, **kwargs):\n self._dump_mutations = False\n self.cachefile = cachefile\n self._srcfile = self.srchash = None\n # the cache maps md5 sums of keys to a 2-list of ['filename', {'srcfile': 'srchash'}]\n self.cache = {}\n # maps the sources to the current MD5 hash\n self.sources = {}\n # cache keys, not stored\n self._hashes = {}\n self.load()\n self.srcfile = srcfile\n self.update(*args, **kwargs)\n self._dump_mutations = True\n self.dump()", "def _arg_cache(name):\n return wf().cachefile(name + '.argcache')", "def _create_fake_cache_file(self, file_name):\n fobj = open(os.path.join(self.cache_path, file_name), 'w')\n fobj.close()", "def make_cached_fields_file():\n make_cached_file(AFFINITY_BASE + 'fields', FIELDS_FILE)", "def cache_data(file_suffix_or_index):\n def actual_decorator(fun):\n @functools.wraps(fun)\n def func_wrapper(*args, **kwargs):\n course = args[1]\n file_suffix = file_suffix_or_index\n if isinstance(file_suffix, int):\n file_suffix = args[file_suffix]\n cache_file_path = '{}-{}.xml'.format(\n path.join(XML_CACHE_PATH, course['@courseid']),\n file_suffix)\n should_download = True\n if path.exists(cache_file_path):\n should_download = False\n with open(cache_file_path, 'r', encoding='utf-8') as cache:\n try:\n return xmltodict.parse(cache.read())\n except ExpatError:\n # error when parsing, redownload\n should_download = True\n if should_download:\n course_data = fun(*args, **kwargs)\n with open(cache_file_path, 'w', encoding='utf-8') as cache:\n cache.write(course_data)\n return xmltodict.parse(course_data)\n return func_wrapper\n return actual_decorator", "def _create_annotations(self, args: parser_extensions.Namespace):\n annotations = flags.Get(args, 'annotations')\n return self._dict_to_annotations_message(annotations)", "def __init__(\n self,\n file_path: str | dict[str, str],\n entry_lifetime: int = constants.DAY_IN_SECONDS,\n file_timeout: int = 0,\n ) -> None:\n super().__init__(\n entry_lifetime=entry_lifetime,\n )\n if isinstance(file_path, str):\n self.file_path = os.path.expanduser(file_path)\n else:\n current_platform = platform.system().lower()\n if current_platform is None or current_platform not in file_path:\n self.file_path = next(iter(file_path.values()))\n else:\n self.file_path = os.path.expanduser(file_path[current_platform])\n # Once we decided on where to put the file cache make sure that this\n # place is readable/writable by us\n random_string = \"\".join(random.choice(string.ascii_letters) for _ in range(5))\n cache_folder = os.path.dirname(self.file_path)\n try:\n tmp_file, tmp_file_path = tempfile.mkstemp(\n 
dir=cache_folder,\n )\n except OSError as o_err:\n raise PermissionError(\n o_err.errno,\n \"Cache folder is not writeable\",\n cache_folder,\n )\n try:\n with open(tmp_file, \"w\") as w_file:\n # If mkstemp didn't fail this shouldn't throw an error\n w_file.write(random_string)\n try:\n with open(tmp_file_path) as r_file:\n if r_file.read() != random_string:\n Exception(\"Temporary file just written has wrong content\")\n except OSError as o_err:\n raise PermissionError(\n o_err.errno,\n \"Cache file is not readable\",\n tmp_file_path,\n )\n finally:\n if os.path.exists(tmp_file_path) and os.path.isfile(tmp_file_path):\n os.unlink(tmp_file_path)\n self.file_timeout = file_timeout\n self._file_lock_path = f\"{self.file_path}.lock\"\n self._file_lock = FileLock(self._file_lock_path, timeout=self.file_timeout)\n self.last_loaded: datetime.datetime | None = None\n if os.path.exists(self.file_path):\n with self._lock:\n self._load()\n # indicate whether the cache is modified or not, this variable is for\n # SFDictFileCache to determine whether to dump cache to file when _save is called\n self._cache_modified = False", "def setFileCache(self, *args):\r\n return _osgDB.Registry_setFileCache(self, *args)", "def create_cache():\n if cfg.CACHE_PDF:\n os.makedirs(cfg.CACHE_PATH, exist_ok=True)\n os.makedirs(cfg.CACHE_UNREADABLE_PATH, exist_ok=True)", "def getFileCache(self, *args):\r\n return _osgDB.Registry_getFileCache(self, *args)", "def CreateRequests(self, args):\n url_map_ref = self.CreateGlobalReference(\n args.urlmap, resource_type='urlMaps')\n request = self.messages.ComputeUrlMapsInvalidateCacheRequest(\n project=self.project,\n urlMap=url_map_ref.Name(),\n cacheInvalidationRule=self.messages.CacheInvalidationRule(\n path=args.path))\n\n return [request]", "def create_from_gaf(self, file, **args):\n return self.create_from_file(file, fmt='gaf', **args)", "def gen_credentials_cache(keytab, principal):\n cachefile=tempfile.mkstemp()\n subprocess.check_call(['kinit', '-k', '-t', keytab, '-c', cachefile[1], principal])\n return cachefile", "def file_cache(tmpdir):\n return FileCache(str(tmpdir))", "def cache_args(f):\n use_cache = app.cmd_arg('-C', '--cache', dest='use_cache', action=\"store_true\", help=\"Use cache directory\", default=None)\n no_use_cache = app.cmd_arg('--no-cache', dest='use_cache', action=\"store_false\", help=\"Don't use cache directory\")\n return no_use_cache(use_cache(f))", "def create_cachefile_name(key, extension):\n return reex.sub(r\"(.*/)*(.*\\.).*\", r\"__cache__\\2\" + extension, key)", "def _do_create_artifact_cache(self, spec, action):\n compression = self._options.compression_level\n if compression not in range(1, 10):\n raise ValueError('compression_level must be an integer 1-9: {}'.format(compression))\n\n artifact_root = self._options.pants_workdir\n\n def create_local_cache(parent_path):\n path = os.path.join(parent_path, self._cache_dirname)\n self._log.debug('{0} {1} local artifact cache at {2}'\n .format(self._task.stable_name(), action, path))\n return LocalArtifactCache(artifact_root, path, compression,\n self._options.max_entries_per_target,\n permissions=self._options.write_permissions,\n dereference=self._options.dereference_symlinks)\n\n def create_remote_cache(remote_spec, local_cache):\n all_urls = remote_spec.split('|')\n\n if len(all_urls) > 0:\n local_cache = local_cache or TempLocalArtifactCache(artifact_root, compression)\n if any(map(self._is_s3, all_urls)):\n if len(all_urls) != 1:\n raise InvalidCacheSpecError('S3 Cache only supports a 
single entry, got: {0}'.format(\n remote_spec))\n return S3ArtifactCache(\n self._options.s3_credentials_file,\n self._options.s3_config_file,\n self._options.s3_profile,\n artifact_root,\n all_urls[0],\n local_cache,\n )\n pingable = self.get_available_urls(all_urls)\n best_url_selector = BestUrlSelector(\n ['{}/{}'.format(url.rstrip('/'), self._cache_dirname) for url in pingable]\n )\n return RESTfulArtifactCache(\n artifact_root,\n best_url_selector,\n local_cache,\n read_timeout=self._options.read_timeout,\n write_timeout=self._options.write_timeout,\n )\n\n local_cache = create_local_cache(spec.local) if spec.local else None\n remote_cache = create_remote_cache(spec.remote, local_cache) if spec.remote else None\n if remote_cache:\n return remote_cache\n return local_cache", "def _create_cache(self):\n if self._cachepath: # user defined cache path.\n if not os.path.exists(self._cachepath):\n os.makedirs(self._cachepath) # create cache\n self.path = self._cachepath if self._cachepath else tempfile.mkdtemp()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create the settlement after the lock is acquired. (Async S_tr creation) Test that all the settle transactions are set up correctly
def test_s_tr_lock_aquire_creation(self): create_bill(self.ul, 10) s = self.create_settlement() self.assertEqual(s.wait_count, 0) # s_tr will be setted self.assertEqual(s.settletransaction_set.all().count(), 3) self.assertEqual(s.settletransaction_set.get(id=1).amount, 17.5) self.assertEqual(s.settletransaction_set.get(id=2).amount, 17.5) self.assertEqual(s.settletransaction_set.get(id=3).amount, 17.5)
[ "def create_test_lock(self, test_uuid):", "async def test_transaction_nonce_lock(self):\n\n no_tests = 20\n\n txs = []\n tx = await self.get_tx_skel(FAUCET_PRIVATE_KEY, TEST_ADDRESS, 10 ** 10)\n dtx = decode_transaction(tx)\n txs.append(sign_transaction(tx, FAUCET_PRIVATE_KEY))\n for i in range(11, 10 + no_tests):\n tx = await self.get_tx_skel(FAUCET_PRIVATE_KEY, TEST_ADDRESS, 10 ** i)\n self.assertEqual(decode_transaction(tx).nonce, dtx.nonce)\n txs.append(sign_transaction(tx, FAUCET_PRIVATE_KEY))\n\n responses = await asyncio.gather(*(to_asyncio_future(self.fetch(\"/tx\", method=\"POST\", body={\"tx\": tx})) for tx in txs))\n\n ok = 0\n bad = 0\n for resp in responses:\n if resp.code == 200:\n ok += 1\n else:\n bad += 1\n self.assertEqual(ok, 1)\n self.assertEqual(bad, no_tests - 1)\n\n # TODO: deal with lingering ioloop tasks better\n await asyncio.sleep(1)", "async def test_prevent_out_of_order_txs(self):\n\n tx1 = await self.get_tx_skel(FAUCET_PRIVATE_KEY, TEST_ADDRESS, 10 ** 10)\n dtx1 = decode_transaction(tx1)\n stx1 = sign_transaction(tx1, FAUCET_PRIVATE_KEY)\n tx2 = await self.get_tx_skel(FAUCET_PRIVATE_KEY, TEST_ADDRESS, 10 ** 10, dtx1.nonce + 1)\n stx2 = sign_transaction(tx2, FAUCET_PRIVATE_KEY)\n\n resp = await self.fetch(\"/tx\", method=\"POST\", body={\"tx\": stx2})\n self.assertEqual(resp.code, 400, resp.body)\n\n resp = await self.fetch(\"/tx\", method=\"POST\", body={\"tx\": stx1})\n self.assertEqual(resp.code, 200, resp.body)\n resp = await self.fetch(\"/tx\", method=\"POST\", body={\"tx\": stx2})\n self.assertEqual(resp.code, 200, resp.body)\n\n # lets the transaction queue processing run before ending the test\n await asyncio.sleep(1)", "def test_create_wire_transaction(session):\n # Create an account and an invoice for the account\n account = factory_create_wire_account(auth_account_id='1', status=CfsAccountStatus.ACTIVE.value)\n previous_day = datetime.now() - timedelta(days=1)\n # Create an invoice for this account\n invoice = factory_invoice(payment_account=account, created_on=previous_day, total=10, payment_method_code=None)\n\n fee_schedule = FeeScheduleModel.find_by_filing_type_and_corp_type('CP', 'OTANN')\n line = factory_payment_line_item(invoice.id, fee_schedule_id=fee_schedule.fee_schedule_id)\n line.save()\n\n assert invoice.invoice_status_code == InvoiceStatus.CREATED.value\n assert invoice.payment_method_code == 'WIRE'\n\n CreateInvoiceTask.create_invoices()\n\n updated_invoice: InvoiceModel = InvoiceModel.find_by_id(invoice.id)\n inv_ref: InvoiceReferenceModel = InvoiceReferenceModel. 
\\\n find_by_invoice_id_and_status(invoice.id, InvoiceReferenceStatus.ACTIVE.value)\n\n assert inv_ref\n assert updated_invoice.invoice_status_code == InvoiceStatus.SETTLEMENT_SCHEDULED.value", "def freeze_create(request):\n\n\trequest.user.kingdom.freeze_set.create()", "def test_create_online_banking_transaction(session):\n # Create an account and an invoice for the account\n account = factory_create_online_banking_account(auth_account_id='1', status=CfsAccountStatus.ACTIVE.value)\n previous_day = datetime.now() - timedelta(days=1)\n # Create an invoice for this account\n invoice = factory_invoice(payment_account=account, created_on=previous_day, total=10, payment_method_code=None)\n\n fee_schedule = FeeScheduleModel.find_by_filing_type_and_corp_type('CP', 'OTANN')\n line = factory_payment_line_item(invoice.id, fee_schedule_id=fee_schedule.fee_schedule_id)\n line.save()\n\n assert invoice.invoice_status_code == InvoiceStatus.CREATED.value\n\n CreateInvoiceTask.create_invoices()\n\n updated_invoice: InvoiceModel = InvoiceModel.find_by_id(invoice.id)\n inv_ref: InvoiceReferenceModel = InvoiceReferenceModel. \\\n find_by_invoice_id_and_status(invoice.id, InvoiceReferenceStatus.ACTIVE.value)\n\n assert inv_ref\n assert updated_invoice.invoice_status_code == InvoiceStatus.SETTLEMENT_SCHEDULED.value", "def test_s_tr_success_finish(self):\n s = self.create_settlement()\n\n for s_tr in s.settletransaction_set.all():\n\n # both user has agree to this payment\n s_tr.approve(s_tr.from_u)\n s_tr.approve(s_tr.to_u)\n\n self.assertEqual(s.state, FINISH)\n self.assertEqual(s.bill_set.first().state, FINISH)", "def test_agreement_state_set_pending_on_creation(self):\n api.build() # instantiate api for backends\n self.entity.mixins.append(self._get_sample_provider_mixins()[0][0])\n self.agree_back.create(self.entity, self.extras)\n\n entity_state = self.entity.attributes[\"occi.agreement.state\"]\n self.assertEqual(entity_state, \"pending\")\n LOG.info(\"Agreement has inital state set to 'pending'.\")", "def test_create_transaction(self):\n ta = self.transaction\n\n # Make sure the data fields have been filled\n self.assertEqual(len(ta.key), 32)\n self.assertNotEqual(ta.time_created, None)\n self.assertEqual(ta.firstname, \"Donald\")\n self.assertEqual(ta.lastname, \"Duck\")\n self.assertEqual(ta.company, \"None\")\n self.assertEqual(ta.email, \"donald.duck@duckburg.inv\")\n self.assertEqual(ta.telephone, \"991234567\")\n self.assertEqual(ta.mobile, \"+358991234567\")\n self.assertEqual(ta.street, \"1313 Webfoot Walk\")\n self.assertEqual(ta.postalcode, \"00000\")\n self.assertEqual(ta.city, \"Duckburg\")\n self.assertEqual(ta.country, \"US\")\n self.assertEqual(ta.information, \"Quack, damn you!\")\n self.assertEqual(ta.token, '')\n self.assertEqual(ta.time_pending, None)\n self.assertEqual(ta.time_cancelled, None)\n self.assertEqual(ta.time_paid, None)\n self.assertEqual(ta.payment_method_name, '')\n\n # Test properties\n self.assertEqual(ta.is_cancelled, False)\n self.assertEqual(ta.is_delivered, False)\n self.assertEqual(ta.is_pending, False)\n self.assertEqual(ta.is_paid, False)\n self.assertEqual(ta.full_name, \"Donald Duck\")\n\n # Make sure this doesn't crash\n self.assertEqual(ta.qr_code.startswith(\"http\"), True)\n\n # Check price functions\n self.assertEqual(ta.get_transaction_items().count(), 6)\n self.assertEqual(ta.get_total_price(), 70) # Note discounts\n self.assertEqual(ta.get_storeitem_count(self.items[0]), 1)\n self.assertEqual(ta.get_storeitem_count(self.items[2]), 5)\n 
self.assertEqual(ta.get_storeitem_count(self.items[1]), 0)\n\n # Make sure transaction items went through\n for item in ta.get_transaction_items():\n self.assertIn(item.item.id, [self.items[0].id, self.items[2].id])\n self.assertNotEqual(item.variant, None)\n self.assertEqual(item.time_delivered, None)\n self.assertEqual(len(item.key), 32)\n self.assertEqual(item.is_delivered, False)\n self.assertEqual(item.qr_code.startswith(\"http\"), True)\n\n # Check amounts (manually)\n self.assertEqual(TransactionItem.objects.filter(transaction=ta, item=self.items[0]).count(), 1)\n self.assertEqual(TransactionItem.objects.filter(transaction=ta, item=self.items[2]).count(), 5)\n\n # Check discount(s)\n discount_items = TransactionItem.objects.filter(transaction=ta, item=self.items[2])\n for item in discount_items:\n self.assertEqual(item.original_price, 20)\n self.assertEqual(item.purchase_price, 10)\n non_discount_item = TransactionItem.objects.get(transaction=ta, item=self.items[0])\n self.assertEqual(non_discount_item.original_price, 20)\n self.assertEqual(non_discount_item.purchase_price, 20)", "def test_block_creation(self):\n CommonTestCases.admin_token_assert_equal(\n self,\n create_block_query,\n create_block_response\n )", "def steal_test_lock(self, test_uuid):", "def try_acquire(self):\r\n return db_api.stack_lock_create(self.stack.id, self.engine_id)", "def test_division_manufacturing_time_transactions_post(self):\n pass", "def test_try_lock():\n with throttle(b\"[semaphores]\\nA=1\") as url:\n # We hold the lease, all following calls are going to block\n first = Peer.from_server_url(url)\n first.acquire(\"A\")\n with pytest.raises(Timeout):\n with lock(BASE_URL, \"A\", timeout=timedelta(seconds=1)):\n pass", "def testSessionCreate(self):\n success = False\n vss = None\n\n try:\n vss = self.session.create_viral_seq_set()\n\n success = True\n except Exception:\n pass\n\n self.failUnless(success)\n self.failIf(vss is None)", "def test_create(self):\n svr_admin = treadmill.context.AdminContext.server.return_value\n self.svr.create('foo.somewhere.in.xx.com', {'cell': 'ny-999-cell',\n 'partition': 'xxx'})\n svr_admin.get.assert_called_with('foo.somewhere.in.xx.com', dirty=True)", "def test_create_withdrawal(self):\n pass", "def test_create_complete_state_err(self):\r\n del self.templ['Resources']['WebServer']\r\n self.parent_resource.set_template(self.templ, {\"KeyName\": \"test\"})\r\n\r\n ctx = self.parent_resource.context\r\n phy_id = \"cb2f2b28-a663-4683-802c-4b40c916e1ff\"\r\n templ = parser.Template(self.templ)\r\n env = environment.Environment({\"KeyName\": \"test\"})\r\n self.stack = parser.Stack(ctx, phy_id, templ, env, timeout_mins=None,\r\n disable_rollback=True,\r\n parent_resource=self.parent_resource)\r\n\r\n self.m.StubOutWithMock(parser, 'Template')\r\n parser.Template(self.templ, files={}).AndReturn(templ)\r\n\r\n self.m.StubOutWithMock(environment, 'Environment')\r\n environment.Environment({\"KeyName\": \"test\"}).AndReturn(env)\r\n\r\n self.m.StubOutWithMock(parser, 'Stack')\r\n parser.Stack(ctx, phy_id, templ, env, timeout_mins=None,\r\n disable_rollback=True,\r\n parent_resource=self.parent_resource,\r\n owner_id=self.parent_stack.id,\r\n adopt_stack_data=None).AndReturn(self.stack)\r\n\r\n st_set = self.stack.state_set\r\n self.m.StubOutWithMock(self.stack, 'state_set')\r\n self.stack.state_set(self.stack.CREATE, self.stack.IN_PROGRESS,\r\n \"Stack CREATE started\").WithSideEffects(st_set)\r\n\r\n self.stack.state_set(self.stack.CREATE, self.stack.COMPLETE,\r\n \"Stack 
CREATE completed successfully\")\r\n self.m.ReplayAll()\r\n\r\n self.assertRaises(exception.ResourceFailure,\r\n scheduler.TaskRunner(self.parent_resource.create))\r\n self.assertEqual(('CREATE', 'FAILED'), self.parent_resource.state)\r\n self.assertEqual(('Error: Stack CREATE started'),\r\n self.parent_resource.status_reason)\r\n\r\n self.m.VerifyAll()\r\n # Restore state_set to let clean up proceed\r\n self.stack.state_set = st_set", "def test_setup(self):\n assert self.cosm_trade_handler.setup() is None\n self.assert_quantity_in_outbox(0)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Not only test that all the s_tr are finished, but also that every bill state is switched to FINISH
def test_s_tr_success_finish(self): s = self.create_settlement() for s_tr in s.settletransaction_set.all(): # both user has agree to this payment s_tr.approve(s_tr.from_u) s_tr.approve(s_tr.to_u) self.assertEqual(s.state, FINISH) self.assertEqual(s.bill_set.first().state, FINISH)
[ "def test_s_tr_lock_aquire_creation(self):\n create_bill(self.ul, 10)\n\n s = self.create_settlement()\n\n self.assertEqual(s.wait_count, 0)\n\n # s_tr will be setted\n self.assertEqual(s.settletransaction_set.all().count(), 3)\n\n self.assertEqual(s.settletransaction_set.get(id=1).amount, 17.5)\n self.assertEqual(s.settletransaction_set.get(id=2).amount, 17.5)\n self.assertEqual(s.settletransaction_set.get(id=3).amount, 17.5)", "def done(self,cr,uid,ids,context={}):\n for fees in self.browse(cr, uid, ids, context=context):\n contract = fees.contract_id\n voucher_id = super(contract_co_operative_fees, self).create_invoice(cr, uid, ids, context)\n fees.write({'state':'done'})\n \"\"\"user_obj = self.pool.get('res.users')\n voucher_obj = self.pool.get('account.voucher')\n voucher_line_obj = self.pool.get('account.voucher.line')\n\t\n for fees in self.browse(cr, uid, ids, context=context):\n\t \n contract = fees.contract_id\n \n voucher_id = voucher_obj.create(cr, uid, {\n 'contract_id': fees.contract_id.id,\n 'amount': fees.fees_amount,\n 'type': 'purchase',\n 'date': time.strftime('%Y-%m-%d'),\n 'partner_id': contract.partner_id.id , \n #'journal_id': 67,\n 'reference': contract.name+\"/\"+ fees.name,\n 'state': 'draft',\n # 'name':'Project fees:'+fees.name +'project :'+contract.department_id.name,\n # 'currency_id':contract.currency_id.id,\n })\n voucher_obj.write(cr,uid,[voucher_id],{'amount': fees.fees_amount}, context=context)\n \n \n vocher_line_id = voucher_line_obj.create(cr, uid, {\n 'amount': fees.fees_amount,\n 'voucher_id': voucher_id,\n 'type': 'dr',\n 'account_id': contract.contract_account.id,\n 'name': fees.name,\n })\n contract.write({'voucher_ids': [(4, voucher_id)]}, context=context)\n fees.write({'state':'done'})\n\t print \"voucher id:\",voucher_id\n\t print \"amount:\",fees.fees_amount\n\n \n Workflow function to change the state to confirm.\n \n @return: True\n \"\"\"\n currency_obj = self.pool.get('res.currency')\n new_amount = 0.0\n for fees in self.browse(cr, uid, ids):\n \n contract_currency = contract.currency_id.id\n euro_id = currency_obj.search(cr, uid, [('name','=','EUR')],limit=1)\n curren = currency_obj.browse(cr, uid, euro_id)\n new_amount = currency_obj.compute(cr, uid, contract_currency, curren[0].id, fees.fees_amount, fees.fees_date) \n all_amount = contract.fees_total_amount + fees.fees_amount\n if all_amount > contract.contract_amount :\n raise osv.except_osv(_('Amount exceed !'), _('The total fees amount well be more than the contract amount ..'))\n else:\n contract.write({'fees_total_amount': all_amount}) \n self.write(cr,uid,ids,{'fees_amount_in_euro':new_amount })\n\n return True", "def testRetargetIncompleteWork(self):\n \n def _remove_notification_fields(sprint_backlog):\n # Trac's notification module sets fields like 'new' and 'link' in \n # the ticket to use them as template variables. 
This can cause \n # errors when we save the same ticket afterwards because they are \n # not custom fields so trac try to set a column with this name in\n # the main ticket table which fails.\n # This error only shows up when you run the full unit test suite, \n # not only this test.\n for bi in sprint_backlog:\n ticket = bi.ticket\n ticket._old.pop('new', None)\n ticket._old.pop('link', None)\n \n # Check the story and the tasks are in the sprint backlog\n sb = self.bmm.get(name=Key.SPRINT_BACKLOG, scope=self.old_sprint.name)\n self.assert_length(3, sb)\n new_sb = self.bmm.get(name=Key.SPRINT_BACKLOG, scope=self.new_sprint.name)\n self.assert_length(0, new_sb)\n # Now re-target the story to self.new_sprint, should be there with both tasks\n cmd_retarget_old = self.controller.RetargetTicketsCommand(self.env,\n sprint=self.old_sprint.name,\n retarget=self.new_sprint.name)\n cmd_retarget_new = self.controller.RetargetTicketsCommand(self.env,\n sprint=self.new_sprint.name,\n retarget=self.old_sprint.name)\n self.controller.process_command(cmd_retarget_old)\n self.teh.move_changetime_to_the_past([self.us1, self.t1, self.t2])\n \n self.assert_length(0, sb)\n self.assert_length(3, new_sb)\n \n _remove_notification_fields(new_sb)\n # Now close one ticket and check if only 2 are retargeted\n self.t1[Key.STATUS] = Status.CLOSED\n self.t1.save_changes('tester', 'Closed t1')\n self.teh.move_changetime_to_the_past([self.t1])\n self.assert_equals(Status.CLOSED, self.t1[Key.STATUS])\n # closed ticket should still remain\n self.assert_contains(self.t1, new_sb)\n # self.t2 is now moved back to the old sprint\n self.controller.process_command(cmd_retarget_new)\n # sb contains t2 (closed) and us1 (because referenced from t2)\n self.assert_contains(self.t2, sb)\n self.assert_contains(self.us1, sb)\n self.assert_length(2, sb)\n self.assert_contains(self.t1, new_sb)\n self.assert_contains(self.us1, new_sb)\n self.assert_length(2, new_sb) # there should be t1 even if closed and us1\n \n # Now close the second task, but not the story, and verify that only the\n # story is moved\n self.teh.move_changetime_to_the_past([self.us1, self.t1, self.t2])\n self.t2[Key.STATUS] = Status.CLOSED\n self.t2.save_changes('tester', 'Closed t2')\n self.t1[Key.SPRINT] = self.old_sprint.name\n self.t1[Key.STATUS] = Status.CLOSED\n self.t1.save_changes('tester', 'Moved t1 back')\n self.teh.move_changetime_to_the_past([self.us1, self.t1, self.t2])\n self.assert_length(3, sb)\n \n # Only us1 is incomplete so it is moved to the new sprint again\n self.controller.process_command(cmd_retarget_old)\n # Now the sprint backlog always shows the tickets which are linked :-)\n self.assert_length(3, sb) # t1, t2 and the linked story\n self.assert_length(1, new_sb) # only the story", "def done(self, cr, uid, ids, context={}):\n '''payment_enrich_lines_obj = self.pool.get('payment.enrich.lines')\n for fuel_plan in self.browse(cr, uid, ids,context):\n if not fuel_plan.quantity_ids:\n raise osv.except_osv(_('ValidateError'), _('In Order To Complete Fuel Plan Order You need To Enter Fuel Quantities!'))\n if fuel_plan.payment_method == 'enrich':\n details = 'Fixed Fuel Plan No:'+fuel_plan.name\n payment_enrich_lines_obj.create(cr, uid, {\n 'enrich_id':fuel_plan.enrich_id.id,\n 'cost': fuel_plan.cost,\n 'date':time.strftime('%Y-%m-%d'),\n 'state':'draft',\n 'name':details,\n 'department_id':fuel_plan.department_id.id,\n 'model_id':'fuel.plan',\n }, context=context)\n 
copy_attachments(self,cr,uid,[fuel_plan.id],'fuel.plan',fuel_plan.enrich_id.id,'payment.enrich', context)\n elif fuel_plan.payment_method == 'voucher': \n self.create_voucher(cr,uid,ids,context)'''\n return self.write(cr, uid, ids, {'state':'done'}, context=context)", "def test_bt_sar_sanity_check_state(self):\n #Iterating through the BT SAR scenarios\n for scenario in range(0, self.bt_sar_df.shape[0]):\n # Reading BT SAR table row into dict\n read_scenario = self.bt_sar_df.loc[scenario].to_dict()\n\n start_time = self.dut.adb.shell('date +%s.%m')\n time.sleep(1)\n\n #Setting SAR state to the read BT SAR row\n enforced_state = self.set_sar_state(self.dut, read_scenario,\n self.country_code)\n\n #Reading device state from logcat after forcing SAR State\n device_state = self.get_current_device_state(self.dut, start_time)\n\n #Comparing read device state to expected device state\n for key in enforced_state.keys():\n key_regex = r'{}:\\s*(\\d)'.format(key)\n try:\n propagated_value = int(\n re.findall(key_regex, device_state)[0])\n except IndexError:\n propagated_value = 'NA'\n\n if enforced_state[key] == propagated_value:\n self.sar_test_result.metric_value = 1\n self.log.info(\n 'scenario: {}, state : {}, forced_value: {}, value:{}'.\n format(scenario, key, enforced_state[key],\n propagated_value))\n else:\n self.log.error(\n 'scenario:{}, state : {}, forced_value: {}, value:{}'.\n format(scenario, key, enforced_state[key],\n propagated_value))", "def test_group(self):\n\n class DoneState(State):\n def __init__(self):\n State.__init__(self,outcomes=['done'])\n def execute(self,ud=None):\n return 'done'\n\n sm = StateMachine(['succeeded','done'])\n with sm:\n StateMachine.add('FAILSAUCE',DoneState())\n transitions = {'aborted':'FAILSAUCE','preempted':'FAILSAUCE'}\n with sm:\n StateMachine.add('FIRST', SimpleActionState(self.node, 'fibonacci', Fibonacci, goal = g1), transitions)\n StateMachine.add('SECOND', SimpleActionState(self.node, 'fibonacci', Fibonacci, goal = g2), transitions)\n StateMachine.add('THIRD', SimpleActionState(self.node, 'fibonacci', Fibonacci, goal = g1), transitions)\n spinner = threading.Thread(target=self.spin)\n spinner.start()\n outcome = sm.execute()\n\n assert outcome == 'done'", "def soldout():", "def checkToDeal(name, momentPrice):\r\n\t#global todayTask_lst\r\n\tname = name[2:]\r\n\tlogger.info ( \"\\n ===Check Deal ====>> Name== %s\"%name)\r\n\tdealtype = ''\r\n\tBidExecuted = False\r\n\ttaskLst = DealDatum['todayTask_lst']\r\n\tcheck_lst = [row for row in taskLst if row[0] == name]\r\n\tcheck_lst.sort(lambda x, y: cmp(x[1], y[1])) # the min at first min==>->big sequence\r\n\tlogger.info(\"check previous order list ===>%s<===\"%check_lst)\r\n\t#signalTrigger(name)\r\n\tcheckLatestTrend(name,momentPrice)\r\n\r\n\tprint \"pause inside checkToDeal function\"\r\n\tutilities.pause()\r\n\t#logger.debug(\" \\n ==Show the loop List ==> \\n %s \"%check_lst)\r\n\tfor order in check_lst: #[['300070', -1, 46.83, 100.0, 4683.0], ['300...\r\n\t\t# stockCode , sellbuyFlag , dealATprice,Volumn = oder[0],order[1] , order[2], order[3]\r\n\t\t#only sell last bought, leave buying done manually!!r\r\n\t\tif order[3] < 0 : # Cancelled Bidding\r\n\t\t\tcontinue # next one, if vol < 0 means a canceled order\r\n\t\telif order[1] == 1 : # First check previous 1 days Sold Shares Volumn , buy again\r\n\t\t\t#if utilities.percentValue(0.997*momentPrice,order[2]) > 4.0 : # !!! 
need more consideration!!!!\r\n\t\t\tif (0.998*momentPrice-order[2])*order[3] < -159.0 :\r\n\t\t\t\tdealtype = \"buy\"\r\n\t\t\t\tif momentPrice * int(order[3]) > 5000.0 : # should improve if split vol may cause a tax or fee rise!!!\r\n\t\t\t\t\tbuy_vol = int(5000.0/momentPrice/100)*100+100 #ignore the available money\r\n\t\t\t\t\tdeal_info = [order[0],momentPrice+0.1 ,buy_vol] #first to check\r\n\t\t\t\telse:\r\n\t\t\t\t\tdeal_info = [order[0], momentPrice+0.1,int(order[3])] #first to check\r\n\t\telif order[1] == -1 :\r\n\t\t\tif (0.997*momentPrice-order[2])*order[3] > 99.0 :\r\n\t\t\t\tdealtype = \"sell\"\r\n\t\t\t\t#!!!order[2] far less thean momentPrice, Deal should be committed right now\r\n\t\t\t\tdeal_info = [order[0],order[2] ,int(order[3])]\r\n\t\tif dealtype == '' :\r\n\t\t\tbreak # out the loop ,cause same staock in min 2 max order\r\n\t\telse :\r\n\t\t\tcallAutoTransaction(dealtype,deal_info)\r\n\t\t\tlogger.info(\">>====Sell stock done by Bidding \\n %s====>>>\"%deal_info)\r\n\t\t\tDealDatum['todayTask_lst'].remove(order)\r\n\t\t\t#todayTask_lst.remove(order) # remove from the global task list\r\n\t\t\tBidExecuted = True\r\n\t\t\tbreak # just wait for nex moment another round deal check\r\n\treturn BidExecuted", "def testD_PrototypeChain(self):\n dummymyThread = threading.currentThread()\n\n nRunning = getCondorRunningJobs(self.user)\n self.assertEqual(nRunning, 0, \"User currently has %i running jobs. Test will not continue\" % (nRunning))\n\n config = self.getConfig()\n config.BossAir.pluginName = 'SimpleCondorPlugin'\n\n baAPI = BossAirAPI(config=config, insertStates=True)\n\n workload = self.createTestWorkload()\n\n workloadName = \"basicWorkload\"\n\n changeState = ChangeState(config)\n\n nSubs = 5\n nJobs = 10\n\n dummycacheDir = os.path.join(self.testDir, 'CacheDir')\n\n jobGroupList = self.createJobGroups(nSubs=nSubs, nJobs=nJobs,\n task=workload.getTask(\"ReReco\"),\n workloadSpec=os.path.join(self.testDir,\n 'workloadTest',\n workloadName),\n site='se.T2_US_UCSD')\n for group in jobGroupList:\n changeState.propagate(group.jobs, 'created', 'new')\n\n jobSubmitter = JobSubmitterPoller(config=config)\n jobTracker = JobTrackerPoller(config=config)\n statusPoller = StatusPoller(config=config)\n\n jobSubmitter.algorithm()\n\n nRunning = getCondorRunningJobs(self.user)\n self.assertEqual(nRunning, nSubs * nJobs)\n\n newJobs = baAPI._loadByStatus(status='New')\n self.assertEqual(len(newJobs), nSubs * nJobs)\n\n # Check WMBS\n getJobsAction = self.daoFactory(classname=\"Jobs.GetAllJobs\")\n result = getJobsAction.execute(state='Executing', jobType=\"Processing\")\n self.assertEqual(len(result), nSubs * nJobs)\n\n statusPoller.algorithm()\n\n nRunning = getCondorRunningJobs(self.user)\n self.assertEqual(nRunning, nSubs * nJobs)\n\n newJobs = baAPI._loadByStatus(status='New')\n self.assertEqual(len(newJobs), 0)\n\n newJobs = baAPI._loadByStatus(status='Idle')\n self.assertEqual(len(newJobs), nSubs * nJobs)\n\n # Tracker should do nothing\n jobTracker.algorithm()\n\n result = getJobsAction.execute(state='Executing', jobType=\"Processing\")\n self.assertEqual(len(result), nSubs * nJobs)\n\n # Wait for jobs to timeout due to short Pending wait period\n time.sleep(12)\n\n statusPoller.algorithm()\n\n newJobs = baAPI._loadByStatus(status='Idle')\n self.assertEqual(len(newJobs), 0)\n\n newJobs = baAPI._loadByStatus(status='Timeout', complete='0')\n self.assertEqual(len(newJobs), nSubs * nJobs)\n\n # Jobs should be gone\n nRunning = getCondorRunningJobs(self.user)\n 
self.assertEqual(nRunning, 0)\n\n # Check if they're complete\n completeJobs = baAPI.getComplete()\n self.assertEqual(len(completeJobs), nSubs * nJobs)\n\n # Because they timed out, they all should have failed\n jobTracker.algorithm()\n\n result = getJobsAction.execute(state='Executing', jobType=\"Processing\")\n self.assertEqual(len(result), 0)\n\n result = getJobsAction.execute(state='JobFailed', jobType=\"Processing\")\n self.assertEqual(len(result), nSubs * nJobs)\n\n return", "def is_done(self):\n return not (self.patrn_bfs_queue and self.sub_bfs_queue)", "def test_stop():\n print('\\n', \"Resending funds\")\n maxfee = 0.01\n call.nspv_login(wif_real)\n res = call.type_convert(call.nspv_listunspent())\n amount = res.get(\"balance\") - maxfee\n res = call.type_convert(call.nspv_spend(addr_send, amount))\n hexs = res.get(\"hex\")\n call.nspv_broadcast(hexs)\n print('\\n', \"stopping nspv process\")\n rpc_call = call.nspv_stop()\n call.assert_success(rpc_call)\n print('\\n', \"all tests are finished\")", "def test_status_update_from_complete_to_inprocess():\n PRM().Project(prm_module_project) \\\n .run_integration(skip=True) \\\n .goto_lk().Card(prm_module_project).create_child_card('C1') \\\n .goto_card('C1').set_lane('completed').set_size(20).update() \\\n .run_integration() \\\n .goto_lk().Card('C1').set_lane('in_process').update() \\\n .run_integration() \\\n .goto_prm().Project(prm_module_project).verify_lk_total_cards(20)\\\n .verify_lk_in_process_child_cards(20)\\\n .verify_lk_completed_child_cards(0)\\\n .verify_lk_percent_of_cards_completed(0)", "def test_05_credit_recharges(self):\n\tprint \"...starting test 2.05 to 2.08 \"\n\tprint \"removing load\"\n\tself.testHandler.handle_maxwell_request(\"voltage:0\")\n\tself.testHandler.handle_maxwell_request(\"phase_load:10\")\n\ttime.sleep(5)\n\tpower=float((self.testHandler.handle_network_request(\"get_active_power\", validity_level=\"medium\")).split(\" \")[0])\n print \"active power = \",power,\" watts\"\t \n\tself.testHandler.handle_network_request(\"clear_account\", validity_level=\"high\")\n\tacc_crdts=self.testHandler.handle_network_request(\"get_accurate_credits\", validity_level=\"medium\")\n\tacc_crdts= (acc_crdts.split(\"\\n\"))[3].split(\" \")[2]\n\ta=float(acc_crdts)\n\tprint \"accurate credits = \",a\n \tassert_equal(a,0.0)\n\tprint \"recharging with 100\"\n\tself.testHandler.handle_network_request(\"recharge:100\", validity_level=\"medium\")\n\ttime.sleep(2)\n\tprint (\"recharge of 100 done and starting the commulative recharge followed by fractional decharges\")\n\tacc_crdts=self.testHandler.handle_network_request(\"get_accurate_credits\", validity_level=\"high\")\n\tacc_crdts= (acc_crdts.split(\"\\n\"))[3].split(\" \")[2]\n\ta=float(acc_crdts)\n\tprint \"accurate credits = \",a\n\tprint\"recharging with fractional recharge of .95\" \n\tself.testHandler.handle_network_request(\"recharge_fract:.95\", validity_level=\"medium\")\n\tacc_crdts=self.testHandler.handle_network_request(\"get_accurate_credits\", validity_level=\"medium\")\n\tacc_crdts= (acc_crdts.split(\"\\n\"))[3].split(\" \")[2]\n\ta=float(acc_crdts)\n\tprint \"accurate credits = \",a\n\tprint\"recharging with fractional recharge of .85\"\n\tself.testHandler.handle_network_request(\"recharge_fract:.85\", validity_level=\"medium\")\n\tacc_crdts=self.testHandler.handle_network_request(\"get_accurate_credits\", validity_level=\"medium\")\n\tacc_crdts= (acc_crdts.split(\"\\n\"))[3].split(\" \")[2]\n\ta=float(acc_crdts)\n\tprint \"accurate credits = 
\",a\n\tprint\"recharging with a recharge of 100\"\n\tself.testHandler.handle_network_request(\"recharge:100\", validity_level=\"medium\")\n\tacc_crdts=self.testHandler.handle_network_request(\"get_accurate_credits\", validity_level=\"medium\")\n\tacc_crdts= (acc_crdts.split(\"\\n\"))[3].split(\" \")[2]\n\ta=float(acc_crdts)\n\tprint \"accurate credits =\",a\n\tprint('commulattive positive recharge done \\n starting commulative recharges')\n\tprint\"recharging with negative recharge of -50\"\n\tself.testHandler.handle_network_request(\"recharge:-50\", validity_level=\"medium\")\n\tacc_crdts=self.testHandler.handle_network_request(\"get_accurate_credits\", validity_level=\"medium\")\n\tacc_crdts= (acc_crdts.split(\"\\n\"))[3].split(\" \")[2]\n\ta=float(acc_crdts)\n\tprint \"accurate credits = \",a\n\tprint\"recharging with negative recharge of -50\"\n\tself.testHandler.handle_network_request(\"recharge:-50\", validity_level=\"medium\")\n\tacc_crdts=self.testHandler.handle_network_request(\"get_accurate_credits\", validity_level=\"medium\")\n\tacc_crdts= (acc_crdts.split(\"\\n\"))[3].split(\" \")[2]\n\ta=float(acc_crdts)\n\tprint \"accurate credits = \",a\n\tassert(101.79<a<=101.80)", "async def test_prevent_out_of_order_txs(self):\n\n tx1 = await self.get_tx_skel(FAUCET_PRIVATE_KEY, TEST_ADDRESS, 10 ** 10)\n dtx1 = decode_transaction(tx1)\n stx1 = sign_transaction(tx1, FAUCET_PRIVATE_KEY)\n tx2 = await self.get_tx_skel(FAUCET_PRIVATE_KEY, TEST_ADDRESS, 10 ** 10, dtx1.nonce + 1)\n stx2 = sign_transaction(tx2, FAUCET_PRIVATE_KEY)\n\n resp = await self.fetch(\"/tx\", method=\"POST\", body={\"tx\": stx2})\n self.assertEqual(resp.code, 400, resp.body)\n\n resp = await self.fetch(\"/tx\", method=\"POST\", body={\"tx\": stx1})\n self.assertEqual(resp.code, 200, resp.body)\n resp = await self.fetch(\"/tx\", method=\"POST\", body={\"tx\": stx2})\n self.assertEqual(resp.code, 200, resp.body)\n\n # lets the transaction queue processing run before ending the test\n await asyncio.sleep(1)", "def backtest(self):\n # Cut off most recent history closing price since it is not complete and would effect the calculations\n #kline_array = self.client.get_historical_klines(symbol=pair, interval=Client.KLINE_INTERVAL_5MINUTE, start_str= '1' + ' month ago UTC')\n kline_array = self.client.get_historical_klines(symbol=self.pair, interval=self.asset_interval, start_str= self.time_look_back)\n self.closing_times = [dt.datetime.utcfromtimestamp(x[6]/1000) for x in kline_array][0:-1]\n self.closing_price_array = [float(x[4]) for x in kline_array][0:-1]\n self.checked_prices = []\n\n gain, loss = 0, 0\n for x in range(0, len(self.closing_price_array)-1):\n change = self.closing_price_array[x+1] - self.closing_price_array[x]\n self.checked_prices.append(self.closing_price_array[x+1])\n self.checked_times.append(self.closing_times[x+1])\n if change > 0:\n gain += change\n elif change < 0:\n loss += abs(change)\n\n #Get first rsi simple moving average\n if x == self.rsi_period:\n self.avg_gain = self.simple_moving_average(gain, self.rsi_period)\n self.avg_loss = self.simple_moving_average(loss, self.rsi_period)\n self.rsi = self.rsi_calc(self.avg_gain, self.avg_loss)\n self.rsi_array.append(self.rsi)\n gain, loss = 0, 0\n\n #Use wilders moving average to continue calculating rsi values\n elif x > self.rsi_period:\n self.avg_gain = self.wilders_moving_average(self.rsi_period, gain, self.avg_gain)\n self.avg_loss = self.wilders_moving_average(self.rsi_period, loss, self.avg_loss)\n self.rsi = self.rsi_calc(self.avg_gain, 
self.avg_loss)\n self.rsi_array.append(self.rsi)\n gain, loss = 0, 0\n\n # When there are enough rsi values begin to calculate stoch_rsi\n if len(self.rsi_array) >= self.stoch_period:\n k_fast = self.k_fast_stoch(self.rsi_array[len(self.rsi_array) - self.stoch_period:])\n self.k_fast_array['k_fast'].append(k_fast)\n self.k_fast_array['time'].append(self.closing_times[x])\n\n # When there are enough %K_FAST values begin to calculate %K_SLOW values = sma of n %K_FAST values\n if len(self.k_fast_array['k_fast']) >= self.k_slow_period:\n k_slow = self.simple_moving_average(self.k_fast_array['k_fast'][-1*self.k_slow_period:], self.k_slow_period)\n self.k_slow_array['k_slow'].append(k_slow)\n self.k_slow_array['time'].append(self.closing_times[x])\n\n # When there are enough %K_SLOW values begin to calculate %D_SLOW values = sma of n %K_SLOW values\n if len(self.k_slow_array['k_slow']) >= self.d_slow_period:\n d_slow = self.simple_moving_average(self.k_slow_array['k_slow'][-1*self.d_slow_period:], self.d_slow_period)\n self.d_slow_array['d_slow'].append(d_slow)\n self.d_slow_array['time'].append(self.closing_times[x])\n\n self.bollinger_bands(self.checked_prices, self.sma_period, self.deviation, self.checked_times[x])\n\n #Once all values start to be calculated we can determine whether to buy or sell until we hit the last\n self.buy_sell(current_time = self.checked_times[x])\n\n self.plot_orders() #Plot orders on graph", "def test_sitter_payment(self):\n self.assertTrue(self.sitter.calculate_payment() == 3 * 15)", "def testE_FullChain(self):\n\n from WMComponent.JobSubmitter.JobSubmitter import JobSubmitter\n from WMComponent.JobStatusLite.JobStatusLite import JobStatusLite\n from WMComponent.JobTracker.JobTracker import JobTracker\n\n myThread = threading.currentThread()\n\n nRunning = getCondorRunningJobs(self.user)\n self.assertEqual(nRunning, 0, \"User currently has %i running jobs. 
Test will not continue\" % (nRunning))\n\n config = self.getConfig()\n config.BossAir.pluginName = 'SimpleCondorPlugin'\n\n baAPI = BossAirAPI(config=config, insertStates=True)\n\n workload = self.createTestWorkload()\n\n workloadName = \"basicWorkload\"\n\n changeState = ChangeState(config)\n\n nSubs = 1\n nJobs = 2\n cacheDir = os.path.join(self.testDir, 'CacheDir')\n\n jobGroupList = self.createJobGroups(nSubs=nSubs, nJobs=nJobs,\n task=workload.getTask(\"ReReco\"),\n workloadSpec=os.path.join(self.testDir,\n 'workloadTest',\n workloadName),\n site='se.T2_US_UCSD')\n for group in jobGroupList:\n changeState.propagate(group.jobs, 'created', 'new')\n\n jobSubmitter = JobSubmitter(config=config)\n jobTracker = JobTracker(config=config)\n jobStatus = JobStatusLite(config=config)\n\n jobSubmitter.prepareToStart()\n jobTracker.prepareToStart()\n jobStatus.prepareToStart()\n\n # What should happen here:\n # 1) The JobSubmitter should submit the jobs\n # 2) Because of the ridiculously short time on pending jobs\n # the JobStatus poller should mark the jobs as done\n # and kill them.\n # 3) The JobTracker should realize there are finished jobs\n #\n # So at the end of several polling cycles, the jobs should all\n # be done, but be in the failed status (they timed out)\n\n time.sleep(20)\n\n myThread.workerThreadManager.terminateWorkers()\n\n getJobsAction = self.daoFactory(classname=\"Jobs.GetAllJobs\")\n result = getJobsAction.execute(state='Executing', jobType=\"Processing\")\n self.assertEqual(len(result), 0)\n\n result = getJobsAction.execute(state='JobFailed', jobType=\"Processing\")\n self.assertEqual(len(result), nJobs * nSubs)\n return", "def testT_updateJobInfo(self):\n nRunning = getCondorRunningJobs(self.user)\n self.assertEqual(nRunning, 0, \"User currently has %i running jobs. Test will not continue\" % (nRunning))\n\n config = self.getConfig()\n config.BossAir.pluginName = 'SimpleCondorPlugin'\n config.BossAir.submitWMSMode = True\n\n baAPI = BossAirAPI(config=config, insertStates=True)\n workload = self.createTestWorkload()\n workloadName = \"basicWorkload\"\n changeState = ChangeState(config)\n\n nSubs = 1\n nJobs = 2\n dummycacheDir = os.path.join(self.testDir, 'CacheDir')\n jobGroupList = self.createJobGroups(nSubs=nSubs, nJobs=nJobs,\n task=workload.getTask(\"ReReco\"),\n workloadSpec=os.path.join(self.testDir,\n 'workloadTest',\n workloadName),\n site=\"se.T2_US_UCSD\")\n for group in jobGroupList:\n changeState.propagate(group.jobs, 'created', 'new')\n jobSubmitter = JobSubmitterPoller(config=config)\n jobSubmitter.algorithm()\n nRunning = getCondorRunningJobs(self.user)\n self.assertEqual(nRunning, nSubs * nJobs)\n\n baAPI.track()\n idleJobs = baAPI._loadByStatus(status='Idle')\n\n # #\n # Make one of the sites in the sitelist to be True for ABORTED/DRAINING/DOWN\n # updateSiteInformation() method should edit the classAd for all the jobs\n # that are bound for the site\n # Check the Q manually using condor_q -l <job id>\n #\n jtok = baAPI.updateSiteInformation(idleJobs, \"T2_US_UCSD\", True)\n if jtok is not None:\n baAPI.kill(jtok, errorCode=71301) # errorCode can be either 71301/71302/71303 (Aborted/Draining/Down)\n\n return", "def test_running_order_end():\n running_order = WorkFlow.get_running_order(end=\"charges\")\n assert len(running_order) == 5\n assert running_order[-1] == \"charges\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
cursor = mysql.connection.cursor()
sorgu = "Select from articles where id = %s"
result = cursor.execute(sorgu,(id,))
def article(id):
    article = Articles.query.filter_by(id = id).first()
    if article:
        #article = cursor.fetchone()
        goruntulenme = article.goruntulenme
        goruntulenme = goruntulenme + 1
        """sorgu2 = "Update articles Set goruntulenme = %s where id = %s"
        cursor = mysql.connection.cursor()
        cursor.execute(sorgu2,(goruntulenme,id))
        mysql.connection.commit()"""
        article.goruntulenme = goruntulenme
        db.session.commit()
        return render_template("article.html",article = article)
    else:
        return render_template("article.html")
[ "def search_id(id, table, conn):\n query = 'select * from %s where id=%d' % (table, id)\n result = conn.execute(query).fetchall()\n if not result: #id not in our database, return False\n return False\n else:\n return str(result[0][1]) #return the url of the id", "def query(self, sql):", "def sql_query(dbname, query):\n ...", "def findEmployee(self,employeeId):\n try: \n mycursor = self.mydb.cursor()\n mycursor.execute(\"SELECT * FROM Employees WHERE Emp_id = \"+str(empId))\n myresult = mycursor.fetchone()\n print(myresult)\n \n except Exception:\n print(\"Unable to fetch employee\")\n return True", "def search_url(url, table, conn):\n query = 'select * from %s where original=\"%s\"' % (table, url)\n result = conn.execute(query).fetchall()\n if not result: #url not in our database, return False\n return False\n else:\n return result[0][0] #return the id of the url", "def execute_query(query):\n conn, cursor = db_connect()\n cursor.execute(query)\n results = cursor.fetchall()\n conn.close()\n return results", "def get_results(query):\n with psycopg2.connect('dbname=news') as conn:\n cur = conn.cursor()\n cur.execute(query)\n return cur.fetchall()", "def search_article_by_id(article_id: int):\n _cursor = g.blogYY_conn.cursor()\n _cursor.execute(\"\"\"\n SELECT\n `article`.`id`, \n `article`.`title`,\n `article`.`create_timestamp`,\n `article`.`content`,\n `article`.`category_id`,\n `category`.`name`\n FROM \n `article`\n INNER JOIN\n `category`\n ON\n `article`.`category_id` = `category`.`id`\n WHERE\n `article`.`id`=?;\n \"\"\", (article_id,))\n return [{\n \"id\": _[0],\n \"title\": _[1],\n \"create_time_str\": datetime.fromtimestamp(_[2]).strftime(\"%Y-%m-%d %H:%M:%S\"),\n \"content\": _[3],\n \"category_id\": _[4],\n \"category_name\": _[5]\n } for _ in [_cursor.fetchone()]]", "def id(self, _id):\n sql = \"\"\"SELECT * FROM %s where id = '%s'\"\"\" % (self.table_name, _id)\n self.result = self.db.query(sql)\n return self", "def query_follower_by_id(conn, id):\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM follower WHERE id=?\", (id,))\n\n row = cur.fetchone()\n\n return row", "def query_person(id):\n pets=[] #to store pets owned by person #id\n db_cursor=pets_db.cursor()\n person_sql=\"SELECT * FROM Person WHERE id ={}\".format(id)\n db_cursor.execute(person_sql)\n result=db_cursor.fetchone()\n if result == None:\n print(\"No user with that Id\") # no user found with that id\n else:\n #print the user's information\n print(\"{} {}, {} years old\".format(result[1], result[2],result[3]))\n person_pets=\"SELECT * FROM Person_Pet WHERE person_id={}\".format(id)\n db_cursor.execute(person_pets)\n person_pet_result=db_cursor.fetchall()\n\n for x in person_pet_result:\n pet_query=\"SELECT * FROM Pet WHERE id={}\".format(x[1])\n db_cursor.execute(pet_query)\n pet_result=db_cursor.fetchall()\n pets+=pet_result\n #iterate the pets and print result\n for x in pets:\n if x[4]==1: #check if dog is alive or dead\n print (\"{} {} owned {} a {}, that was {} years old\".format(result[1], result[2],x[1],x[2],x[3]))\n else: #if not dead\n print (\"{} {} owns {} a {}, that is {} years old\".format(result[1], result[2],x[1],x[2],x[3]))", "def execute_sql(self, sql):\n print 'command sql : ', sql\n self.cursor.execute(sql)\n\n rows = self.cursor.fetchall()\n\n return rows", "def fetch_query(query):\n\n connection, cursor = connect()\n cursor.execute(query)\n results = cursor.fetchall()\n connection.close()\n return results", "def test_get_article_by_id():\n article = Article(\n author=\"jane@doe.com\",\n 
title=\"New Article\",\n content=\"Super extra awesome article\",\n ).save()\n\n query = GetArticleByIDQuery(id=article.id)\n\n assert query.execute().id == article.id", "def execute_sql(self, sql):\n cursor = connection.cursor()\n cursor.execute(sql)", "def execute_sql(self,sql):\n self.query(sql)", "def get_doc_by_id(doc_id, cursor):\n return cursor.execute(f\"select * from documents where id='{doc_id}'\").fetchall()", "def executeSelect(self, query):\n\t\ttry:\n\t\t\tconn = connect()\n\t\t\tif conn.is_connected():\n\t\t\t\tprint('Connected do MySQL. Query: %s', query)\n\t\t\t\tcursor = conn.cursor()\n\t\t\t\tcursor.execute(query)\n\t\t\t\tresult = cursor.fetchone()\n\t\t\t\treturn result\n\t\texcept Error as e:\n\t\t\tprint(e)\n\t\tfinally:\n\t\t\tconn.commit()\n\t\t\tconn.close()", "def query(sql, params=()):\n try:\n connection = sqlite3.connect('Data/uch_care.db')\n except FileNotFoundError:\n messagebox.showinfo(\"Warning\", \"Failed to connect\")\n logging.error(\"Failed to find Data/uch_care.db\")\n except Exception as e:\n messagebox.showinfo(\"Warning\", \"Failed to connect\")\n logging.error(\"Failed to connect to database: \", e)\n else:\n cursor = connection.cursor()\n if sql[:6] == \"SELECT\":\n cursor.execute(sql, params)\n return cursor.fetchall()\n else:\n cursor.execute(sql, params)\n connection.commit()\n connection.close()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }