query: string (lengths 9 to 3.4k)
document: string (lengths 9 to 87.4k)
metadata: dict
negatives: sequence (lengths 4 to 101)
negative_scores: sequence (lengths 4 to 101)
document_score: string (lengths 3 to 10)
document_rank: string (102 classes)
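The fields above describe a code-retrieval dataset: a natural-language query, its matching code document, mined negative snippets with their scores, and ranking information for the positive. A minimal sketch of loading and inspecting such a split with the Hugging Face `datasets` library follows; the repository id is a placeholder, not the dataset's actual name.

```python
# Minimal sketch, assuming the columns listed above; the dataset path is a placeholder.
from datasets import load_dataset

ds = load_dataset("user/code-retrieval-triplets", split="train")  # hypothetical repo id

row = ds[0]
print(row["query"])                  # natural-language docstring
print(row["document"][:200])         # positive code snippet (truncated for display)
print(len(row["negatives"]))         # 4 to 101 mined negative snippets per row
print(row["negative_scores"][:5], row["document_score"], row["document_rank"])
```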
Calculates the output size of the last conv layer.
def _get_conv_out(self, shape) -> int:
    conv_out = self.conv(torch.zeros(1, *shape))
    return int(np.prod(conv_out.size()))
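The `_get_conv_out` helper above sizes the fully connected head by pushing a dummy zero tensor through the convolutional stack once. A self-contained sketch of the same pattern is shown below; the specific layer shapes are illustrative assumptions, not taken from the snippet's source repository.

```python
# Sketch of the dummy-forward trick: run a zero tensor through the conv stack once
# to discover the flattened feature size, then size the linear head from it.
# The architecture below is illustrative, not the original network.
import numpy as np
import torch
import torch.nn as nn


class ConvNet(nn.Module):
    def __init__(self, input_shape=(3, 84, 84), n_outputs=6):
        super().__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(input_shape[0], 32, kernel_size=8, stride=4), nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=4, stride=2), nn.ReLU(),
        )
        conv_out_size = self._get_conv_out(input_shape)
        self.fc = nn.Linear(conv_out_size, n_outputs)

    def _get_conv_out(self, shape) -> int:
        conv_out = self.conv(torch.zeros(1, *shape))
        return int(np.prod(conv_out.size()))

    def forward(self, x):
        return self.fc(self.conv(x).view(x.size(0), -1))


print(ConvNet()(torch.zeros(2, 3, 84, 84)).shape)  # torch.Size([2, 6])
```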
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def output_size(self) -> int:\n return self.output_dim", "def get_output_shape(self):\n weights = self.W.get_shape().as_list()\n input_size = np.asarray(self.incoming_shape[-3:-1])\n strides = np.asarray(self.strides[-3:-1])\n kernels = np.asarray(weights[0:2])\n num_output = weights[-1]\n dilations = np.asarray(self.dilation_rate)\n if (isinstance(self.padding, list) or isinstance(self.padding, tuple)) and len(self.padding) == 2:\n output_size = np.asarray(\n np.ceil((input_size + 2 * np.asarray(self.padding) - kernels - (kernels - 1) * (\n dilations - 1)) / strides + 1),\n dtype=np.int)\n else:\n output_size = np.asarray(\n np.ceil(input_size / strides) if self.padding == \"SAME\" or self.padding == \"ZEROPAD\" else np.ceil(\n (input_size - (kernels - 1) * dilations) / strides), dtype=np.int)\n \n output_shape = self.incoming_shape[:]\n output_shape[-3:-1] = output_size.tolist()\n output_shape[-1] = num_output\n return output_shape", "def get_final_emb_size(self):\n size = self.n_layers * 1 * 2 * self.hidden_size\n return size", "def output_shape(self) ->torch.Size:\n return self._computed_output_shape()", "def __len__(self):\n num_x, num_y = self.conv_dims()\n return num_x * num_y", "def _output_size_conv2d(conv, size):\n o_size = np.array(size) + 2 * np.array(conv.padding)\n o_size -= np.array(conv.dilation) * (np.array(conv.kernel_size) - 1)\n o_size -= 1\n o_size = o_size / np.array(conv.stride) + 1\n return np.floor(o_size)", "def upperLayersSize(self):\n return sys.getsizeof(self.segment)", "def output_shape(self) ->torch.Size:\n input_shape = self.input_shape\n if self._reduce_mode in {None, 'none', 'None'}:\n return input_shape\n elif self._reduce_mode == 'concat':\n if len(input_shape) > 1:\n return input_shape[:-2] + (input_shape[-1] * input_shape[-2],)\n return input_shape\n else:\n return input_shape[1:]", "def output_size(self) -> int:\n return self.out_sz", "def get_model_output_size(self) -> int:\n pass", "def output_size(self):\n return self._output_size", "def output_size(self):\n return self._output_size", "def get_conv1d_output_size(input_size, kernel_size, stride):\n return ((input_size - kernel_size)//stride) + 1", "def get_conv1d_output_size(input_size, kernel_size, stride):\n return ((input_size - kernel_size)//stride) + 1", "def get_output_dim(self) -> int:\n raise NotImplementedError", "def output_dim(self) -> int:\n return 2 * self._hidden_dim", "def output_dim(self):\n return self._output_dim", "def output_dim(self) -> int:\n return (\n self.mlp_hidden_dims[-1]\n if self.mlp_hidden_dims is not None\n else self.blocks_dims[-1]\n )", "def _total_chunk_size_left(self):\n if self.streaming_type == 'reshape':\n return self.N_l // self.conv_factor\n elif self.streaming_type == 'mask':\n return self.N_l // self.conv_factor * self.n_layers\n elif self.unidir:\n return 10000 // self.conv_factor\n else:\n return 10000 // self.conv_factor", "def get_layer_shape(self,layer_id):\n return self.net.blobs[layer_id].data.shape[1:] # Chop off batch size", "def get_output_dim(self) -> int:\n raise NotImplementedError", "def output_size(self) -> int:\n return self.win_length", "def batch_size(self):\n return self._first_rgb.shape[0]", "def get_output_tensor_size(self, index):\n return self._engine.get_output_tensor_size(index)", "def batch_size(self):\n self.validate_shape_and_dtype()\n return self.rgb.shape[0]", "def outputSize(in_size, kernel_size, stride, padding):\n output = int((in_size - kernel_size + 2 * padding) / stride) + 1\n return output", "def num_layers(self): # 
-> int:\n ...", "def getOutputLength(self):\n return len(self.Y[0])", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n if self not in prev_layers:\n prev_layers += [self]\n incoming = self.incoming(prev_layers=prev_layers, **kwargs)\n with tf.variable_scope(self.layer_scope):\n if self.incoming_shape == self.scale_size:\n self.out = incoming\n else:\n self.out = resize2d(incoming, size=self.scale_size, method=self.method,\n align_corners=self.align_corners)\n if self.method_name == 'AREA':\n self.out = tf.stop_gradient(self.out)\n \n return self.out", "def output_shape(self):\r\n return self.detector.output_shape", "def num_layers(self):\n\n return 2 + self.num_hidden_layers", "def layer_size(self, layer_id): # -> int:\n ...", "def get_output_shape(self):\n return self.output_shape", "def get_output_shape(self):\n return self.output_shape", "def get_output_shape(self):\n return self.output_shape", "def get_output_shape(self):\n return self.output_shape", "def get_output_shape(self):\n return self.output_shape", "def output_width(self):\n\t\treturn self.output_shape_param('W')", "def l_out_conv(layer_num, kernel_size, pool=False):\n l_out_list = []\n l_in = constants.SHAPE_OF_ONE_DATA_POINT[1]\n for i in range(layer_num):\n l_out = l_out_conv1d(l_in, kernel_size, stride=2)\n l_out = l_out_conv1d(l_out, kernel_size, stride=2)\n\n l_out_list.append(l_out)\n\n if pool:\n pool_size = 3\n l_out = l_out_pool(l_out, pool_size)\n l_out_list.append(l_out)\n l_in = l_out\n\n # make a copy and reverse for decoder size def\n\n l_out_list_copy = copy.deepcopy(l_out_list)\n l_out_list.append(32)\n encoder_sizes = l_out_list\n l_out_list_copy.reverse()\n l_out_list_copy.append(constants.SHAPE_OF_ONE_DATA_POINT[1])\n decoder_sizes = l_out_list_copy\n return encoder_sizes, decoder_sizes", "def get_hidden_layer_size(self):\r\n return self.hidden_layer_size", "def get_output_shape(self):\n return self.shape", "def get_out_dim(self) -> int:\n return self.out_dim", "def _input_size(self):\n return self.embedding_size + self.hidden_size", "def size_out(self):\n if isinstance(self.ensemble.neuron_type, Direct):\n # This will prevent users from connecting/probing Direct neurons\n # (since there aren't actually any neurons being simulated).\n return 0\n return self.ensemble.n_neurons", "def outdim(self):\n return len(self.getSensors())", "def set_output_shape(self):\n self.output_shape = ((self.input_shape[0] // self.stride[0],\n self.input_shape[1] // self.stride[1],\n self.input_shape[2]\n ))", "def __len__(self):\n return math.ceil(self.number_of_images / self.batch_size)", "def output_length(self,\n inp_len: Optional[th.Tensor]) -> Optional[th.Tensor]:\n if self.last_choice is None:\n return inp_len\n if inp_len is None:\n return None\n return th.div(inp_len,\n self.src_sr[self.last_choice],\n rounding_mode=\"trunc\") * self.dst_sr[self.last_choice]", "def get_output_shape(self):\n # Get shape of output tensor(s), which will be [samples, n_seq_pos+n_tickersteps, n_units]\n return [s if isinstance(s, int) and s >= 0 else -1 for s in self.incoming_shape[0:2] + [self.n_units]]", "def get_output_shape(self):\n # Get shape of output tensor(s), which will be [samples, n_seq_pos+n_tickersteps, n_units]\n return [s if isinstance(s, int) and s >= 0 else -1 for s in self.incoming_shape[0:2] + [self.n_units]]", "def get_output_shape(self):\n # Get shape of output tensor(s), which will be [samples, n_seq_pos+n_tickersteps, n_units]\n return [s if isinstance(s, int) and s 
>= 0 else -1 for s in self.incoming_shape[0:2] + [self.n_units]]", "def output_height(self):\n\t\treturn self.output_shape_param('H')", "def get_output_shape(self):\n # Get shape of output tensor(s), which will be [samples, n_seq_pos+n_tickersteps, x, y, n_units]\n return [s if isinstance(s, int) and s >= 0 else -1\n for s in self.incoming_shape[0:2] + self.incoming_shape[2:-1] + [self.n_units]]", "def conv_to_fc_size(\n input_shape, conv_depth, pools,\n stride=[2, 2, 2], padding='SAME',\n dropout_keep_prob=1.0):\n h, w, d = input_shape\n if padding == 'SAME':\n for i in range(pools):\n h = math.ceil(float(h) / float(stride[0]))\n w = math.ceil(float(w) / float(stride[1]))\n d = math.ceil(float(d) / float(stride[2])) \n else:\n # 'VALID' padding\n pass\n \n return conv_depth * h * w * d", "def __len__(self):\n return int(np.ceil(self.total_frame_count / self.batch_size))", "def __len__(self):\n _, timesteps, height, width = self.data.shape\n height //= self.size\n width //= self.size\n\n if self.subset == 'train':\n out = self.length\n elif self.subset == 'all':\n out = height * width\n else:\n out = (height // 2) * (width // 2)\n\n if not self.time:\n out *= timesteps\n\n return out", "def _n_features_out(self):\n return self.components_.shape[0]", "def layers_compressed_size(self):\n # don't have this information at this point\n return None", "def layers_compressed_size(self):\n # don't have this information at this point\n return None", "def num_layers(self):\n return self._num_layers", "def output_size(self):\n raise NotImplementedError('This is an interface class, please use a derived instance')", "def calculate_shape_decreases_3D_Net(self, input_crop_size):\n cropsize_x, cropsize_y, cropsize_z = input_crop_size\n input_crop = torch.ones((1, cropsize_z, cropsize_x, cropsize_y))\n net_output, _ = self.forward_net(input_crop)\n _, outsize_z, outsize_y, outsize_x = net_output.size()\n\n return cropsize_x-outsize_x, cropsize_y-outsize_y, cropsize_z-outsize_z", "def output_dims(self) -> Optional[Tuple[int]]:\n return None", "def size(self):\n return self.num_inputs, self.num_outputs", "def get_layer_size(self, layer_ind):\n assert(layer_ind < self.num_layers)\n return self._layer_sizes[layer_ind]", "def compute_output_shape(self, input_shape):\n output_shape = [0] * self.rank\n for d in range(self.rank):\n output_shape[d] = sum(self.paddings[d]) + input_shape[d]\n return tf.TensorShape(output_shape)", "def output_mb(self):\n total_output_size = sum([t.shuffle_mb_written for t in self.tasks])\n return total_output_size", "def get_num_of_output_tensors(self):\n return self._engine.get_num_of_output_tensors()", "def compute_output_shape(self, input_shape):\n batch_size = input_shape[0]\n sequence_length = input_shape[1]\n return (batch_size, sequence_length)", "def size(self):\n\n frame = self.get_frame()\n\n # Unpack array dimensions\n height, width, layers = np.array(frame).shape\n\n return width, height", "def get_output_shape(self):\n return self.out.shape.as_list()", "def compute_output_shape(self, input_shape):\n if tf.keras.backend.image_data_format() == 'channels_first':\n return (input_shape[0][0], input_shape[0][1]) + input_shape[1][2:4]\n\n return (input_shape[0][0],) + input_shape[1][1:3] + (input_shape[0][-1],)", "def get_model_output_dimension(self):\r\n raise NotImplementedError()", "def __len__(self):\n # print(\"len: \" + str(math.floor(len([name for name in os.listdir(self.imgs_dir) if os.path.isfile(self.imgs_dir+'//'+name)])/self.batch_size)-1)\n return 
math.floor(len([name for name in os.listdir(self.imgs_dir) if\n os.path.isfile(self.imgs_dir + '//' + name)]) / self.batch_size)", "def __len__(self):\n return self.flat_image.size", "def get_frame_size(self):\n return self._frames.shape[-1]", "def size_out(self):\n return self.dimensions", "def size(self) -> tf.Tensor:", "def __len__(self):\n\n return math.ceil(len(self.img_files) * self.gen_count / self.batch_size)", "def compute_output_shape(self, input_shape):\r\n return input_shape", "def get_num_hidden(self) -> int:\n return self.output_dim", "def __len__(self):\n return int(np.ceil(len(self.image_filenames) / (self.batch_size)))", "def __find_net_dims(self):\n\n input_params = INPUT_CHANNELS * INPUT_SIZE ** 2\n net_dims = [input_params]\n for w in self._conv_weights + self._lin_weights:\n net_dims.append(w.shape[0])", "def get_size(self):\n return self.get_params().shape[0]", "def target_size(self) -> pulumi.Output[float]:\n return pulumi.get(self, \"target_size\")", "def set_output_shape(self):\n self.output_shape = (reduce(mul, self.input_shape),)", "def get_height(self):\n height = 0\n for layer, ldata in self.conf['Layers'].items():\n layer_t = ldata['params']['thickness']\n height += layer_t\n return height", "def estimate_cudnn_parameter_size(input_size, hidden_size, direction):\n single_rnn_size = 8 * hidden_size + 4 * (hidden_size * input_size) + 4 * (hidden_size * hidden_size)\n return direction * single_rnn_size", "def compute_output_shape(self, input_shape):\n \n assert input_shape and len(input_shape) == 2\n return input_shape[0], self.n_clusters", "def num_channels_per_output(cls) -> list[tuple[int, ...]]:\n return [\n (16, 24, 40, 112, 320),\n (16, 24, 40, 112, 320),\n (16, 24, 48, 120, 352),\n (24, 32, 48, 136, 384),\n (24, 32, 56, 160, 448),\n (24, 40, 64, 176, 512),\n (32, 40, 72, 200, 576),\n (32, 48, 80, 224, 640),\n (32, 56, 88, 248, 704),\n (72, 104, 176, 480, 1376),\n ]", "def layers_sizes(self):\n return iter([self.delta_h*l for l in range(int(self.h/self.delta_h)-1)])", "def get_output_shape(self):\n return self.incoming_shape", "def get_output_shape(self):\n return self.incoming_shape", "def get_output_shape(self):\n return self.incoming_shape", "def convert_size(g, op, block):\n\n input_x = g.get_node(op.input(\"Input\")[0])\n out = _op.ndarray_size(input_x, dtype=\"int64\")\n out = _op.expand_dims(out, axis=0)\n g.add_node(op.output(\"Out\")[0], out)", "def compute_level_output_shape(n_filters, depth, pool_size, image_shape):\n output_image_shape = np.asarray(np.divide(image_shape, np.power(pool_size, depth)), dtype=np.int32).tolist()\n return tuple([None, n_filters] + output_image_shape)", "def compute_level_output_shape(n_filters, depth, pool_size, image_shape):\n output_image_shape = np.asarray(np.divide(image_shape, np.power(pool_size, depth)), dtype=np.int32).tolist()\n return tuple([None, n_filters] + output_image_shape)", "def get_total_input_dimension(self, layers):\n self._validate_layer_names(layers)\n total = 0\n for layer in self.layers:\n if layer.layer_name in layers:\n total += layer.get_input_space().get_total_dimension()\n return total" ]
[ "0.72758055", "0.7067759", "0.70510364", "0.7000888", "0.68895096", "0.6863038", "0.6814944", "0.68022835", "0.67512447", "0.67106795", "0.6696877", "0.6696877", "0.66832334", "0.66832334", "0.6663041", "0.6635598", "0.6611119", "0.6571467", "0.6549242", "0.65476173", "0.6491185", "0.64886093", "0.64730805", "0.64347905", "0.6408419", "0.6395363", "0.63848263", "0.6374435", "0.6337225", "0.6329526", "0.63214976", "0.6321218", "0.6317406", "0.6317406", "0.6317406", "0.6317406", "0.6317406", "0.6313596", "0.63118535", "0.6292788", "0.6285853", "0.6261559", "0.62520146", "0.6235888", "0.62108845", "0.6203142", "0.61921227", "0.61860853", "0.6169535", "0.6169535", "0.6169535", "0.6117509", "0.6089173", "0.6081267", "0.6044872", "0.6041074", "0.6037495", "0.60354316", "0.60354316", "0.60304433", "0.6029954", "0.60257196", "0.6022036", "0.6017297", "0.6010937", "0.6010702", "0.6006798", "0.59927523", "0.5988746", "0.59523195", "0.59461147", "0.5939062", "0.59378123", "0.5931183", "0.5929887", "0.5929149", "0.5925468", "0.59234643", "0.590295", "0.5899945", "0.5894715", "0.58903706", "0.5888755", "0.58834404", "0.5874345", "0.5871627", "0.5868094", "0.5853292", "0.5848595", "0.5848207", "0.58449274", "0.58373946", "0.58373946", "0.58373946", "0.5822839", "0.581487", "0.581487", "0.5811419" ]
0.6925207
5
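Per the metadata above, each row is consumed as a (query, document, negatives) triplet. Below is a minimal sketch of one way such triplets could feed a contrastive hinge loss; the `encode` callable and the margin value are placeholders for illustration, not part of the dataset.

```python
# Minimal sketch of a triplet-style objective over (query, document, negatives)
# embeddings; `encode` stands in for whatever text/code encoder is being trained.
import torch
import torch.nn.functional as F


def triplet_loss(encode, query, document, negatives, margin=0.2):
    q = encode([query])                    # (1, d) query embedding
    pos = encode([document])               # (1, d) positive document embedding
    neg = encode(negatives)                # (n, d) negative embeddings
    pos_sim = F.cosine_similarity(q, pos)  # (1,)
    neg_sim = F.cosine_similarity(q, neg)  # (n,)
    # Hinge on the hardest negative: push the positive above every negative by `margin`.
    return F.relu(margin - pos_sim + neg_sim.max()).mean()
```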
Forward pass through the network. Calculates the Q-values using the value and advantage streams.
def forward(self, input_x):
    adv, val = self.adv_val(input_x)
    return val + (adv - adv.mean(dim=1, keepdim=True))
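The positive document here implements the dueling aggregation Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a)). Below is a minimal self-contained module sketching that pattern; the trunk, layer sizes, and the `adv_val` helper are illustrative assumptions rather than the original network.

```python
# Minimal dueling Q-network sketch: a shared trunk feeds separate advantage and
# value heads, recombined as Q = V + (A - mean(A)). Sizes are illustrative.
import torch
import torch.nn as nn


class DuelingQNet(nn.Module):
    def __init__(self, obs_size=8, n_actions=4, hidden=128):
        super().__init__()
        self.trunk = nn.Sequential(nn.Linear(obs_size, hidden), nn.ReLU())
        self.adv_head = nn.Linear(hidden, n_actions)  # advantage stream A(s, a)
        self.val_head = nn.Linear(hidden, 1)          # state-value stream V(s)

    def adv_val(self, input_x):
        h = self.trunk(input_x)
        return self.adv_head(h), self.val_head(h)

    def forward(self, input_x):
        adv, val = self.adv_val(input_x)
        return val + (adv - adv.mean(dim=1, keepdim=True))


print(DuelingQNet()(torch.zeros(5, 8)).shape)  # torch.Size([5, 4])
```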
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward(self, state):\n x = state\n feature = self.feature_layer(x)\n action_value = self.value_layer(feature)\n advantage = self.advantage_layer(feature)\n \n q_value = action_value + (advantage - advantage.mean(dim=1, keepdim=True))\n return q_value", "def advantage(self, state, Q: torch.Tensor = None):\n return Q - Q.max()\n # return Q - torch.matmul(self.π.pmf(state, action_values=Q), Q)", "def forward(self, x1, x2):\n return x1 * self.Q + (1 - self.Q) * x2", "def Q_net(self, state):\n\t\tif not self._prediction_made: \n\t\t\tQ = tf.matmul(tf.nn.relu( tf.matmul(state, self.weights_hidden) + self.bias_hidden ), self.weights_out) + self.bias_out \n\t\t\tself._Qval = Q\t\n\t\t\tself._prediction_made = True\n\t\treturn self._Qval", "def forward(self, state, action):\n state = torch.cat(state, dim=1)\n \n for i in range(len(action)):\n action[i] /= self.max_action\n\n # Concatenate the action vector \n action = torch.cat(action, dim=1)\n x = torch.cat([state, action], dim=1)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n q_value = self.q_out(x)\n\n return q_value", "def learn(self, experiences, gamma):\n states, actions, rewards, next_states, dones = experiences\n\n #simple implementation of a python noob to implement DDQN\n bla = torch.from_numpy(np.zeros(64)).float().to(device)\n for i in range(64):\n bla[i] = self.qnetwork_target(next_states[i]).detach()[self.qnetwork_local(next_states).detach().argmax(1)[i]]\n Q_targets_next = bla.unsqueeze(1)\n #this was my first try of ddqn in python style, but as i said i'm a noob and didn't get it working\n #Q_targets_next = [self.qnetwork_target(next_states).detach()[i] for i in self.qnetwork_local(next_states).detach().argmax(1).unsqueeze(1)]\n \n # Compute Q targets for current states \n Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))\n\n # Get expected Q values from local model\n Q_expected = self.qnetwork_local(states).gather(1, actions)\n\n # Compute loss\n loss = F.mse_loss(Q_expected, Q_targets)\n # Minimize the loss\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # ------------------- update target network ------------------- #\n self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)", "def forward(self, x):\n q_denom = (x.unsqueeze(1) - self.clusters)**2\n q_denom = q_denom.sum(dim=2)\n q_denom /= self.alpha\n q_denom += 1.0\n q = 1.0 / q_denom\n q = q ** ((self.alpha + 1.0) / 2.0)\n q = q.t() / q.sum(dim=1) # Div shapes [20, 1024] / [1024]\n q = q.t()\n return q", "def forward(self, x):\n x = self.fc0(x.view(-1, x.size(-1))).view(x.size(0), x.size(1), -1)\n x = self.pe(x)\n\n x = self.inner_layers(x) # FF, FF, FF, finalFF\n\n state_value = self.fc_s(x) # double-dqn : state\n\n advantage_values = self.fc_a(x) # double-dqn : advantage\n advantage_values = advantage_values.view(\n advantage_values.size()[:-1] + (self.action_size, self.n_atoms))\n\n dist_weights = state_value.unsqueeze(\n dim=-2) + advantage_values - advantage_values.mean(dim=-2, keepdim=True)\n\n return dist_weights", "def q1_forward(self, state: torch.Tensor) -> torch.Tensor:\n return self.q_networks[0](state)", "def Q(self, states, neural_net_to_use, no_grad = False):\r\n\r\n states = torch.from_numpy(states)\r\n states = states.float()\r\n\r\n if no_grad:\r\n with torch.no_grad():\r\n output = neural_net_to_use(states)\r\n return output\r\n\r\n output = neural_net_to_use(states)\r\n return output", "def learn(self):\n ## obtain sample batch using priority based sampling.\n states, 
actions, rewards, next_states, dones, weights, sample_inds = self.buffer.sample_batch(BETA)\n \n ## obtain the discounted sum of rewards from reward list\n ## also obtain final gamma multiplier\n reduced_rewards, gamma_multipliers = self.reduce_rewards(rewards)\n \n ## convert to tensors\n states = np_to_tensor(states)\n actions = np_to_tensor(actions)\n reduced_rewards = np_to_tensor(reduced_rewards)\n gamma_multipliers = np_to_tensor(gamma_multipliers)\n next_states = np_to_tensor(next_states)\n dones = np_to_tensor(dones)\n weights = np_to_tensor(np.array(weights))\n \n #### Updating Qnet\n \n ## actions from the target actor network\n greedy_actions = self.actor_target(next_states)\n ## compute temporal difference\n targets = reduced_rewards + torch.mul( torch.mul(gamma_multipliers , self.QNetwork_target(next_states, greedy_actions)) , (1-dones).unsqueeze(1))\n Q_sa = self.QNetwork_local(states, actions)\n \n td_error = targets - Q_sa\n \n ## update the priorities using temporal differences\n self.buffer.update_priority(sample_inds,\n (td_error).detach().abs().squeeze().cpu().data.numpy()+REPLAY_EPS)\n \n ## compute the loss, importance sampling weights are used\n loss = ((td_error).pow(2)*weights).mean()\n \n self.QNet_optim.zero_grad()\n loss.backward()\n self.QNet_optim.step()\n \n ### Updating Actor\n pred_actions = self.actor_local(states)\n actor_loss = - self.QNetwork_local(states, pred_actions).mean()\n \n self.actor_optim.zero_grad()\n actor_loss.backward()\n self.actor_optim.step()\n \n #### Polyak Updates\n self.soft_update(self.QNetwork_local, self.QNetwork_target, TAU)\n self.soft_update(self.actor_local, self.actor_target, TAU)", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n Q_Value = 0 #initializing q value\n\n feat_Extractor = self.featExtractor\n\n weight = self.weights #To get the weight to control exploration and exploitation\n\n features = feat_Extractor.getFeatures(state,action) #to get all the features associated with (state,action) pair\n\n for each_feature in features:\n #refer to README_Reinforcement.txt for the formula at line 11\n temp_Qvalue = weight[each_feature] * features[each_feature] #Q(state,action) = w * featureVector where * is the dotProduct operator\n Q_Value = Q_Value + temp_Qvalue\n\n return Q_Value #Returns final qvalue\n #util.raiseNotDefined()", "def test_propagate(self):\n # Get network components\n data = array([[0], [1]])\n cdata = LabeledCData(data, labels=array([0, 1]))\n encoder = BinaryEncoding(cdata)\n unitary = ProductAnsatz(1)\n measure = Measurement(1, [0])\n qnn = Network([encoder, unitary, measure], \"1q-qvm\")\n\n # Propagate the zeroth data point\n out = qnn.propagate(0, shots=10)\n\n print(out)", "def updateQ_value(self, value):\n self.Q_value = (self.Q_value * self.nVisits + value) / (self.nVisits + 1)", "def forward(self, x):\n dims = list(range(1, len(x.shape)))\n mean = x.mean(dim=dims, keepdim=True)\n var = torch.pow(x - mean, 2).mean(dim=dims, keepdim=True)\n return self.apply_gain_and_bias((x - mean) / (var + EPS).sqrt())", "def forward(self, state):\n x = F.relu(self.input(state))\n for layer in self.layers:\n x = F.relu(layer(x))\n if self.duel:\n # Value function estimator\n val = F.relu(self.val_fc_input(x))\n val = self.val_fc_output(val)\n # Advantage function estimator\n adv = F.relu(self.adv_fc_input(x))\n adv = self.adv_fc_output(adv)\n # Subtract mean so that V and A are uniquely identifiable for a given Q\n return val + adv - adv.mean(1).unsqueeze(1).expand(state.size(0), 
self.action_size)\n else:\n return self.output(x)", "def forwardPolicyNet(self, state):\n with torch.no_grad():\n q_values = self.policy_net(state)\n return q_values", "def getQValue(self, state, action):\n #print \"getQValue in ApproximateQAgent\"\n\n \"*** YOUR CODE HERE ***\"\n weights = self.getWeights()\n features = self.featExtractor.getFeatures(state, action, self)\n\n value = 0\n\n #print \"FEATURES: \", features\n #print \"WEIGHTS: \", weights\n\n for feature in features:\n value += features[feature]*weights[feature]\n return value\n #util.raiseNotDefined()", "def getQValue(self, state, action):\n \"\"\"Description:\n [Enter a description of what you did here.]\n Use first equation in slide 71 of MDP to compute q-value depond on weights and current features.\n \n !! But I think what I did is not work for IdentityExtractor. Because feature of IdentityExtrator always return 1,\n it did not change even a ghost is closing.\n \"\"\"\n \"\"\" YOUR CODE HERE \"\"\"\n # if weight is empty, then weight will need to initial to 1 for all features\n # According to which Extractor user choose, weight counter will have equal number of keys.\n if len(self.weight) == 0:\n feat = self.featExtractor.getFeatures(state, action)\n self.weight.incrementAll(feat.keys(), 1)\n \n qValue = self.weight * self.featExtractor.getFeatures(state,action)\n return qValue\n \"\"\" END CODE \"\"\"", "def preval_forward(self, data_shot, label_shot, data_query):\n embedding_query = self.encoder(data_query)\n embedding_shot = self.encoder(data_shot)\n logits = self.base_learner(embedding_shot)\n #loss = self.FL(logits, label_shot) + self.CD(logits,label_shot) + self.LS(logits,label_shot)\n loss = self.CD(logits,label_shot)\n grad = torch.autograd.grad(loss, self.base_learner.parameters())\n fast_weights = list(map(lambda p: p[1] - 0.01 * p[0], zip(grad, self.base_learner.parameters())))\n logits_q = self.base_learner(embedding_query, fast_weights)\n\n for _ in range(1, 100):\n logits = self.base_learner(embedding_shot, fast_weights)\n #loss = self.FL(logits, label_shot) + self.CD(logits,label_shot) + self.LS(logits,label_shot)\n loss = self.CD(logits,label_shot)\n grad = torch.autograd.grad(loss, fast_weights)\n fast_weights = list(map(lambda p: p[1] - 0.01 * p[0], zip(grad, fast_weights)))\n logits_q = self.base_learner(embedding_query, fast_weights) \n return logits_q", "def learn(self, experiences, gamma):\n states, actions, rewards, next_states, dones = experiences\n\n # Double DQN. 
Uses local network for action selection and target network for value estimation\n # see: https://arxiv.org/pdf/1509.06461.pdf\n Q_actions_next = self.dqn_local(next_states).detach().argmax(1).unsqueeze(1)\n Q_targets_next = self.dqn_target(next_states).gather(1, Q_actions_next)\n Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))\n # Standard DQN\n # Get max predicted Q values (for next states) from target model\n # Q_targets_next = self.dqn_target(next_states).detach().max(1)[0].unsqueeze(1)\n # Compute Q targets for current states\n # Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))\n\n # Get expected Q values from local model\n Q_expected = self.dqn_local(states).gather(1, actions)\n\n # Compute loss\n loss = F.mse_loss(Q_expected, Q_targets)\n # Minimize the loss\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # ------------------- update target network ------------------- #\n self.soft_update(self.dqn_local, self.dqn_target, TAU)", "def forward(self, state_t):\n # Use your network to compute qvalues for given state\n conved_features = self.conv_layers(state_t)\n A = self.head_A(conved_features)\n V = self.head_V(conved_features).repeat(1, self.n_actions)\n mean_A = torch.mean(A, dim=-1, keepdim=True).repeat(1, self.n_actions)\n qvalues = A + V - mean_A\n\n assert qvalues.requires_grad, \"qvalues must be a torch tensor with grad\"\n assert len(\n qvalues.shape) == 2 and qvalues.shape[0] == state_t.shape[0] and qvalues.shape[1] == self.n_actions\n\n return qvalues", "def forward(self, value, query, lens):\n relevant_scores = self.relevant_score(value, query, lens)\n e_relevant_scores = torch.exp(relevant_scores)\n weights = e_relevant_scores / e_relevant_scores.sum(-1, keepdim=True)\n attention = (weights.unsqueeze(-1) * value).sum(1)\n return attention", "def forward(self):\n R = self.LP.cost.R\n A = self.LP.dyn.A\n B = self.LP.dyn.B\n\n x = self.LP.x0\n self.x[0] = x\n for i in range(self.LP.N):\n u = - np.linalg.inv(R+B.T.dot(self.V[i+1]).dot(B)).dot(.5*B.T.dot(self.W[i+1]) \\\n + B.T.dot(self.V[i+1]).dot(A).dot(x))\n if self.LP.dyn.u_dim == 1:\n self.u[i] = float(u)\n else:\n self.u[i] = u\n self.J_star[i] = float(x.T.dot(self.V[i]).dot(x) + self.W[i].T.dot(x)) #up to constant\n\n if i == 0:\n self.J[i] = self.LP.cost.loss(x, u, i)\n else:\n self.J[i] = self.J[i-1] + self.LP.cost.loss(x, u, i)\n x = self.LP.dyn.next_state(x, u)\n self.x[i+1] = x\n\n self.J[self.LP.N] = self.J[self.LP.N-1] + self.LP.cost.loss(x, 0, self.LP.N)\n\n self.J_star[self.LP.N] = float(x.T.dot(self.V[self.LP.N]).dot(x) \\\n + self.W[self.LP.N].T.dot(x)) #up to constant", "def learn(self, experiences, gamma):\n states, actions, rewards, next_states, dones = experiences\n\n # Get max predicted Q values (for next states) from target model\n Q_targets_next = self.qnetwork_target(\n next_states).detach().max(1)[0].unsqueeze(1)\n \n # Compute Q targets for current states \n Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))\n\n # Get expected Q values from local model\n Q_expected = self.qnetwork_local(states).gather(1, actions)\n\n # Compute loss\n loss = F.mse_loss(Q_expected, Q_targets)\n\n # Minimize the loss\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # ------------------- update target network ------------------- #\n self.soft_update(self.qnetwork_local, self.qnetwork_target, self.tau) \n\n return", "def learn(self, experiences, gamma):\n self.optimizer.zero_grad()\n\n states, actions, rewards, next_states, dones = 
experiences\n\n best_actions = self.qnetwork_local(next_states).detach().argmax(1).unsqueeze(1)\n q_values_target = self.qnetwork_target(next_states).detach()\n q_expected = rewards + (gamma * q_values_target.gather(1, best_actions)) * (1 - dones)\n q_current = self.qnetwork_local(states).gather(1, actions)\n\n loss = F.mse_loss(q_expected, q_current)\n\n loss.backward()\n self.optimizer.step()\n\n # ------------------- update target network ------------------- #\n self.soft_update(self.qnetwork_local, self.qnetwork_target, dqn_agent.TAU)", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n qvalue = 0.0\n for feature_name, value in self.featExtractor.getFeatures(state, action).iteritems():\n qvalue += value * self.weights[feature_name]\n return qvalue", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n qvalue = 0.0\n for feature_name, value in self.featExtractor.getFeatures(state, action).iteritems():\n qvalue += value * self.weights[feature_name]\n return qvalue", "def ipdTft(length,gamma,epsilon,alpha = .8):\r\n #possible previous states (what each did in the last iteration)\r\n states = [(\"*\",\"*\"),(\"C\",\"D\"), (\"C\",\"C\"), (\"D\",\"C\"), (\"D\",\"D\")]\r\n #actions: Defect or Cooperate\r\n actions = [\"D\",\"C\"]\r\n #payoff matrix (as dict)\r\n payoff = {(\"C\",\"D\"): (-3,0), (\"C\",\"C\"): (-1,-1), \r\n (\"D\",\"C\"): (0,-3), (\"D\",\"D\"): (-2,-2)}\r\n #initialize learners \r\n\r\n #q1 = qLearn(states,actions,gamma,alpha,epsilon)\r\n #q1 = QLearnCont(ipd_feats,10,actions,gamma,alpha,epsilon,kernel = 'linear')\r\n #q1 = DQN(ipd_feats,10,actions,.99,.5,.1,learn_type = 'linear')\r\n q1 = DQN(ipd_feats,10,actions,.99,.5,.1,shape = (10,10,1))\r\n #initialize list of rewards\r\n rewards = []\r\n #iterate through length states and run the game\r\n prevState = (\"*\",\"*\")\r\n for i in range(length):\r\n #get actions\r\n print(\"Iteration %i:\" %i)\r\n print(\"Previous State:\", prevState)\r\n qa1 = q1.chooseAction(prevState)\r\n qa2 = tft(prevState[0])\r\n print(\"Player 1 Action:\",qa1)\r\n print(\"Player 2 Action:\",qa2)\r\n \r\n #find payoff\r\n newState = (qa1,qa2)\r\n reward = payoff[newState]\r\n rewards.append(reward[0])\r\n print(\"Player 1 Reward:\", reward[0])\r\n print(\"Player 2 Rewards:\", reward[1])\r\n print(\"Current average reward for Player 1:\",np.mean(rewards))\r\n #assign reward and update Q params\r\n q1.qUpdate(prevState,qa1,reward[0],newState)\r\n \r\n prevState = newState\r\n #print(q1.Q)\r\n return(rewards,q1)", "def learn(self, experiences, gamma):\n states, actions, rewards, next_states, dones, weights, indexes = experiences\n\n q_expected, q_targets = self.get_target_and_expected(states, \n actions, \n rewards, \n next_states, \n dones, \n gamma)\n\n #print('q_expected.shape', q_expected.shape)\n #print('q_targets.shape', q_targets.shape)\n \n # Compute loss\n ##### deltas = F.mse_loss(q_expected, q_targets)\n deltas = q_expected - q_targets\n #print('loss.shape', loss.data.cpu().numpy().shape)\n #print('loss', loss)\n \n _sampling_weights = (torch.Tensor(weights)\n .view((-1, 1)))\n \n # mean square error\n loss = torch.mean((deltas * _sampling_weights)**2)\n\n # importance sampling weights used to correct bias introduced \n # by prioritisation experience replay\n # See Annealing the bias https://arxiv.org/abs/1511.05952\n #with torch.no_grad():\n # weight = sum(np.multiply(weights, loss.data.cpu().numpy()))\n # print('weight', weight)\n # loss *= weight\n # print('weights.shape', weights.shape)\n # print('loss 
type', type(loss))\n # print('loss shape', loss.size())\n # loss *= weights\n # Minimize the loss\n # call zero_grad before calling backward() \n # o.w. gradients are accumulated from multiple passes\n self.optimizer.zero_grad()\n # backward computes dloss/dx for every parameter x\n loss.backward()\n # updates parameters\n self.optimizer.step()\n\n # ------------------- update target network ------------------- #\n self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU) \n \n # ------------------- update priorities ------------------- # \n priorities = abs(deltas.detach()).numpy()\n #priorities = abs(q_expected.detach() - q_targets.detach()).numpy()\n self.memory.update_priorities(priorities, indexes)", "def update(self, state, action, nextState, reward):\n \"\"\"Description:\n Use Q-Learning algoritm in slide 58 of MDP\n \"\"\"\n \"\"\" YOUR CODE HERE \"\"\"\n maxQns = self.getValue(nextState) # get max q-value of next state\n if maxQns == None:\n maxQns = 0\n Qsa = self.getQValue(state, action) #self.qValues[(state, action)]\n difference = reward + self.discountRate * maxQns - Qsa\n self.qValues[(state, action)] += self.alpha * difference\n \n self.vitCount[(state, action)] += 1\n \"\"\" END CODE \"\"\"", "def learn(self):\n \n # target parameter update\n # target parameter update\n if self.learn_step_counter % self.nu_iter == 0:\n self.target_net.load_state_dict(self.eval_net.state_dict())\n #testing the preformace of the network\n if self.learn_step_counter == 0:\n print('As referece this first test on dev data. Is maded with the Q networks, initialized randomly : ' )\n else:\n print(\"\\n Lets copy the Q-value Net in to Q-target net!. And test the performace on the dev data: \")\n \n current_bleu = self.dev_network()\n print(\"Current Bleu score is: \", current_bleu)\n \n self.learn_step_counter += 1\n\n \n long_Batch = self.sample_size*3\n # Sampling the higgest rewards values\n b_memory_big = self.memory[np.argsort(-self.memory[:-self.max_output_length, self.state_size+1])][:long_Batch]\n \n sample_index = np.random.choice(long_Batch, self.sample_size)\n b_memory = b_memory_big[sample_index, :]\n\n b_s = torch.FloatTensor(b_memory[:, :self.state_size])\n b_a = torch.LongTensor(b_memory[:, self.state_size:self.state_size+1].astype(int))\n b_r = torch.FloatTensor(b_memory[:, self.state_size+1:self.state_size+2])\n b_s_ = torch.FloatTensor(b_memory[:, self.state_size+2: self.state_size+2 + self.state_size])\n\n b_is_eos = torch.FloatTensor(b_memory[:, self.size_memory1-1:]).view(self.sample_size, 1)\n #print(b_a, b_a.size)\n #print(b_is_eos)\n #Activate the eval_net\n unfreeze_model(self.eval_net)\n \n # q_eval w.r.t the action in experience\n q_eval = self.eval_net(b_s).gather(1, b_a) # shape (batch, 1)\n q_next = self.target_net(b_s_).detach() # detach from graph, don't backpropagate\n #taking the most likely action.\n b_a_ = torch.LongTensor(q_next.max(1)[1].view(self.sample_size, 1).long())\n #b_a_ = q_next.max(1)[0].view(self.sample_size, 1).long() # shape (batch, 1)\n q_eval_next = self.eval_net(b_s_).gather(1, b_a_) # shape (batch, 1)\n \n #If eos q_target = reward. 
\n q_target = b_r + self.gamma * b_is_eos* q_eval_next.view(self.sample_size, 1) # shape (batch, 1)\n #version 0\n #q_target = b_r + self.gamma * q_next.max(1)[0].view(self.sample_size, 1) # shape (batch, 1)\n \n loss = self.loss_func(q_eval, q_target)\n \n self.tb_writer.add_scalar(\"learn/learn_batch_loss\",\n loss.data, self.learn_step_counter)\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n #desctivate the eval_net\n freeze_model(self.eval_net)", "def forward(self, x):\n # tracking shapes\n B, C, H, W = x.size()\n K = self.K\n HW_prime = H * W\n\n # get qkv's\n f = self.f(x).view(B, C // K, H * W) # B x (C/K) x (HW)\n g = self.g(x) # B x (C/K) x H x W\n h = self.h(x) # B x (C/2) x H x W\n if self.down_sample:\n g = F.max_pool2d(g, [2, 2]) # B x (C/K) x (H/2) x (W/2)\n h = F.max_pool2d(h, [2, 2]) # B x (C/2) x (H/2) x (W/2)\n HW_prime = HW_prime // 4 # update (HW)'<-(HW) // 4\n\n g = g.view(B, C // K, HW_prime) # B x (C/K) x (HW)'\n h = h.view(B, C // 2, HW_prime) # B x (C/2) x (HW)'\n\n beta = self._dot_product_softmax(f, g) # B x (HW) x (HW)'\n s = torch.einsum('ijk,ilk->ijl', h, beta).view(B, C // 2, H, W) # B x (C/2) x H x W\n return self.gamma * self.v(s) + x # B x C x H x W", "def learn(self, experiences, gamma):\n \n states, actions, rewards, next_states, dones = experiences\n\n # Get max predicted Q values (for next states) from target model\n Q_targets_next = self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(1)\n # Compute Q targets for current states \n Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))\n\n # Get expected Q values from local model\n Q_expected = self.qnetwork_local(states).gather(1, actions)\n\n # Compute loss\n loss = F.mse_loss(Q_expected, Q_targets)\n # Minimize the loss\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # ------------------- update target network ------------------- #\n self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)", "def test_forward_values():\n rnn = GeneratorRNN(1)\n inputs = Variable(torch.zeros(1,1,3))\n hidden = rnn.init_hidden()\n e,pi,mu,sigma,rho,_ = rnn(inputs, hidden)\n\n assert (e >= 0).all()\n assert (e <= 1).all()\n\n diff = torch.abs(1-torch.sum(pi))\n assert (diff < 0.00001).all()\n\n assert (sigma > 0).all()\n\n assert (rho > -1).all()\n assert (rho < 1).all()\n\n rnn = GeneratorRNN(3)\n inputs = Variable(torch.zeros(10,1,3))\n hidden = rnn.init_hidden()\n e,pi,mu,sigma,rho,_ = rnn(inputs, hidden)\n\n pi_sum = torch.sum(pi,dim=2)\n diff = torch.abs(1-pi_sum)\n assert (diff < 0.00001).all()", "def computeQValueFromValues(self, state, action):\r\n #\r\n weightedVfvsSum = 0\r\n reward = 0\r\n # to get possible next state(s)\r\n for nextState, prob in self.mdp.getTransitionStatesAndProbs(state, action):\r\n reward += self.mdp.getReward(state, action, nextState) * prob\r\n #print \":computeQValueFromValues: nextState is: \", nextState, \" | self.values[nextState] is: \", self.values[nextState]\r\n weightedVfvsSum += prob * self.getValue(nextState)\r\n #\r\n return ( reward + ( self.discount * weightedVfvsSum) ) # making the actual qvalue\r", "def learn(self):\n if self.learn_step_counter % self.target_q_update_step == 0:\n self.target_net.load_state_dict(self.eval_net.state_dict()) #update target_net's parameters\n logging.info(\"updtate target q\")\n self.learn_step_counter += 1\n\n rgbs,depths, rgbs_1, depths_1,questions,actions,rewards,terminals = self.memory.sample()\n\n rgbs_var = Variable(torch.FloatTensor(rgbs).cuda())\n 
depths_var = Variable(torch.FloatTensor(depths).cuda())\n rgbs_1_var = Variable(torch.FloatTensor(rgbs_1).cuda())\n depths_1_var = Variable(torch.FloatTensor(depths_1).cuda())\n questions_var = Variable(torch.LongTensor(questions).cuda())\n actions_var = Variable(torch.LongTensor(actions).cuda())\n rewards_var = Variable(torch.FloatTensor(rewards).cuda())\n terminals_var = Variable(torch.FloatTensor(terminals).cuda())\n\n q_eval_matrix = self.eval_net(rgbs_var,depths_var,questions_var)\n q_eval_matrix = q_eval_matrix.view(-1,9*28*28)\n actions_var = actions_var.view(-1,1)\n q_eval = torch.gather(q_eval_matrix, 1, actions_var) \n q_eval = q_eval.squeeze(1)\n\n q_next_matrix = self.target_net(rgbs_1_var,depths_1_var,questions_var).detach() #don't backward\n q_next_matrix = q_next_matrix.view(-1,9*28*28)\n q_next = torch.max(q_next_matrix,1)[0]\n\n one_var = Variable(torch.ones_like(terminals_var))\n\n q_target = rewards_var + (one_var- terminals_var)*self.discount * q_next\n \n loss = self.loss_func(q_eval, q_target)\n\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n self.task_total_loss += loss.item()\n self.task_total_q += q_target.mean()\n self.update_count += 1", "def preval_forward(self, data_shot, label_shot, data_query):\n embedding_query = self.encoder(data_query)\n embedding_shot = self.encoder(data_shot)\n logits = self.base_learner(embedding_shot)\n loss = F.cross_entropy(logits, label_shot)\n grad = torch.autograd.grad(loss, self.base_learner.parameters())\n fast_weights = list(map(lambda p: p[1] - 0.01 * p[0], zip(grad, self.base_learner.parameters())))\n logits_q = self.base_learner(embedding_query, fast_weights)\n\n for _ in range(1, 100):\n logits = self.base_learner(embedding_shot, fast_weights)\n loss = F.cross_entropy(logits, label_shot)\n grad = torch.autograd.grad(loss, fast_weights)\n fast_weights = list(map(lambda p: p[1] - 0.01 * p[0], zip(grad, fast_weights)))\n logits_q = self.base_learner(embedding_query, fast_weights) \n return logits_q", "def update(self, state, action, nextState, reward):\n \"\"\"Description:\n Use second equation in slide 71 of MDP\n Adjest weight of active features depend on tranistion \n \"\"\"\n \"\"\" YOUR CODE HERE \"\"\"\n feat = self.featExtractor.getFeatures(state, action)\n\n # if weight is empty, then weight will need to initial to 1 for all features\n # According to which Extractor user choose, weight counter will have equal number of keys.\n if len(self.weight) == 0:\n feat = self.featExtractor.getFeatures(state, action)\n self.weight.incrementAll(feat.keys(), 1)\n \n maxQns = self.getValue(nextState)\n if maxQns == None:\n maxQns = 0\n Qsa = self.getQValue(state, action)\n difference = ( reward + self.discountRate * maxQns ) - Qsa\n \n for key in self.weight.keys():\n self.weight[key] += (self.alpha * difference * feat[key])\n \n \n \"\"\" END CODE \"\"\"", "def getQvalue(self, state, action):\n featureVector = self.getFeatures(state, action)\n qValue = 0\n for k in featureVector.keys():\n qValue = qValue + self.weights[k] * featureVector[k]\n\n return qValue", "def computeQValueFromValues(self, state, action):\n #get the Transition function and nextStates\n state_prob_pair=self.mdp.getTransitionStatesAndProbs(state,action)\n #initialize the value to zero\n actual_value=0\n #iterate over probabilities (transition functions) and next states\n for pair in state_prob_pair:\n #compute qvalue\n actual_value+=pair[1]*(self.mdp.getReward(state,action,pair[0])+self.discount*self.values[pair[0]])\n #print \"The Q 
value is \",actual_value\n return actual_value", "def learn(self, experiences, gamma):\n states, actions, rewards, next_states, dones, idxs, weights = experiences\n \n\n # Get max predicted Q values (for next states) from target model\n Q_targets_next = self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(1)\n # Compute Q targets for current states \n Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))\n\n # Get expected Q values from local model\n Q_expected = self.qnetwork_local(states).gather(1, actions)\n\n # Compute loss MSE\n loss = (Q_expected - Q_targets.detach()).pow(2)\n # Add weights to loss\n loss = loss * weights\n # Add noise to loss to arrive at prior weights\n prios = loss + 1e-6\n # Take mean\n loss = loss.mean()\n\n # Minimize the loss\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # Update buffer priorities\n self.memory.update_priorities(zip(idxs, prios.data.cpu().numpy()))\n\n\n\n # ------------------- update target network ------------------- #\n self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)", "def learn(self, state, action, reward, next_state):\r\n\r\n \"\"\"Please Fill Your Code Here.\r\n \"\"\"\r\n self.Q[state][action] = self.Q[state][action] + self.alpha * (reward + self.gamma * max(self.Q[next_state]) - self.Q[state][action])\r\n\r\n return 0", "def learn(self):\n batch = self.agent.replay_buffer.sample(self.batch_size)\n states = torch.tensor([x.state for x in batch], dtype=torch.float32).to(self.agent.device) # shape == (batch_size, 3, 6, 7)\n actions = [x.action for x in batch]\n rewards = torch.tensor([x.reward for x in batch], dtype=torch.float32).to(self.agent.device)\n next_states = torch.tensor([x.next_state for x in batch], dtype=torch.float32).to(self.agent.device)\n dones = [x.done for x in batch]\n\n self.optimizer.zero_grad()\n\n\n q_vals = self.agent.policy_net(states)[range(len(actions)), actions] # Q vals for actions taken\n q_next_vals = self.agent.target_net(next_states).detach() # we don't care about grad wrt target net\n q_next_vals[dones] = 0.0 # terminal states have no future expected value\n q_targets = rewards + self.gamma * torch.max(q_next_vals, dim=1)[0]\n\n # all_q_vals = self.agent.policy_net(states)\n # print()\n # print('actions')\n # print(actions)\n # print()\n # print('original all q vals')\n # print(self.agent.policy_net(states)) \n # print(self.agent.policy_net(states).shape)\n # print()\n # print('QVALS:', q_vals)\n # print(q_vals.shape)\n # print('\\n\\n')\n # print('QTARGETS:', q_targets)\n # print(q_targets.shape)\n\n # breakpoint()\n\n loss = self.loss_fn(q_targets, q_vals).to(self.agent.device)\n loss.backward()\n \n # for layer in self.agent.policy_net.named_parameters():\n \n # # print(f'layer: {layer[0]}')\n # # print(f'grad:', layer[1].grad)\n\n # # print('loss', loss)\n # # print('q_vals grad:', q_vals.grad)\n # # print('states:', )\n\n self.optimizer.step()\n\n self.agent.learning_iters += 1\n if self.agent.learning_iters % self.target_update_freq == 0:\n self.agent.update_target_net()\n # logger.info('Updated target net')", "def forward(self):\n self.value = np.dot(self.x_node.value, self.w_node.value) + self.b_node.value", "def forward(self,\n x: Tensor) \\\n -> Tensor:\n\n x = self.convs[0](x)\n res = x\n for i in range(self.num_rates):\n x = torch.tanh(self.filter_convs[i](\n x)) * torch.sigmoid(self.gate_convs[i](x))\n x = self.convs[i+1](x)\n res = res + x\n return res", "def forward(self, x):\n x = self.efficient_net(x)\n return x", "def 
computeQValueFromValues(self, state, action):\n \"*** YOUR CODE HERE ***\"\n qValue = 0\n transitions = self.mdp.getTransitionStatesAndProbs(state, action)\n #print('Transitions: ' + str(transitions))\n for t in transitions:\n nextState, prob = t\n reward = self.mdp.getReward(state, action, nextState)\n #print('Reward: ' + str(reward))\n oneTransition = prob * (reward + self.discount * self.values[nextState])\n qValue = qValue + oneTransition\n return qValue", "def learn(self, experiences, gamma):\n\n states, actions, rewards, next_states, dones = experiences\n\n Q_targets = self.get_dqg_target(next_states, rewards, gamma, dones)\n\n # Get expected Q values\n q_exp = self.qnetwork_local(states)\n\n # gets the q values along dimention 1 according to the actions, which is used as index\n # >>> t = torch.tensor([[1,2],[3,4]])\n # >>> torch.gather(t, 1, torch.tensor([[0],[1]]))\n # tensor([[ 1],\n # [ 4]])\n q_exp = q_exp.gather(1, actions)\n\n # compute loss\n loss = F.mse_loss(q_exp, Q_targets)\n\n # reset optimizer gradient\n self.optimizer.zero_grad()\n # do backpropagation\n loss.backward()\n # do optimize step\n self.optimizer.step()\n\n # ------------------- update target network ------------------- #\n self.soft_update(self.qnetwork_local, self.qnetwork_target, PARAM.TAU)", "def forward(self, inp):\n return inp.dot(self.W) + self.b", "def learn(self, memory: ReplayMemory, batch_size: int) -> float:\n # 从replay buffer当中采样,从经验回放集合中采样batch_size个样本,计算当前目标Q值\n indices, (state_batch, next_batch, action_batch, reward_batch, done_batch), is_weights = \\\n memory.sample(batch_size)\n # 使用行为网络计算值函数 Q_j\n values = self.__policy(state_batch).gather(1, action_batch)\n \n expected = []\n policy_Q_batch = self.__policy(next_batch).cpu().data.numpy()\n max_action_next = np.argmax(policy_Q_batch, axis=1)\n target_Q_batch = self.__target(next_batch)\n \n for i in range(batch_size):\n if done_batch[i]:\n expected.append(reward_batch[i])\n else:\n target_Q_value = target_Q_batch[i, max_action_next[i]]\n expected.append(reward_batch[i] + self.__gamma * target_Q_value)\n \n expected = torch.stack(expected)\n TD_error = torch.abs(expected - values)\n memory.update(indices, TD_error)\n \n # 根据目标函数 (Q_j - expected)^2来梯度下降\n loss = (torch.FloatTensor(is_weights).to(self.__device) * F.mse_loss(values, expected)).mean()\n\n self.__optimizer.zero_grad()\n loss.backward()\n for param in self.__policy.parameters():\n param.grad.data.clamp_(-1, 1)\n self.__optimizer.step()\n\n return loss.item()", "def forward(self, prev_state, obs_t):\r\n # Use your network to compute qvalues for given state\r\n #print(state_t.shape)\r\n h = self.conv(obs_t)\r\n\r\n h = h.view(h.size(0), -1)\r\n\r\n new_state = h_new, c_new = self.lstm(h, prev_state)\r\n advantage = self.adv(h_new)\r\n value = self.val(h_new)\r\n\r\n\r\n adv_mean = torch.mean(advantage, dim=1, keepdim=True)\r\n qvalues = value + advantage - adv_mean\r\n\r\n return new_state, qvalues", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n feature_dictionary = self.featExtractor.getFeatures(state, action)\n\n QValue = 0\n for feature in feature_dictionary:\n QValue += self.weights[feature] * feature_dictionary[feature]\n return QValue", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n feature_dictionary = self.featExtractor.getFeatures(state, action)\n\n QValue = 0\n for feature in feature_dictionary:\n QValue += self.weights[feature] * feature_dictionary[feature]\n return QValue", "def update_Q(self, reward):\n old_estimate = 
self.q_estimates[self.prev_action]\n self.q_estimates[self.prev_action] = old_estimate + 1/self.N[self.prev_action] * (reward - old_estimate)", "def test_forward(self):\n # test single input\n self.model.w = np.array([[0.5, 0.25]])\n self.model.b = 0.5\n x = np.array([[0.2, 0.1]])\n out = self.model.forward(x)\n self.assertTrue(np.abs(out[0] - 0.6514) < 0.01)\n\n # test multiple inputs\n self.model.w = np.array([[0.1, 0.2]])\n self.model.b = 0.2\n x = np.array([[0.3, 0.4],\n [0.5, 0.6]])\n out = self.model.forward(x)\n should_be = np.array([0.5769,0.5915])\n self.assertTrue(np.allclose(out, should_be, atol=0.01))", "def back_propagate(self, reward, maxQ):\n\n error = self.alpha * (reward + self.gamma*maxQ - self.value)\n #logging.debug(\"error is now %s\" % (error))\n\n # sigmoid derivate is sigmoid(x) * (1 - sigmoid(x) )\n dsig = self.value * (1 - self.value)\n\n gradient = error * dsig\n #logging.debug(\"gradient is now: %s\" % (gradient))\n\n self.weigths = np.add( self.weights, np.multiply(gradient, self.weights) )\n # self.weights = [gradient * w + w for w in self.weights]", "def learn(self, experiences, gamma):\n states, actions, rewards, next_states, dones, indices = experiences\n\n # Get max predicted Q values (for next states) from target model\n if self.dbl_dqn:\n local_best_actions = self.qnetwork_local(next_states).detach().argmax(1)\n Q_next_states = self.qnetwork_target(next_states)\n Q_targets_next = Q_next_states.gather(1, local_best_actions.unsqueeze(1))\n else:\n Q_targets_next = self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(1)\n\n # Compute Q targets for current states\n Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))\n\n # Get expected Q values from local model\n Q_expected = self.qnetwork_local(states).gather(1, actions)\n\n # Compute loss\n if self.priority_rpl:\n errors = abs(Q_expected - Q_targets)\n self.memory.update_priorities(indices, errors)\n importance = self.memory.get_importance(indices, self.a, self.b)\n importance = np.array(importance)\n loss = torch.mean(torch.mul(errors.float(), torch.from_numpy(importance).float().to(device)))\n else:\n loss = F.mse_loss(Q_expected, Q_targets)\n\n # Minimize the loss\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()", "def train(self, Q_pred: torch.FloatTensor, Q_true: torch.FloatTensor) -> float:\n self.dqn.train(mode=True)\n self.optim.zero_grad()\n loss = self.loss_fn(Q_pred, Q_true)\n loss.backward()\n self.optim.step()\n\n return loss", "def forward(self, state_t):\n # Use your network to compute qvalues for given state\n qvalues = <YOUR CODE>\n\n assert qvalues.requires_grad, \"qvalues must be a torch tensor with grad\"\n assert (\n len(qvalues.shape) == 2 and \n qvalues.shape[0] == state_t.shape[0] and \n qvalues.shape[1] == n_actions\n )\n\n return qvalues", "def forward(self, x, u0=torch.Tensor()):\n x =self.vari_gpu(x)\n u0 =self.vari_gpu(u0)\n \n # input x0 and batch size \n num_batch = x.size(0)\n x0 = x.view(num_batch, -1)\n \n A_hat = self.build_A_block()\n B_hat = self.build_B_block()\n \n # Q_sqrt in QP\n Q = self.Q_sqrt.mm(self.Q_sqrt.t())\n R = self.R_sqrt.mm(self.R_sqrt.t())\n R_diag = self.build_Rdiagnol_block(R)\n Q_hat, Q_diag = self.build_Q_block(Q, Q, R, B_hat)\n Q_sqrt_hat = sqrtm(Q_hat) # computs sqrt of Q\n Q_sqrt_hat = Q_sqrt_hat.repeat(num_batch,1,1) # builds batch\n \n # p in QP p = 2 * (Q_diag*B_hat)^T * (A_hat*x0)\n A_x0 = A_hat.mm(x0.t()).t() # presents[x1;x2;...;xN] size: batch * dim(x1;x2;...;xN)\n p = 2*A_x0.mm(Q_diag.mm(B_hat))\n 
\n # G in QP\n G1,G2 = self.build_G_block(B_hat)\n G1 = G1.repeat(num_batch,1,1) # builds batch\n G2 = G2.repeat(num_batch,1,1) # builds batch\n \n # h in QP\n h1 = self.h1.repeat(num_batch,1) # builds batch\n h21 = self.h21.repeat(num_batch,1) # builds batch\n h21 -= A_x0 \n h22 = self.h22.repeat(num_batch,1) # builds batch\n h22 += A_x0\n h2 = torch.cat((h21,h22),1)\n \n zero = self.zero.repeat(num_batch,1)\n \n # E in QP\n E = self.E_sqrt.mm(self.E_sqrt.t())\n E_sqrt = self.E_sqrt.repeat(num_batch,1,1)\n \n # for Q(x0,u0), add equality constraint: u(0) = u0 \n if u0.nelement() != 0:\n u0 = u0.view(num_batch, -1)\n # F*z = f\n F = self.F\n f = u0*self.f\n F = F.repeat(num_batch,1,1) # builds batch\n #f = f.repeat(num_batch,1) # builds batch\n# print(Q_sqrt_hat.size(), p.size(), G1.size(),\n# h1.size(), G2.size(),h2.size(),\n# E_sqrt.size(),F.size(),f.size())\n\n self.para = [Q_sqrt_hat, p, G1, h1, G2,\n h2, E_sqrt, F, f]\n u_opt,e_opt, = self.layer(Q_sqrt_hat, p, G1, h1, G2,\n h2, E_sqrt, F, f,zero) # u_opt: batch*dim(u)\n # for V(x0), defines the QP layer without equality \n # constraints \n else:\n layer = QP_layer_no_eq(nz=self.num_u, nineq_u=\n self.num_ineq_u, nineq_x=self.num_ineq_x)\n self.para = [Q_sqrt_hat, p, G1, h1, G2,\n h2, E_sqrt] \n # gets the solution of the basic optimization problem\n u_opt,e_opt, = layer(Q_sqrt_hat, p, G1, h1, G2, h2, \n E_sqrt,zero) # u_opt: batch*dim(u)\n\n # get the optimal cost\n # a+b: sum(i:1 to N): xi^T*Q*xi + u(i-1)^T*R*u(i-1)\n # c: x0^T*Q*x0\n # d:(i:1 to N):ei^T*E*ei\n a = (u_opt.mm(Q_hat)*u_opt + p*u_opt).sum(1)\n b = (A_x0.mm(Q_diag)*A_x0).sum(1)\n c = (x0.mm(Q)*x0).sum(1)\n d = (e_opt.mm(E)*e_opt).sum(1)\n cost_opt = (a+b+c+d).unsqueeze(1) # size: batch*1\n u0_opt = u_opt.mv(self.weight) # only the fisrt action\n #print(u0,u0_opt)\n return cost_opt, u0_opt", "def forward(self, x: torch.Tensor) -> torch.Tensor:\n return torch.nn.functional.linear(\n x,\n self.weight_mu + self.weight_sigma * self.weight_epsilon,\n self.bias_mu + self.bias_sigma * self.bias_epsilon,\n )", "def forward(self, X):\n self._X = X # For backprop later on.\n self._z = np.dot(X, self._W) + self._b\n a = self._act.a(self._z)\n return a", "def computeQValueFromValues(self, state, action):\n \"*** YOUR CODE HERE ***\"\n transition_state_probs = self.mdp.getTransitionStatesAndProbs(state, action)\n # Add each state and probability to q_value\n q_value = 0\n for state_, probability in transition_state_probs:\n state_reward = self.mdp.getReward(state, state_, action)\n q_value += probability * (state_reward + self.discount * self.values[state_])\n return q_value", "def update_q_values(self, state, value):\n if self.prev_state is not None and self.learning:\n reward = self.reward(Game.game_state(state))\n self.q_values[self.represent_state(self.prev_state), self.prev_action] += self.alpha * (\n reward + self.gamma * value - self.prev_q_val)", "def forward(self, state_t):\n # Use your network to compute qvalues for given state\n qvalues = self.network(state_t)\n\n assert qvalues.requires_grad, \"qvalues must be a torch tensor with grad\"\n assert len(\n qvalues.shape) == 2 and qvalues.shape[0] == state_t.shape[0] and qvalues.shape[1] == self.n_actions\n\n return qvalues", "def forward(self, w_value, x_value, b_value):\n self.inputs = [w_value, x_value, b_value]\n # return np.matmul(x_value, w_value) + b_value # [Note] Matmul Order\n return x_value.dot(w_value) + b_value # [Note] Matmul Order", "def forward(self, state_t):\n # Use your network to compute qvalues for given 
state\n conved_1 = self.conv1(state_t)\n conved_2 = self.conv2(conved_1)\n conved_3 = self.conv3(conved_2)\n qvalues = self.dense(conved_3)\n\n assert qvalues.requires_grad, \"qvalues must be a torch tensor with grad\"\n assert len(\n qvalues.shape) == 2 and qvalues.shape[0] == state_t.shape[0] and qvalues.shape[1] == n_actions\n\n return qvalues", "def forward(self, state, action):\n # Pass the states into the first layer\n x = self.fc_layers[0](state)\n x = self.bn(x)\n x = F.relu(x)\n # Concatenate the first layer output with the action\n x = torch.cat((x, action), dim=1)\n # Pass the input through all the layers apllying ReLU activation, but the last\n for layer in self.fc_layers[1:-1]:\n x = F.relu(layer(x))\n # Pass the result through the output layer apllying sigmoid activation\n x = torch.sigmoid(self.fc_layers[-1](x))\n # Return the Q-Value for the input state-action\n return x", "def computeQValueFromValues(self, state, action):\n\n ##util.raiseNotDefined()\n #\"*** YOUR CODE STARTS HERE ***\"\n # Code to remove --- from here\n transitions = self.mdp.getTransitionStatesAndProbabilities(state, action)\n qvalue = 0\n for (nextState, probability) in transitions:\n reward = self.mdp.getReward(state, action, nextState)\n qvalue += probability *(reward + self.discount*self.values[nextState])\n # Code to remove --- to here\n #\"*** YOUR CODE FINISHES HERE ***\"\n \n return qvalue", "def training_step(self):\n self.iteration += 1\n # if not enough replay memories\n if self.iteration < self.params.min_replays:\n # skip training\n return\n # sample memories\n states_val, action_val, rewards, next_state_val, continues \\\n = (rl.tools.sample_memories(self.memory, self.params.batch_size))\n # evaluate the target q\n target_q = self.sess.run(self.graph.target_q_values, feed_dict={self.graph.states: next_state_val})\n # if using double q\n if self.params.double_q:\n online_q = self.sess.run(self.graph.online_q_values, feed_dict={self.graph.states: next_state_val})\n actions = np.argmax(online_q, axis=1)\n max_next_q_values = target_q[np.arange(actions.shape[0]), actions].reshape(-1, 1)\n else:\n max_next_q_values = np.max(target_q, axis=1, keepdims=True)\n # train the online DQN\n td_target = rewards + continues * self.params.discount_factor * max_next_q_values\n _, self.loss_val = self.sess.run([self.graph.training_op, self.graph.loss],\n feed_dict={self.graph.states: states_val, self.graph.actions: action_val,\n self.graph.td_target: td_target})\n # copy to target\n if self.params.copy_interval is None or (\n self.params.copy_interval and (self.iteration % self.params.copy_interval == 0)):\n self.sess.run(self.graph.copy_online_to_target)", "def train_step(self, experiences, gamma):\n states = experiences['states']\n rewards = experiences['rewards']\n actions = experiences['actions']\n next_states = experiences['next_states']\n dones = experiences['dones']\n q_values = self.main_network(states).gather(1, actions.view(-1, 1)).squeeze()\n\n # Get max predicted Q values (for next states) from target model\n next_q_values = self.target_network(next_states).detach().max(1)[0]\n\n # Compute Q targets for current states\n expected_q_value = rewards + (gamma * next_q_values * (1 - dones))\n\n # Compute loss\n loss = F.mse_loss(q_values, expected_q_value)\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # Update the target network\n self.soft_update(self.main_network, self.target_network, TAU)", "def forward(self, state):\n if self.noisy:\n lower, upper = self.x[0], 
self.x[-1]\n test_x = lower + torch.rand(len(self.x)) * (upper - lower)\n else:\n test_x = self.x\n\n with torch.no_grad(), gpytorch.settings.fast_pred_var():\n pred = self.gp(test_x)\n ucb = pred.mean + self.beta() * pred.stddev\n\n max_id = torch.argmax(ucb)\n next_point = test_x[[[max_id]]]\n return next_point, torch.zeros(1)", "def Q(self, value):\n assert value > 0, \"Q needs to be positive and above zero (we divide by Q)\"\n self._Q = value\n self._update()", "def update_policy(self, minibatch_size):\n \n steps = self.rewards.shape[0]\n batch_size = self.rewards.shape[0] * self.rewards.shape[1]\n #steps = 500\n #batch_size = 500\n #print(steps)\n #print(batch_size)\n \n # Compute advantages\n '''\n with torch.no_grad():\n if self.gae:\n advantages = torch.zeros_like(self.rewards).to(self.training_device)\n lastgaelam = 0\n for t in reversed(range(steps)):\n if t == steps - 1:\n nextnonterminal = 1.0 - self.dones[t]\n nextvalues = self.state_values[t]\n else:\n nextnonterminal = 1.0 - self.dones[t + 1]\n nextvalues = self.state_values[t + 1]\n delta = self.rewards[t] + self.gamma * nextvalues * nextnonterminal - self.state_values[t]\n advantages[t] = lastgaelam = delta + self.gamma * self.gae_lambda * nextnonterminal * lastgaelam\n returns = advantages + self.state_values\n else:\n returns = torch.zeros_like(self.rewards).to(self.training_device)\n for t in reversed(range(steps)):\n if t == steps - 1:\n nextnonterminal = 1.0 - self.dones[t]\n next_return = self.state_values[t]\n else:\n nextnonterminal = 1.0 - self.dones[t+1]\n next_return = returns[t+1]\n returns[t] = self.rewards[t] + self.gamma * nextnonterminal * next_return\n advantages = returns - self.state_values\n ''' \n returns = torch.zeros_like(self.rewards).to(self.training_device)\n for t in reversed(range(steps)):\n if t == steps - 1:\n nextnonterminal = 1.0 - self.dones[t]\n next_return = self.state_values[t]\n else:\n nextnonterminal = 1.0 - self.dones[t+1]\n next_return = returns[t+1]\n returns[t] = self.rewards[t] + self.gamma * nextnonterminal * next_return\n advantages = returns - self.state_values\n \n\n # flatten the batch\n #b_obs = self.states.reshape((-1,) + self.state_space)\n #print(self.states.shape)\n b_obs = self.states.reshape((-1,4)).detach()\n b_logprobs = self.action_probs.reshape(-1,1).detach()\n b_actions = self.actions.reshape((-1,)).detach()\n b_advantages = advantages.reshape(-1,1)\n b_returns = returns.reshape(-1,1)\n b_values = self.state_values.reshape(-1,1)\n \n # Optimize policy and value network for K epochs, run optimization in minibatches\n \n inds = np.arange(batch_size)\n for i_epoch_pi in range(self.epochs):\n np.random.shuffle(inds)\n for start in range(0, batch_size, minibatch_size):\n end = start + minibatch_size\n minibatch_ind = inds[start:end]\n mb_advantages = b_advantages[minibatch_ind]\n if self.norm_adv:\n mb_advantages = (mb_advantages - mb_advantages.mean()) / (mb_advantages.std() + 1e-8)\n \n #_, newlogproba, entropy = self.get_action(b_obs[minibatch_ind], b_actions[minibatch_ind])\n newlogproba, entropy = self.evaluate(b_obs[minibatch_ind], b_actions[minibatch_ind])\n #ratio = (newlogproba - b_logprobs[minibatch_ind]).exp()\n ratio = torch.exp((newlogproba - b_logprobs[minibatch_ind].detach()))\n \n # Stats\n approx_kl = (b_logprobs[minibatch_ind] - newlogproba).mean()\n\n # Policy loss\n pg_loss1 = -mb_advantages * ratio\n pg_loss2 = -mb_advantages * torch.clamp(ratio, 1 - self.clip_epsilon, 1 + self.clip_epsilon)\n pg_loss = torch.max(pg_loss1, pg_loss2).mean()\n 
entropy_loss = entropy.mean()\n\n # Value loss\n _, new_values = self.policy.forward(b_obs[minibatch_ind])\n if self.clip_vloss:\n \n v_loss_unclipped = self.MseLoss(new_values,b_returns[minibatch_ind])\n #v_loss_unclipped = ((new_values - b_returns[minibatch_ind]) ** 2)\n v_clipped = b_values[minibatch_ind] + torch.clamp(new_values - b_values[minibatch_ind],\n -self.clip_epsilon, self.clip_epsilon)\n #v_loss_clipped = (v_clipped - b_returns[minibatch_ind]) ** 2\n v_loss_clipped = self.MseLoss(v_clipped,b_returns[minibatch_ind])\n v_loss_max = torch.max(v_loss_unclipped, v_loss_clipped)\n #v_loss = 0.5 * v_loss_max.mean()\n v_loss = 0.5 * v_loss_max\n else:\n #v_loss = 0.5 * ((new_values - b_returns[minibatch_ind]) ** 2).mean()\n v_loss = self.MseLoss(new_values,b_returns[minibatch_ind])\n\n loss = pg_loss + v_loss * self.vf_coeff - self.ent_coeff * entropy_loss\n\n self.optimizer.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)\n self.optimizer.step()\n # Copy new weights into old policy:\n self.old_policy.load_state_dict(self.policy.state_dict())", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n # OUR CODE HERE\n #get the value of the state\n qVal = self.values[state]\n #iterate through the MDP transition states from the current state\n for transitionState, probability in self.mdp.getTransitionStatesAndProbs(state, action):\n #q value = discount * expected value of reward of state\n qVal += self.discount * probability * self.values[transitionState]\n return qVal\n # END OUR CODE", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n Q_Value = self.Q #calling constructor\n\n learning_rate = self.alpha #gives us the learning rate\n\n temporary_QValue = self.getQValue(state,action) #to get the Q value of the state\n\n nextState_QValue = self.getValue(nextState) #to get the Q value of the landing state when taken action a and state s\n\n discount_factor = self.discount #to get the gamma/ discount factor\n\n\n Q_Value[(state,action)] = ((1-learning_rate) * temporary_QValue) + (learning_rate * (reward + discount_factor * nextState_QValue)) #for formula go to README_Reinforcement.txt at line 8\n\n #util.raiseNotDefined()", "def train_step(self):\n # Sample training batch from replay\n training_batch = self.replay.sample(self.batch_size)\n\n # Calculate target Q values for each example:\n # For non-terminal states, targetQ is estimated according to\n # targetQ = r + gamma*Q'(s',max_a Q(s',a))\n # where Q' denotes the target network.\n # For terminating states the target is computed as\n # targetQ = r\n updates = []\n for exp in training_batch:\n start,_,reward,end = exp\n if(self.dampen_states):\n # To dampen states (usually done after major patches or when the meta shifts)\n # we replace winning rewards with 0.\n reward = 0.\n state_code = end.evaluate()\n if(state_code==DraftState.DRAFT_COMPLETE or state_code in DraftState.invalid_states):\n # Action moves to terminal state\n updates.append(reward)\n else:\n # Follwing double DQN paper (https://arxiv.org/abs/1509.06461).\n # Action is chosen by online network, but the target network is used to evaluate this policy.\n # Each row in predicted_Q gives estimated Q(s',a) values for all possible actions for the input state s'.\n feed_dict = {self.ddq_net.online_ops[\"input\"]:[end.format_state()],\n self.ddq_net.online_ops[\"valid_actions\"]:[end.get_valid_actions()]}\n predicted_action = 
self.ddq_net.sess.run(self.ddq_net.online_ops[\"prediction\"], feed_dict=feed_dict)[0]\n\n feed_dict = {self.ddq_net.target_ops[\"input\"]:[end.format_state()]}\n predicted_Q = self.ddq_net.sess.run(self.ddq_net.target_ops[\"outQ\"], feed_dict=feed_dict)\n\n updates.append(reward + self.ddq_net.discount_factor*predicted_Q[0,predicted_action])\n\n # Update online net using target Q\n # Experience replay stores action = (champion_id, position) pairs\n # these need to be converted into the corresponding index of the input vector to the Qnet\n actions = np.array([start.get_action(*exp[1]) for exp in training_batch])\n targetQ = np.array(updates)\n feed_dict = {self.ddq_net.online_ops[\"input\"]:np.stack([exp[0].format_state() for exp in training_batch],axis=0),\n self.ddq_net.online_ops[\"actions\"]:actions,\n self.ddq_net.online_ops[\"target\"]:targetQ,\n self.ddq_net.online_ops[\"dropout_keep_prob\"]:0.5}\n _ = self.ddq_net.sess.run(self.ddq_net.online_ops[\"update\"],feed_dict=feed_dict)", "def learn(self, experiences, gamma):\n states_and_prev_recurrents, actions, recurrents, rewards, next_states, dones = experiences\n\n # Get max predicted Q values (for next states) from target model\n next_states_and_recurrents = torch.cat([next_states, recurrents], dim=1)\n Q_targets_next = self.qnetwork_target(next_states_and_recurrents).detach().max(1)[0].unsqueeze(1)\n # Compute Q targets for current states\n Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))\n\n # Get expected Q values from local model\n Q_expected = self.qnetwork_local(states_and_prev_recurrents).gather(1, actions)\n\n # Compute loss\n loss_rl = F.mse_loss(Q_expected, Q_targets)\n\n states = states_and_prev_recurrents[:, :8]\n target_recurrents = map_observation_to_recurrent_state(states)\n recurrent_pred = self.qnetwork_local(states_and_prev_recurrents)[:, -5:]\n\n loss_internal_states = F.multilabel_soft_margin_loss(recurrent_pred, target_recurrents)\n\n loss = loss_rl + loss_internal_states\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # ------------------- update target network ------------------- #\n self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)", "def learn(self, experiences, gamma):\n states, actions, rewards, next_states = experiences\n\n # Get max predicted Q values (for next states) from target model\n next_action_targets = self.target_model(next_states)\n next_action = next_action_targets.max(1)[0].unsqueeze(-1)\n targets = rewards + (gamma * torch.Tensor(next_action))\n#\n # Get expected Q values from policy model\n action_policy = self.policy_model(states)\n policy = action_policy.gather(1, actions)\n # Compute loss\n loss = F.mse_loss(policy, targets)\n # Minimize the loss\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # ------------------- update target network ------------------- #\n self.soft_update(self.policy_model, self.target_model, self.tau)", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n qvalue = 0\n features = self.featExtractor.getFeatures(state, action)\n #Each feature is in the form of dictionary {((3, 3), 'east'): 1.0}. Each key is a combination of coordinate and direction. 
Each value represents the old qvalue.\n for feature in features.keys():\n qvalue += features[feature] * self.weights[feature]\n return qvalue", "def learn(self, state, action, reward, state_):\n self.Q.optimizer.zero_grad()\n states = T.tensor(state, dtype=T.float).to(self.Q.device)\n action = T.tensor(action).to(self.Q.device)\n reward = T.tensor(reward).to(self.Q.device)\n states_ = T.tensor(state_, dtype=T.float).to(self.Q.device)\n\n q_pred = self.Q.forward(states)[action]\n\n q_next = self.Q.forward(states_).max()\n\n q_target = reward + self.gamma*q_next\n\n loss = self.Q.loss(q_target, q_pred).to(self.Q.device)\n loss.backward()\n self.Q.optimizer.step()\n self.decrement_epsilon()", "def getQValue(self, gameState, action):\n features = self.getFeatures(gameState, action)\n return features * self.weights", "def computeQValueFromValues(self, state, action):\n\n # Find expected utility of making this move\n x = 0\n for t in self.mdp.getTransitionStatesAndProbs(state,action):\n x += t[1] * self.getValue(t[0])\n\n\n # Return Reward + discounted expected utility\n return self.mdp.getReward(state,None,None) + self.discount*x", "def act(self, state_and_prev_recurrent, eps=0.):\n state_and_prev_recurrent = torch.from_numpy(state_and_prev_recurrent).float().unsqueeze(0).to(device)\n self.qnetwork_local.eval()\n with torch.no_grad():\n action_values = self.qnetwork_local(state_and_prev_recurrent)[:, :4]\n self.qnetwork_local.train()\n\n # Epsilon-greedy action selection\n if random.random() > eps:\n return np.argmax(action_values.cpu().data.numpy())\n else:\n return random.choice(np.arange(self.action_size))", "def q_update(self):\n\n # exit if the experience buffer is not yet large enough\n if self.experience_buffer.size < self.batch_size:\n return\n \n # get the random batch\n states, action_indices, rewards, not_terminals, succ_states, succ_players, succ_legal_moves = self.experience_buffer.random_batch(self.batch_size)\n states = states.to(Globals.device)\n action_indices = action_indices.to(Globals.device)\n rewards = rewards.to(Globals.device)\n not_terminals = not_terminals.to(Globals.device)\n succ_states = succ_states.to(Globals.device)\n succ_players = succ_players.to(Globals.device)\n\n # prepare the training data\n q_values = self.target_network(succ_states)\n target = torch.empty(1, self.batch_size)\n for i in range(self.batch_size):\n if not_terminals[i] == 0:\n target[0, i] = rewards[i]\n continue\n\n if succ_players[i] == CONST.WHITE_MOVE:\n legal_q_values = q_values[0, 0:9][succ_legal_moves[i]]\n q_value, _ = legal_q_values.max(0)\n else:\n legal_q_values = q_values[0, 9:18][succ_legal_moves[i]]\n q_value, _ = legal_q_values.min(0)\n\n target[0, i] = rewards[i] + self.disc*not_terminals[i]*q_value\n\n # execute the training step of the network\n self.training_network.train_step(states, target, action_indices) # the eligibility trace is used as td target", "def learn(self, experiences, gamma):\n states, actions, rewards, next_states, dones = experiences\n # print('learn states.shape', states.shape)\n # print('learn next_states.shape', next_states.shape)\n \n q_expected, q_targets = self.get_target_and_expected(states, \n actions, \n rewards, \n next_states, \n dones, \n gamma)\n\n\n # Compute loss\n loss = F.mse_loss(q_expected, q_targets)\n # Minimize the loss\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # ------------------- update target network ------------------- #\n self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)", "def 
forward(self, x):\n # Compute the mean norm of activations per channel.\n nu2 = x.pow(2).mean(dim=[2, 3], keepdim=True)\n\n # Perform FRN.\n x = x * torch.rsqrt(nu2 + self.eps.abs())\n\n # Scale and Bias\n if self.is_scale:\n x = self.weight * x\n if self.is_bias:\n x = x + self.bias\n return x", "def forward(self, x):\n n, c, t, v = x.size()\n x1 = x.view(n, c * t, v)\n y = None\n for i in range(self.num_subset):\n A1 = self.PA[i]\n z = self.conv_d[i](torch.matmul(x1, A1).view(n, c, t, v))\n y = z + y if y is not None else z\n A2 = self.cen(x)\n z2 = torch.matmul(x1, A2).view(n, c, t, v)\n z2 = self.conv_cen(z2)\n y += self.lamb * z2\n y = self.bn(y)\n y += self.down(x)\n y = self.relu(y)\n y = self.attention(y)\n return y", "def update1(self, state, action, nextState, reward):\n #print \"update1 in ApproximateQAgent\"\n \"*** YOUR CODE HERE ***\"\n ##################################################################################################################################Eric Did Stuff\n actionList = nextState.getLegalActions(self.index)\n\n\n #print \"Action List\", actionList\n\n\n\n\n weights = self.getWeights()\n\n features = self.featExtractor.getFeatures(state, action, self)\n #self.myFeats = features\n if self.index == 0:\n print \"FEATURES: \",features\n value = self.computeValueFromQValues(nextState)\n qValue = self.getQValue(state,action)\n #print \"value\", value, \"qValue\", qValue\n for feature in features:\n if len(actionList) != 0:\n weights[feature] = weights[feature] + self.alpha * (reward + self.discount * value - qValue) * features[feature]\n else:\n weights[feature] = weights[feature] + self.alpha * (reward - qValue) * features[feature]\n #print \"feature\", feature, \"weights\", weights[feature]\n #print \"weights\", weights\n\n #util.raiseNotDefined()", "def forward(self, input_x):\n return self.net(input_x.float())", "def update_value(self, reward):\n\t\tval = self.value\n\t\tval = val + ((reward - val)/self.visited)\n\t\tself.value = val", "def qlearning(env, iterations=1000, gamma=0.9, alpha=0.1):\n nS = env.nS # number of states\n nA = env.nA # number of actions\n Q_value = np.zeros((nS, nA))\n policy = np.ones((env.nS,env.nA))/env.nA\n epsilon = 1\n s_t1 = env.reset() # reset the environment and place the agent in the start square\n ############################\n # YOUR IMPLEMENTATION HERE #\n # HINT: Don't forget to decay epsilon according to GLIE\n\n curr_state = s_t1\n \n start = time.time() # to time how long convergence takes\n print(\"---Q Learning---\\nTraining Started.\")\n \n for k in range (1, iterations):\n # if (k%10000) == 0:\n # print(\"Now playing iteration: \", k)\n epsilon = 1/k\n curr_action, reward, new_state, done = take_one_step(env, policy, curr_state)\n new_action = sample_action(policy, new_state)\n Q_value[curr_state, curr_action] = Q_value[curr_state, curr_action] + alpha * (reward + gamma * (Q_value[new_state, np.argmax(Q_value[new_state])]) - Q_value[curr_state, curr_action])\n \n # epsilon-greedy policy update\n Q_list = np.argwhere(Q_value[curr_state] == np.amax(Q_value[curr_state])).flatten() # get a list of all indices where Q is maximum, (argmax(Q))\n max_Q = np.random.choice(Q_list.flatten()) # randomly pick from those indices. 
Picking each index is equally likely.\n for a in range (nA):\n if a == max_Q:\n policy[curr_state][a] = epsilon/nA + (1 - epsilon) # for the chosen maximal index of Q, set the policy to epsilon/m + 1 - epsilon\n else:\n policy[curr_state][a] = epsilon/nA \n \n # print(\"Q_value = {0}\".format(Q_value))\n # print(\"policy = {0}\".format(policy))\n \n if done:\n curr_state = env.reset() # reset the environment and place the agent in the start square\n curr_action = sample_action(policy, curr_state)\n else:\n curr_state = new_state\n curr_action = new_action\n \n stop = time.time()\n print(\"Training Completed.\")\n print(\"It took: {0} iterations and {1} minutes\".format(k,(stop-start)/60))\n \n ############################\n det_policy = np.argmax(Q_value, axis=1)\n return Q_value, det_policy", "def computeQValueFromValues(self, state, action):\n \n \n next_states_probs = self.mdp.getTransitionStatesAndProbs(state, action)\n # liste des recompenses R(s,a,s')\n rewards = []\n # liste des probas de transitions P(s'|a,s)\n probs = []\n # liste des Vk(s')\n previous_values = []\n # occurence[0] = les next_state\n # occurence[1] = les proba de transi\n for occurence in next_states_probs:\n rewards.append(self.mdp.getReward(state, action, occurence[0]))\n probs.append(occurence[1])\n previous_values.append(self.getValue(occurence[0]))\n Q_value = 0\n # boucle qui calcule somme des ( P(s'|a,s) * [R(s,a,s') + gamma * Vk(s')] ) sur les s'\n for i in range(len(probs)):\n Q_value += probs[i] * (rewards[i] + self.discount * previous_values[i])\n \n return Q_value\n util.raiseNotDefined()", "def prediccion(self):\n # Project the state ahead\n self.X = self.F @ self.X + self.B @ self.M\n self.P = self.F @ self.P @ self.F.T + self.Q\n\n return self.X", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n # print \"getQValue\"\n features = self.featExtractor.getFeatures(state, self.index)#.values()\n #weights = self.weights.values()\n #dotProduct = reduce( (lambda x, y: x*y), map( (lambda x, y: x+y), self.weights, features))\n #return dotProduct\n score = 0\n for key in features.keys():\n score += features[key]*self.weights[key]\n return score", "def advantage(self, state):\n Q = self.predict(state)\n return Q - np.dot(self.π.pmf(state, Q), Q)", "def quantum_net(self, q_input_features, q_weights_flat):\n\n # Reshape weights\n q_weights = q_weights_flat.reshape(self.args.q_depth, self.args.n_qubits, 3)\n\n # Start from state |+> , unbiased w.r.t. |0> and |1>\n # Amplitude encoding\n qml.QubitStateVector(q_input_features, wires=list(range(self.args.n_qubits)))\n \n # Sequence of trainable variational layers\n for k in range(self.args.q_depth):\n self.entangling_layer(self.args.n_qubits)\n self.Rot_layer(q_weights[k])\n\n # Expectation values in the Z basis\n exp_vals = [qml.expval(qml.PauliZ(position)) for position in range(self.args.target_class)]\n return tuple(exp_vals)", "def getQValue(self, state, action):\n features = self.featExtractor.getFeatures(state, action)\n total = 0\n for feat in features:\n total += self.getWeights()[feat] * features[feat]\n return total" ]
[ "0.6747889", "0.62147856", "0.6128476", "0.60471773", "0.6019139", "0.60117346", "0.6002128", "0.5985154", "0.5965721", "0.59572846", "0.5952152", "0.59481573", "0.5945076", "0.5932837", "0.5910055", "0.5899582", "0.5890279", "0.58570886", "0.5830228", "0.5819873", "0.5801153", "0.5801049", "0.5794494", "0.5792501", "0.5786561", "0.57828605", "0.5769817", "0.5769817", "0.5761622", "0.5749106", "0.57389516", "0.573731", "0.5734351", "0.57288456", "0.5728823", "0.5715762", "0.5707936", "0.5700066", "0.56967753", "0.56959164", "0.56612486", "0.5659109", "0.56472933", "0.56470686", "0.5643828", "0.56362295", "0.56345713", "0.5630269", "0.56254566", "0.56238425", "0.56208587", "0.5614038", "0.56024075", "0.56024075", "0.5593971", "0.558358", "0.55696803", "0.55695665", "0.55633485", "0.556268", "0.555533", "0.5550586", "0.55464655", "0.5542814", "0.55403054", "0.55401766", "0.55330324", "0.55297714", "0.5513674", "0.5509729", "0.55073386", "0.5505605", "0.55045444", "0.5494592", "0.54943776", "0.5490902", "0.547678", "0.5471215", "0.54706854", "0.54650015", "0.5462103", "0.54483813", "0.5445422", "0.5445407", "0.54434276", "0.54403234", "0.5440135", "0.54359686", "0.54190576", "0.54127", "0.54044056", "0.54042023", "0.54034334", "0.540155", "0.5392757", "0.5389218", "0.5374336", "0.5369545", "0.53663695" ]
0.56493485
43
Gets the advantage and value by passing out of the base network through the value and advantage heads.
def adv_val(self, input_x):
    float_x = input_x.float()
    base_out = self.conv(input_x).view(float_x.size()[0], -1)
    return self.head_adv(base_out), self.head_val(base_out)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_bias(self):", "def __get_net_probs(self):\n return np.array([node.value for node in self.net]).reshape(5,5)", "def net_output(self):\n result = self.gives()\n for k, v in self.needs().items():\n result[k] = result.get(k, 0) - v\n\n return result", "def forward(self, obs):\n res = self.hidden_layers(obs)\n logits = self.logits(res)\n value = self.value_branch(res)\n return logits, value", "def get_value(self):\n if not self.visited:\n # first visit at node\n self.visited = True\n\n # value calculation\n for node, weight in self.predecessors:\n self.value += (node.get_value() * weight)\n\n # applying activation function\n if self.activation is not None:\n self.activation()\n\n self.calculated = True\n\n return self.value\n else:\n # visited node\n if self.calculated:\n # calculated in this computation\n return self.value\n else:\n # recurrent connection\n return self.past_value", "def test_find_highest_value_node_first(self):\n nn = NeuralNet(0, 0, '', '', blank=True)\n nn.create_net(2, 2, 2, 2)\n nn.eta = 0.1\n\n # Override weights to static value for reproducibility\n for node in nn.layers[2].nodes:\n node.weights = [0.6, 0.6]\n\n nn.layers[3].nodes[0].weights = [1.0, 1.0]\n nn.layers[3].nodes[1].weights = [0.0, 0.0]\n\n val = nn.assign_output([2, 3], test=True)\n self.assertEqual(val, '10')", "def forward(self, input):\n mean, std = self.mean_net(input), self.std_net(input)\n return mean, std", "def test_forward_values():\n rnn = GeneratorRNN(1)\n inputs = Variable(torch.zeros(1,1,3))\n hidden = rnn.init_hidden()\n e,pi,mu,sigma,rho,_ = rnn(inputs, hidden)\n\n assert (e >= 0).all()\n assert (e <= 1).all()\n\n diff = torch.abs(1-torch.sum(pi))\n assert (diff < 0.00001).all()\n\n assert (sigma > 0).all()\n\n assert (rho > -1).all()\n assert (rho < 1).all()\n\n rnn = GeneratorRNN(3)\n inputs = Variable(torch.zeros(10,1,3))\n hidden = rnn.init_hidden()\n e,pi,mu,sigma,rho,_ = rnn(inputs, hidden)\n\n pi_sum = torch.sum(pi,dim=2)\n diff = torch.abs(1-pi_sum)\n assert (diff < 0.00001).all()", "def adv_val(self, input_x) -> Tuple[Tensor, Tensor]:\n float_x = input_x.float()\n base_out = self.net(float_x)\n return self.fc_adv(base_out), self.fc_val(base_out)", "def get_weights(self):", "def getWeight(self) -> float:\n ...", "def bias_prior(self):", "def get_hidden_values(self, data):\n return T.nnet.sigmoid(T.dot(data, self.w1) + self.b1)", "def weight(self):", "def get_q_values(self, state, network):\n out = None\n state = state.permute(0, 3, 1, 2)\n #pdb.set_trace()\n ##############################################################\n ################ YOUR CODE HERE - 4-5 lines lines ################\n if network == 'q_network':\n out = self.q_network(state)\n else:\n out = self.target_network(state)\n ##############################################################\n ######################## END YOUR CODE #######################\n return out", "def analyze_belief_strength_with_bias(self, G):\r\n n = []\r\n nbs_list = []\r\n for node in G.nodes: #cycles through the nodes of the graph to mine the attributes\r\n n.append(node) #appends each node to a list that will be put into a dictionary\r\n pbs_list = []\r\n og_bs = G.nodes[node]['belief_strength'] #mines the numerical value for a nodes belief strength, from a pre-set node attribute\r\n unc = G.nodes[node]['uncertainty'] #mines the numerical value for a nodes belief uncertainty, from a pre-set node attribute\r\n prob = G.nodes[node]['probability']\r\n for pre in G.predecessors(node):\r\n ew = G.edges[pre, node]['weight'] #mines the 
numerical value of an edge's weight, from a pre-set edge attribute\r\n pre_bs = G.nodes[pre]['belief_strength'] #mines the numerical value for a predecessors belief strength, from a pre-set node attribute\r\n x = ew * pre_bs #determines how much a node values its neighbor's opinion.\r\n pbs_list.append(x) #puts all values for predecessor belief strangths in a list\r\n if len(pbs_list) == 0:\r\n nbs = og_bs\r\n nbs = int(nbs)\r\n else:\r\n apbs = sum(pbs_list)/len(pbs_list) #calculates the average predecessor belief strength value for a node\r\n if apbs*og_bs > 0:\r\n if apbs > 0:\r\n nbs = min(og_bs + (0.1*prob*unc*apbs), 100)\r\n else:\r\n nbs = max(og_bs + (0.1*prob*unc*apbs), -100)\r\n nbs = int(nbs)\r\n else:\r\n nbs = og_bs\r\n nbs = int(nbs)\r\n nbs_list.append(nbs) #the new belief strengths are appended to a list that will be put into adictionary\r\n change = dict(zip(n, nbs_list)) #creates a dictionary from two lists which stores the nodes as keys and their new belief strengths as values\r\n print(change)\r\n return change #this will be used to update the list in a different function\r", "def value(self, observation, prev_action, prev_reward):\n agent_inputs = buffer_to((observation, prev_action, prev_reward),\n device=self.device)\n _mu, _log_std, value, _rnn_state = self.model(*agent_inputs, self.prev_rnn_state)\n return value.to(\"cpu\")", "def forward(self, value, query, lens):\n relevant_scores = self.relevant_score(value, query, lens)\n e_relevant_scores = torch.exp(relevant_scores)\n weights = e_relevant_scores / e_relevant_scores.sum(-1, keepdim=True)\n attention = (weights.unsqueeze(-1) * value).sum(1)\n return attention", "def res_get(hp2res, hp_dict, hp_labels):\n logg = logging.getLogger(f\"c.{__name__}.res_get\")\n logg.setLevel(\"INFO\")\n logg.debug(f\"Start res_get\")\n\n # build the hp_set for the corresponding bar\n hp_set = []\n for label in hp_labels:\n hp_set.append(hp_dict[label])\n hp_set = tuple(hp_set)\n # get the corresponding loss value\n hp_val = hp2res[hp_set]\n\n logg.debug(f\"hp_set {hp_set} hp_val {hp_val}\")\n return hp_val", "def _learn_node_parameter_var(outputs, weights, inputs):\n var = 0.\n\n \"\"\" YOUR CODE HERE \"\"\"\n temp = 0\n N_observe = outputs.shape[0]\n if inputs is None:\n temp = np.sum((outputs-weights[0])**2)\n else:\n for i in range(N_observe):\n temp += (outputs[i] - (np.sum(weights[1:] * inputs[i]) +weights[0]))**2\n var = temp/N_observe\n\n\n\n \"\"\" END YOUR CODE HERE \"\"\"\n\n return var", "def getQValue(self, state, action):\n #print \"getQValue in ApproximateQAgent\"\n\n \"*** YOUR CODE HERE ***\"\n weights = self.getWeights()\n features = self.featExtractor.getFeatures(state, action, self)\n\n value = 0\n\n #print \"FEATURES: \", features\n #print \"WEIGHTS: \", weights\n\n for feature in features:\n value += features[feature]*weights[feature]\n return value\n #util.raiseNotDefined()", "def get_reward(self):\n # Ver list\n self.Verlist = {\n '1': False,\n '2': False,\n '3': False,\n '4': False,\n '5': True,\n }\n # --------------------------------- NEW ----\n r = 0\n if self.ENVGetSIReset:\n V = {\n 'CoolRateTemp': self.DRateFun(self.mem['KCNTOMS']['Val']),\n 'CurrentTemp': self.mem['UAVLEG2']['Val'],\n 'CurrentPres': self.mem['ZINST65']['Val'],\n 'Dis': abs(self.DRateFun(self.mem['KCNTOMS']['Val']) - self.mem['UAVLEG2']['Val']),\n 'PZRLevel': self.mem['ZINST63']['Val'],\n 'SG1Nar': self.mem['ZINST78']['Val'], 'SG2Nar': self.mem['ZINST77']['Val'],\n 'SG3Nar': self.mem['ZINST76']['Val'],\n 'SG1Wid': 
self.mem['ZINST72']['Val'], 'SG2Wid': self.mem['ZINST71']['Val'],\n 'SG3Wid': self.mem['ZINST70']['Val'],\n 'SG1Pres': self.mem['ZINST75']['Val'], 'SG2Pres': self.mem['ZINST74']['Val'],\n 'SG3Pres': self.mem['ZINST73']['Val'],\n }\n if self.Verlist['1']:\n # Cooling rate에 따라서 온도 감소\n r -= V['Dis'] / 100\n # 가압기 수위 10 아래 종료\n # if V['PZRLevel'] <= 10: r -= 100\n if self.Verlist['2']:\n # 목표치까지 도달\n r += (29.5 - V['CurrentPres']) / 100\n r += (170 - V['CurrentTemp']) / 100\n if self.Verlist['3']:\n # Cooling rate에 따라서 온도 감소\n dis_reward = - V['Dis'] / 100 # [0.0 ~ -0.2] 동향을 보임\n # Pressure and Temp Dis\n curp = 29.5 if V['CurrentPres'] <= 29.5 else V['CurrentPres']\n curt = 170 if V['CurrentTemp'] <= 170 else V['CurrentTemp']\n dis_pres = (29.5 - V['CurrentPres']) / 100\n dis_temp = (170 - V['CurrentTemp']) / 100\n\n # r += (dis_pres * 0.1) + (dis_temp * 0.1) + (dis_reward * 10) # 감압 X\n r += (dis_pres * 0.1) + (dis_reward * 5)\n if self.Verlist['4']:\n # Cooling rate에 따라서 온도 감소\n dis_reward = - V['Dis'] / 100 # [0.0 ~ -0.2] 동향을 보임\n # Pressure and Temp Dis\n curp = 29.5 if V['CurrentPres'] <= 29.5 else V['CurrentPres']\n dis_pres = (29.5 - V['CurrentPres']) / 100\n PT_reward = - PTCureve().Check(Temp=V['CurrentTemp'], Pres=V['CurrentPres'])\n r += (dis_pres * 0.1) + (dis_reward * 5) + (PT_reward * 0.1)\n if self.Verlist['5']:\n r = 0\n # 1] Cooling rate에 따라서 온도 감소\n coolrate_r = - V['Dis']\n # 2] 가압기 수위 20~76% 구간 초과시 패널티\n pzrlevel_r = 0\n if 20 <= V['PZRLevel'] <= 76:\n pass\n else:\n if 20 > V['PZRLevel']:\n pzrlevel_r -= (20 - V['PZRLevel'])\n else:\n pzrlevel_r -= (V['PZRLevel'] - 76)\n # 3] 증기 발생기 6% ~ 50% 이상 초과 시 패널티\n sg_r = 0\n for _ in range(1, 4):\n if 6 <= V[f'SG{_}Nar'] <= 50:\n pass\n else:\n if 6 > V[f'SG{_}Nar']:\n sg_r -= (6 - V[f'SG{_}Nar'])\n else:\n sg_r -= (V[f'SG{_}Nar'] - 50)\n # 4] PT 커브에서 벗어나면 거리만큼 패널티\n PT_reward = - PTCureve().Check_Dis(Temp=V['CurrentTemp'], Pres=V['CurrentPres'])\n # 5] 목표치와 가까워 질 수록 +\n pres_r, temp_r = 0, 0\n pres_r = (29.5 - V['CurrentPres'])\n temp_r = (170 - V['CurrentTemp'])\n # 6] S/G 압력\n Avg_pres = (V['SG1Pres'] + V['SG2Pres'] + V['SG3Pres'])/3\n SGpres_r = 9 - Avg_pres if Avg_pres > 9 else 0\n # --------------------------------------------------------------\n w = {\n 'coolrate_r': [coolrate_r, 2],\n 'pzrlevel_r': [pzrlevel_r, 1],\n 'sg_r': [sg_r, 1.5],\n 'PT_reward': [PT_reward, 3],\n 'pres_r': [pres_r, 1],\n 'temp_r': [temp_r, 0.5],\n 'SGpres_r': [SGpres_r, 0.5]\n }\n\n log_txt_temp = ''\n for key in w.keys():\n r += w[key][0] * w[key][1]\n log_txt_temp += f'[{round(w[key][0]*w[key][1], 1)}:{w[key][0]}*{w[key][1]}]_'\n log_txt_temp = f'R:{r} = ' + log_txt_temp\n\n self.Loger_txt += log_txt_temp\n\n # self.Loger_txt += f\"R:{r} = {dis_pres * 0.1}+{dis_temp * 0.1}+({dis_reward * 10})\\t\"\n # self.Loger_txt += f\"R:{r} = {dis_pres * 0.1}+({dis_reward * 5})\\t\" #Verlist['3']\n # self.Loger_txt += f\"R:{r} = {dis_pres * 0.1}+({dis_reward * 5})+({PT_reward * 0.1})\\t\"\n\n # --------------------------------- Send R ----\n self.AcumulatedReward += r\n # self.Loger_txt += f'{r}\\t'\n self.DIS_CSF_Info += f'[R: {r}]\\t'\n return r", "def forward(self, state):\n x = state\n feature = self.feature_layer(x)\n action_value = self.value_layer(feature)\n advantage = self.advantage_layer(feature)\n \n q_value = action_value + (advantage - advantage.mean(dim=1, keepdim=True))\n return q_value", "def forward(self, state):\n\n # connect layers to each other and put relu activations between them\n for layer in self.hidden_layers:\n state = 
layer(state)\n state = F.relu(state)\n value = self.value_layer(state)\n return value", "def forwardPolicyNet(self, state):\n with torch.no_grad():\n q_values = self.policy_net(state)\n return q_values", "def _get_weight(self):\n return self.__weight", "def _get_weight(self):\n return self.__weight", "def _get_weight(self):\n return self.__weight", "def _get_weight(self):\n return self.__weight", "def _get_weight(self):\n return self.__weight", "def _get_weight(self):\n return self.__weight", "def _get_weight(self):\n return self.__weight", "def _get_weight(self):\n return self.__weight", "def value(self):\n #import pdb; pdb.set_trace()\n return ((self.team1.get_cur_hp() / self.team1.get_total_hp()) - \n (self.team2.get_cur_hp() / self.team2.get_total_hp()))", "def _policy_nn(self):\n with tf.variable_scope(\"reward_params\") as scope:\n \n self.h1 = tf.layers.dense(self.input_ph, self.hidden_dim, tf.nn.tanh,\n kernel_initializer=tf.random_normal_initializer(\n stddev=np.sqrt(1 / self.params_dim)), name=\"h1\")\n self.h2 = tf.layers.dense(self.h1, self.hidden_dim, tf.nn.tanh,\n kernel_initializer=tf.random_normal_initializer(\n stddev=np.sqrt(1 / self.params_dim)), name=\"h2\")\n self.rewards = tf.layers.dense(self.h2, 1,\n kernel_initializer=tf.random_normal_initializer(\n stddev=np.sqrt(1 / self.hidden_dim)), name=\"rewards\")\n self.rewards_sum = tf.reduce_sum(self.rewards)", "def get_hidden_values(self, input):\n return T.nnet.sigmoid(T.dot(input, self.W) + self.bh)", "def get_thrust_value(self, command):\n return self._gain * numpy.abs(command) * command", "def get(self) -> float:\n ...", "def get_weight(self, val1, val2):\n\n\t\tnode1 = self.get_node(val1)\n\t\tnode2 = self.get_node(val2)\n\n\t\treturn node1.get_weight(node2)", "def learn(self):\n # Calculate prior probabilities.\n self.priorpos = len(self.posdata) / (len(self.posdata) + len(self.negdata))\n self.priorneg = len(self.negdata) / (len(self.posdata) + len(self.negdata))\n print(\"Prior probability positive: \")\n print(self.priorpos)\n print(\"Prior probability negative: \")\n print(self.priorneg)\n\n # Calculate negative likelihood/conditional probability.\n occurpos = self.occurence(self.posvec)\n self.condpos = self.condprob(occurpos)\n occurneg = self.occurence(self.negvec)\n self.condneg = self.condprob(occurneg)", "async def get_change(sochain_url, value_out, network, address):\n try:\n balance = await sochain_api.get_balance(sochain_url, network, address)\n balance = round(balance[0].amount * 10 ** 8)\n change = 0\n if balance - value_out > DUST_THRESHOLD:\n change = balance - value_out\n return change\n except Exception as err:\n raise Exception(str(err))", "def net(input_lst, weight_lst, bias):\r\n net_total = bias\r\n\r\n for node in range(len(input_lst)):\r\n net_total += input_lst[node] * weight_lst[node]\r\n\r\n return net_total", "def getWeights(self, gameState, action):\n return {'successorScore': 1.0}", "def getWeights(self, gameState, action):\n return {'successorScore': 1.0}", "def getWeights(self, gameState, action):\n return {'successorScore': 1.0}", "def getWeights(self, gameState, action):\n return {'successorScore': 1.0}", "def getWeights(self, gameState, action):\n return {'successorScore': 1.0}", "def getWeights(self, gameState, action):\n return {'successorScore': 1.0}", "def getWeights(self, gameState, action):\n return {'successorScore': 1.0}", "def brain_weight_oz(self):\r\n return Heart.heart_weight_oz(self) # Used method from Heart Class\r", "def forward(self, prev_state, obs_t):\r\n # Use 
your network to compute qvalues for given state\r\n #print(state_t.shape)\r\n h = self.conv(obs_t)\r\n\r\n h = h.view(h.size(0), -1)\r\n\r\n new_state = h_new, c_new = self.lstm(h, prev_state)\r\n advantage = self.adv(h_new)\r\n value = self.val(h_new)\r\n\r\n\r\n adv_mean = torch.mean(advantage, dim=1, keepdim=True)\r\n qvalues = value + advantage - adv_mean\r\n\r\n return new_state, qvalues", "def LHopital(n, top, bot, accuracy=0.0001, scaledown=1.25):\n vtop = top(n)\n vbot = bot(n)\n i = 1\n while vtop == 0 and vbot == 0:\n vtop = deriv(top, n, i, accuracy, scaledown)\n vbot = deriv(bot, n, i, accuracy, scaledown)\n i += 1\n return vtop/vbot", "def get_hidden_values(self, input):\r\n return T.nnet.sigmoid(T.dot(input, self.W) + self.b)", "def get_hidden_values(self, input):\r\n return T.nnet.sigmoid(T.dot(input, self.W) + self.b)", "def compute_loss(\n action_probs: tf.Tensor, values: tf.Tensor, returns: tf.Tensor\n) -> tf.Tensor:\n\n advantage = returns - values\n td = tf.subtract(returns, values)\n\n # actor\n # action_log_probs = tf.math.log(action_probs)\n # actor_loss = -tf.math.reduce_sum(action_log_probs * advantage)\n action_log_probs = tf.math.log(action_probs)\n actor_loss = -tf.math.reduce_mean(action_log_probs * td)\n\n # critic\n # td = tf.subtract(returns, values)\n # critic_loss = tf.reduce_mean(tf.square(td))\n critic_loss = huber_loss(values, returns)\n\n tf.print(\"a_loss:\", actor_loss, \"c_loss:\", critic_loss)\n\n return actor_loss + critic_loss", "def gain(self):\n return self[1]", "def get_expected_cost(self):", "def value(self, observation, prev_action, prev_reward):\n model_inputs = buffer_to((observation, prev_action, prev_reward),\n device=self.device)\n _mu, _log_std, value = self.model(*model_inputs)\n return value.to(\"cpu\")", "def baseEvaluate(self, gameState, action):\n features = self.getFeatures(gameState, action)\n weights = self.getWeights(gameState, action)\n return features * weights", "def get_hyperparams(self):", "def forward(self, pred, gt, weight=None):\n num_pos = torch.relu(torch.sum(gt) - 1) + 1\n num_neg = torch.relu(torch.sum(1 - gt) - 1) + 1\n if weight is not None:\n loss = nn.BCEWithLogitsLoss(reduction='none')(pred, gt.float())\n loss = torch.mean(loss * weight)\n elif self.balanced is False:\n loss = nn.BCEWithLogitsLoss(reduction='mean')(pred, gt.float())\n else:\n loss = nn.BCEWithLogitsLoss(pos_weight=num_neg * 1.0 / num_pos, reduction='mean')(pred, gt.float())\n\n # compute precision, recall, f1\n pred_labels = pred > 0\n gt, pred_labels, pred = gt.detach().cpu().numpy(), pred_labels.detach().cpu().numpy(), pred.detach().cpu().numpy()\n precision = precision_score(gt[0], pred_labels[0])\n recall = recall_score(gt[0], pred_labels[0])\n f1 = f1_score(gt[0], pred_labels[0])\n mean_logit_true = np.sum(pred * gt) / max(1, np.sum(gt))\n mean_logit_false = np.sum(pred * (1 - gt)) / max(1, np.sum(1 - gt))\n\n eval_stats = {\n \"loss\": loss,\n \"precision\": float(precision),\n \"recall\": float(recall),\n \"f1\": float(f1),\n \"logit_true\": float(mean_logit_true),\n \"logit_false\": float(mean_logit_false)\n }\n return eval_stats", "def getWeights(self, gameState, actton):\n\t\treturn {'successorScore': 1.0}", "def value(self) -> float:", "def prediction_b(self):\r\n return self._prediction_b", "def test_get_hyperflex_node_profile_by_moid(self):\n pass", "def get_value_loss(flat_params):\n set_flat_params_to(value_net, tensor(flat_params))\n for param in value_net.parameters():\n if param.grad is not None:\n param.grad.data.fill_(0)\n 
values_pred = value_net(states)\n value_loss = (values_pred - returns).pow(2).mean() # MeanSquaredError\n\n # weight decay\n for param in value_net.parameters():\n value_loss += param.pow(2).sum() * l2_reg\n value_loss.backward()\n return value_loss.item(), get_flat_grad_from(value_net.parameters()).cpu().numpy()", "def getWeight(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def _val_step(self, inputs, targets, extra_params):\n\n inputs = inputs.to(self.device)\n targets = targets.to(self.device, non_blocking=True)\n\n recons = self.generator(inputs)\n\n # Discriminator part.\n # pred_fake = self.discriminator(recons) # Generated fake image going through discriminator.\n # pred_real = self.discriminator(targets) # Real image going through discriminator.\n # gradient_penalty = compute_gradient_penalty(self.discriminator, targets, recons)\n # disc_loss = pred_fake.mean() - pred_real.mean() + self.lambda_gp * gradient_penalty\n\n # Generator part.\n # gen_loss = -pred_fake.mean()\n recon_loss = self.loss_funcs['recon_loss_func'](recons, targets)\n # total_gen_loss = gen_loss + self.recon_lambda * recon_loss\n\n step_loss = recon_loss\n step_loss_components = {'recon_loss': recon_loss}\n\n return recons, step_loss, step_loss_components", "def value(self):\n return self.head", "def evaluate(self, gameState, action):\n features = self.getFeatures(gameState, action)\n ####print \"features \", features\n weights = self.getWeights(gameState, action)\n ####print \"weights \", weights\n return features * weights", "def get_net(netsnt, netrcv, tempsnt, temprcv):\n return {\n 'NET-PKT-SNT': (netsnt - tempsnt),\n 'NET-PKT-RCV': (netrcv - temprcv),\n }", "def get_hidden_values(self, input):\n return T.nnet.sigmoid(T.dot(input, self.W) + self.b)", "def get_hidden_values(self, input):\n return T.nnet.sigmoid(T.dot(input, self.W) + self.b)", "def forward(self, input):\n\n common = self.common_tower(input)\n wdl = self.wdl_head(common)\n policy = self.policy_head(common)\n\n return wdl, policy", "def call(self, obs):\n\t\tx = tf.convert_to_tensor(obs)\n\t\thidden_logs = self.hidden1(x)\n\t\thidden_vals = self.hidden2(x)\n\t\treturn self.logits(hidden_logs), self.value(hidden_vals)", "def dealer_probs():\n # Pdf of any current hand (value, hard) and final value; p(v_f | v_c) where v_f = final value, v_c = current value\n probabilities = {}\n\n # End nodes: (value, True) for value >= 17 and (value, False) for value > 17\n # Dependencies (in order of increasing requirements):\n # Hard values, value >= 11, possiblity of bust, no possibility of going soft with an ace (value, True) depends on (value', True) for 17 > value' > value\n # Soft values, 17 >= value >= 11 (value, False) depends on (value', False) for 17 >= value' > value, (value', True) for 17 > value' > 11\n # Hard values, 11 > value >= 2 , no possibility of bust, possibility of going soft with an ace (value, True) depends on (value', True) for 17 > value' > value and (value', False) for 17 >= value' > 13\n\n\n # End nodes\n for value in xrange(17, 22):\n probabilities[(value, True)] = {value: 1.0}\n if value == 17: continue # on soft 17, dealer will still hit\n probabilities[(value, False)] = {value: 1.0}\n\n # Hard values, 17 > value >= 11, possibility of bust, no possibility of going soft with an ace\n for value in xrange(16, 10, -1):\n probabilities[(value, True)] = {}\n current_prob = probabilities[(value, True)]\n 
for next_card in xrange(1, min(10, 21-value)+1):\n next_prob = probabilities[(value + next_card, True)]\n for end_val in next_prob:\n current_prob[end_val] = current_prob.get(end_val, 0) + next_prob[end_val] * PROBABILITIES[next_card]\n\n # Soft values, 17 >= value >= 11\n for value in xrange(17, 10, -1):\n probabilities[(value, False)] = {}\n current_prob = probabilities[(value, False)]\n for next_card in xrange(1, 11):\n next_value = value + next_card\n hard = False\n if next_value > 21:\n next_value -= 10\n hard = True\n next_prob = probabilities[(next_value, hard)]\n for end_val in next_prob:\n current_prob[end_val] = current_prob.get(end_val, 0) + next_prob[end_val] * PROBABILITIES[next_card]\n\n # Hard values, 11 > value >= 2, no possibility of bust, possibility of going soft with an ace\n for value in xrange(10, 1, -1):\n probabilities[(value, True)] = {}\n current_prob = probabilities[(value, True)]\n for next_card in xrange(2, 12):\n next_value = value + next_card\n hard = (next_card != 11)\n next_prob = probabilities[(next_value, hard)]\n for end_val in next_prob:\n current_prob[end_val] = current_prob.get(end_val, 0) + next_prob[end_val] * PROBABILITIES[next_card]\n\n return probabilities", "def test_find_highest_value_node_last(self):\n nn = NeuralNet(0, 0, '', '', blank=True)\n nn.create_net(2, 2, 2, 2)\n nn.eta = 0.1\n\n # Override weights to static value for reproducibility\n for node in nn.layers[2].nodes:\n node.weights = [0.6, 0.6]\n\n nn.layers[3].nodes[0].weights = [0.0, 0.0]\n nn.layers[3].nodes[1].weights = [1.0, 1.0]\n\n val = nn.assign_output([2, 3], test=True)\n self.assertEqual(val, '01')", "def gibbs_ask_traffic(self, X, e, Z, bn, N):\n\n #makes copies\n X = e\n e = e\n\n #probability\n probability = [0,0]\n numerator = 0\n\n\n #True, False\n\n for x in range(N):\n # second joint\n if Z == True: # if non evidence variable\n random_choice = np.random.choice([0,1], 1, True, [0.5, 0.5])[0] #Rain or No Rain\n X[1] = bn[1][random_choice][0]\n else:\n random_choice = np.random.choice([0, 1], 1, True, [0.5, 0.5])[0] #Rain or No Rain\n X[1] = bn[1][random_choice][1]\n\n # first joint\n if X[1] == 0.8 or X[1] == 0.2: # Rain is true\n X[0] = bn[0][0]\n else: # Rain is False\n X[0] = bn[0][1]\n\n # third joint\n if X[1] == 0.8 or X[1] == 0.1: # traffic\n random_late = np.random.choice([0,1], 1, True, [0.5,0.5])[0]\n X[2] = bn[2][0][random_late]\n else: # no traffic\n random_late = np.random.choice([0, 1], 1, True, [0.5, 0.5])[0]\n X[2] = bn[2][1][random_late]\n\n # print(X)\n if X[0] == 0.1:\n probability[0] += 1\n else:\n probability[1] += 1\n\n\n probability[0] = probability[0] / N\n probability[1] = probability[1] / N\n # print(probability)\n return probability", "def forward(self, x):\n dims = list(range(1, len(x.shape)))\n mean = x.mean(dim=dims, keepdim=True)\n var = torch.pow(x - mean, 2).mean(dim=dims, keepdim=True)\n return self.apply_gain_and_bias((x - mean) / (var + EPS).sqrt())", "def getWeights(self, gameState, action):\n # return {'successorScore': 1.0}\n if self.isOffensive:\n return self.getOffensiveWeights(gameState, action)\n else:\n return self.getDefensiveWeights(gameState, action)", "def test_get_damage(self):\n self.veh.health = 2.2\n for op in self.veh.operators:\n op.health = 0.5\n self.veh.get_damage(0.5)\n self.assertEqual(self.veh.health, 1.9)\n self.assertEqual(self.veh.operators[0].health, 0.4)\n self.assertEqual(self.veh.operators[1].health, 0.45)\n self.assertEqual(self.veh.operators[2].health, 0.45)", "def getMyValue(self):\n valueBV = 0.0\n 
valueCR = 0.0\n valueAL = 0.0\n valueEC = 0.0\n valueIA = 0.0\n factorAL = globals.cityCRGen/globals.cityALGen\n factorEC = globals.cityCRGen/globals.cityECGen\n factorIA = globals.cityCRGen/globals.cityIAGen\n ratio = self.strength/100.0\n valueCR += self.myDesign.costCR*ratio\n valueAL += self.myDesign.costAL*ratio\n valueEC += self.myDesign.costEC*ratio\n valueIA += self.myDesign.costIA*ratio\n valueBV += (valueCR +\n valueAL*factorAL +\n valueEC*factorEC +\n valueIA*factorIA) / 1000.0\n return (valueBV, valueCR, valueAL, valueEC, valueIA)", "def get_aff_net(sta):\n pass", "def getQValue(self, gameState, action):\n features = self.getFeatures(gameState, action)\n return features * self.weights", "def get_node_value(succs, preds):\n ret = 1\n if succs == 0:\n ret *= NODE_ENTRY\n\n if preds == 0:\n ret *= NODE_EXIT\n\n ret *= NODE_NORMAL\n return ret", "def forward(self, x):\n x = F.relu(self.affine1(x))\n x = F.relu(self.affine2(x))\n\n # actor: choses action to take from state s_t\n # by returning probability of each action\n action_prob = F.softmax(self.action_head(x), dim=-1)\n\n # critic: evaluates being in the state s_t\n state_values = self.value_head(x)\n\n # return values for both actor and critic as a tupel of 2 values:\n # 1. a list with the probability of each action over the action space\n # 2. the value from state s_t\n return action_prob, state_values", "def value_net(self):\n return functools.partial(self.value_net_fn, self.value_net_params)", "def test_regression_relative_attention_bidirectional_values(self):\n outputs, unused_params = self.relative_attention.init_with_output(\n random.PRNGKey(0), self.query_len, self.key_len, bidirectional=True)\n self.assertEqual(outputs.shape,\n (1, self.num_heads, self.query_len, self.key_len))\n self.assertAlmostEqual(outputs[0, 0, 0, 0], 0.55764728, places=5)\n self.assertAlmostEqual(outputs[0, 1, 2, 1], -0.10935841, places=5)\n self.assertAlmostEqual(outputs[0, 1, 4, 6], 0.14510104, places=5)\n self.assertAlmostEqual(outputs[0, 2, 4, 6], -0.36783996, places=5)", "def learn(self):\n \n # target parameter update\n # target parameter update\n if self.learn_step_counter % self.nu_iter == 0:\n self.target_net.load_state_dict(self.eval_net.state_dict())\n #testing the preformace of the network\n if self.learn_step_counter == 0:\n print('As referece this first test on dev data. Is maded with the Q networks, initialized randomly : ' )\n else:\n print(\"\\n Lets copy the Q-value Net in to Q-target net!. 
And test the performace on the dev data: \")\n \n current_bleu = self.dev_network()\n print(\"Current Bleu score is: \", current_bleu)\n \n self.learn_step_counter += 1\n\n \n long_Batch = self.sample_size*3\n # Sampling the higgest rewards values\n b_memory_big = self.memory[np.argsort(-self.memory[:-self.max_output_length, self.state_size+1])][:long_Batch]\n \n sample_index = np.random.choice(long_Batch, self.sample_size)\n b_memory = b_memory_big[sample_index, :]\n\n b_s = torch.FloatTensor(b_memory[:, :self.state_size])\n b_a = torch.LongTensor(b_memory[:, self.state_size:self.state_size+1].astype(int))\n b_r = torch.FloatTensor(b_memory[:, self.state_size+1:self.state_size+2])\n b_s_ = torch.FloatTensor(b_memory[:, self.state_size+2: self.state_size+2 + self.state_size])\n\n b_is_eos = torch.FloatTensor(b_memory[:, self.size_memory1-1:]).view(self.sample_size, 1)\n #print(b_a, b_a.size)\n #print(b_is_eos)\n #Activate the eval_net\n unfreeze_model(self.eval_net)\n \n # q_eval w.r.t the action in experience\n q_eval = self.eval_net(b_s).gather(1, b_a) # shape (batch, 1)\n q_next = self.target_net(b_s_).detach() # detach from graph, don't backpropagate\n #taking the most likely action.\n b_a_ = torch.LongTensor(q_next.max(1)[1].view(self.sample_size, 1).long())\n #b_a_ = q_next.max(1)[0].view(self.sample_size, 1).long() # shape (batch, 1)\n q_eval_next = self.eval_net(b_s_).gather(1, b_a_) # shape (batch, 1)\n \n #If eos q_target = reward. \n q_target = b_r + self.gamma * b_is_eos* q_eval_next.view(self.sample_size, 1) # shape (batch, 1)\n #version 0\n #q_target = b_r + self.gamma * q_next.max(1)[0].view(self.sample_size, 1) # shape (batch, 1)\n \n loss = self.loss_func(q_eval, q_target)\n \n self.tb_writer.add_scalar(\"learn/learn_batch_loss\",\n loss.data, self.learn_step_counter)\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n #desctivate the eval_net\n freeze_model(self.eval_net)", "def __getstate__(self):\n return (self.layers, self.best_loss)", "def _get_hop(self):\n return self.__hop", "def GetWeight(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def receive_blocks(self, content: Tuple[int, ndarray]) -> Union[Iterable[netEncapsulation], netEncapsulation, None]:\n # get last state of working node\n last_state = self.Bak_Weights_Node.get(content[0], 0)\n # update global current state\n self.Current_Weights = self.Current_Weights + content[1]\n # get difference\n grad_diff = self.Current_Weights - last_state\n # update last state of working node\n self.Bak_Weights_Node[content[0]] = self.Current_Weights\n\n return netEncapsulation(content[0], (-1, grad_diff))", "def coefficient(self) -> float:\n ...", "def predict(self, model_input):\n # Should return a dictionary of move-prior pairs and the value from\n # the network's value head\n pass", "def neural_result(self, input):\n n_output = self.network.activate(input)\n if n_output >= 0.5:\n return 2\n else:\n return 1", "def analyze_belief_strength_without_bias(self, G):\r\n n = []\r\n nbs_list = []\r\n for node in G.nodes: #cycles through the nodes of the graph to mine the attributes\r\n n.append(node) #appends each node to a list that will be put into a dictionary\r\n pbs_list = []\r\n og_bs = G.nodes[node]['belief_strength'] #mines the numerical value for a nodes belief strength, from a pre-set node attribute\r\n unc = G.nodes[node]['uncertainty'] #mines the numerical value 
for a nodes belief uncertainty, from a pre-set node attribute\r\n prob = G.nodes[node]['probability']\r\n for pre in G.predecessors(node):\r\n ew = G.edges[pre, node]['weight'] #mines the numerical value of an edge's weight, from a pre-set edge attribute\r\n pre_bs = G.nodes[pre]['belief_strength'] #mines the numerical value for a predecessors belief strength, from a pre-set node attribute\r\n x = ew * pre_bs #determines how much a node values its neighbor's opinion.\r\n pbs_list.append(x) #puts all values for predecessor belief strangths in a list\r\n if len(pbs_list) == 0:\r\n nbs = og_bs\r\n nbs = int(nbs)\r\n else:\r\n apbs = sum(pbs_list)/len(pbs_list) #calculates the average predecessor belief strength value for a node\r\n nbs = min(og_bs + (0.1*prob*unc*apbs), 100) # average predecessor's belief strength is added to the original belief strength.\r\n nbs = max(nbs, -100)\r\n nbs = int(nbs)\r\n nbs_list.append(nbs) #the new belief strengths are appended to a list that will be put into adictionary\r\n change = dict(zip(n, nbs_list)) #creates a dictionary from two lists which stores the nodes as keys and their new belief strengths as values\r\n print(change)\r\n return change #this will be used to update the list in a different function\r", "def invalid_values(net):\n\n check_results = {}\n\n # Contains all element attributes that are necessary to initiate a power flow calculation.\n # There's a tuple with the structure (attribute_name, input type restriction)\n # for each attribute according to pandapower data structure documantation\n # (see also type_checks function)\n\n important_values = {'bus': [('vn_kv', '>0'), ('in_service', 'boolean')],\n 'line': [('from_bus', 'positive_integer'),\n ('to_bus', 'positive_integer'),\n ('length_km', '>0'), ('r_ohm_per_km', '>=0'),\n ('x_ohm_per_km', '>=0'), ('c_nf_per_km', '>=0'),\n ('max_i_ka', '>0'), ('df', '0<x<=1'), ('in_service', 'boolean')],\n 'trafo': [('hv_bus', 'positive_integer'), ('lv_bus', 'positive_integer'),\n ('sn_mva', '>0'), ('vn_hv_kv', '>0'), ('vn_lv_kv', '>0'),\n ('vkr_percent', '>=0'),\n ('vk_percent', '>0'), ('pfe_kw', '>=0'), ('i0_percent', '>=0'),\n ('in_service', 'boolean')],\n 'trafo3w': [('hv_bus', 'positive_integer'), ('mv_bus', 'positive_integer'),\n ('lv_bus', 'positive_integer'),\n ('sn_hv_mva', '>0'), ('sn_mv_mva', '>0'), ('sn_lv_mva', '>0'),\n ('vn_hv_kv', '>0'), ('vn_mv_kv', '>0'), ('vn_lv_kv', '>0'),\n ('vkr_hv_percent', '>=0'), ('vkr_mv_percent', '>=0'),\n ('vkr_lv_percent', '>=0'), ('vk_hv_percent', '>0'),\n ('vk_mv_percent', '>0'), ('vk_lv_percent', '>0'),\n ('pfe_kw', '>=0'), ('i0_percent', '>=0'),\n ('in_service', 'boolean')],\n 'load': [('bus', 'positive_integer'), ('p_mw', 'number'),\n ('q_mvar', 'number'),\n ('scaling', '>=0'), ('in_service', 'boolean')],\n 'sgen': [('bus', 'positive_integer'), ('p_mw', 'number'),\n ('q_mvar', 'number'),\n ('scaling', '>=0'), ('in_service', 'boolean')],\n 'gen': [('bus', 'positive_integer'), ('p_mw', 'number'),\n ('scaling', '>=0'), ('in_service', 'boolean')],\n 'ext_grid': [('bus', 'positive_integer'), ('vm_pu', '>0'),\n ('va_degree', 'number')],\n 'switch': [('bus', 'positive_integer'), ('element', 'positive_integer'),\n ('et', 'switch_type'), ('closed', 'boolean')]}\n\n # matches a check function to each single input type restriction\n type_checks = {'>0': check_greater_zero,\n '>=0': check_greater_equal_zero,\n '<0': check_less_zero,\n '<=0': check_less_equal_zero,\n 'boolean': check_boolean,\n 'positive_integer': check_pos_int,\n 'number': check_number,\n '0<x<=1': 
check_greater_zero_less_equal_one,\n 'switch_type': check_switch_type\n }\n\n for key in important_values:\n if len(net[key]) > 0:\n for value in important_values[key]:\n for i, element in net[key].iterrows():\n check_result = type_checks[value[1]](element, i, value[0])\n if check_result is not None:\n if key not in check_results:\n check_results[key] = []\n # converts np.nan to str for easier usage of assert in pytest\n nan_check = pd.isnull(net[key][value[0]].at[i])\n if nan_check:\n check_results[key].append((i, value[0],\n str(net[key][value[0]].at[i]), value[1]))\n else:\n check_results[key].append((i, value[0],\n net[key][value[0]].at[i], value[1]))\n if check_results:\n return check_results", "def estimate_advantage(self, obs, q_values):\n\n # TODO: Estimate the advantage when nn_baseline is True\n # HINT1: pass obs into the neural network that you're using to learn the baseline\n # extra hint if you're stuck: see your actor's run_baseline_prediction\n # HINT2: advantage should be [Q-b]\n if self.nn_baseline:\n b_n_unnormalized = self.baseline_model(obs)\n b_n = b_n_unnormalized * np.std(q_values) + np.mean(q_values)\n adv_n = (q_values - tf.squeeze(b_n)).numpy()\n # Else, just set the advantage to [Q]\n else:\n adv_n = q_values.copy()\n\n # Normalize the resulting advantages\n if self.standardize_advantages:\n adv_n = (adv_n - np.mean(adv_n)) / (np.std(adv_n) + 1e-8)\n\n return adv_n.astype(np.float32)", "def cost_b_v(self):\n return self._cost_b_v" ]
[ "0.5776241", "0.5750878", "0.5623462", "0.54842955", "0.5386926", "0.52231693", "0.52097505", "0.5113644", "0.5109669", "0.5086685", "0.5073465", "0.50708735", "0.5056254", "0.5033763", "0.5021825", "0.5015476", "0.50106674", "0.5005514", "0.4995753", "0.49826834", "0.4966393", "0.49619266", "0.4957525", "0.49503857", "0.4940665", "0.49244165", "0.49244165", "0.49244165", "0.49244165", "0.49244165", "0.49244165", "0.49244165", "0.49244165", "0.49227518", "0.4919729", "0.4911313", "0.4905583", "0.4896395", "0.48959696", "0.4890918", "0.48896733", "0.48894134", "0.48862803", "0.48862803", "0.48862803", "0.48862803", "0.48862803", "0.48862803", "0.48862803", "0.48852536", "0.48801818", "0.48775497", "0.48774955", "0.48774955", "0.48716152", "0.4870855", "0.48702246", "0.48555374", "0.4854864", "0.4831371", "0.48311788", "0.48306584", "0.48277628", "0.482472", "0.48242116", "0.4822445", "0.48213378", "0.48205054", "0.4819617", "0.48105913", "0.4803089", "0.47956207", "0.47956207", "0.47949058", "0.47926322", "0.47876275", "0.4785001", "0.47837597", "0.47819042", "0.47760442", "0.47744524", "0.47734678", "0.47693202", "0.47660184", "0.4765424", "0.47653174", "0.47619143", "0.47607484", "0.47589195", "0.4757779", "0.47575587", "0.4756708", "0.47563773", "0.4753214", "0.4749549", "0.47460026", "0.47404528", "0.47342733", "0.47323495", "0.47280627" ]
0.5197751
7
Calculates the output size of the last conv layer.
def _get_conv_out(self, shape) -> int: conv_out = self.conv(torch.zeros(1, *shape)) return int(np.prod(conv_out.size()))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def output_size(self) -> int:\n return self.output_dim", "def get_output_shape(self):\n weights = self.W.get_shape().as_list()\n input_size = np.asarray(self.incoming_shape[-3:-1])\n strides = np.asarray(self.strides[-3:-1])\n kernels = np.asarray(weights[0:2])\n num_output = weights[-1]\n dilations = np.asarray(self.dilation_rate)\n if (isinstance(self.padding, list) or isinstance(self.padding, tuple)) and len(self.padding) == 2:\n output_size = np.asarray(\n np.ceil((input_size + 2 * np.asarray(self.padding) - kernels - (kernels - 1) * (\n dilations - 1)) / strides + 1),\n dtype=np.int)\n else:\n output_size = np.asarray(\n np.ceil(input_size / strides) if self.padding == \"SAME\" or self.padding == \"ZEROPAD\" else np.ceil(\n (input_size - (kernels - 1) * dilations) / strides), dtype=np.int)\n \n output_shape = self.incoming_shape[:]\n output_shape[-3:-1] = output_size.tolist()\n output_shape[-1] = num_output\n return output_shape", "def get_final_emb_size(self):\n size = self.n_layers * 1 * 2 * self.hidden_size\n return size", "def output_shape(self) ->torch.Size:\n return self._computed_output_shape()", "def __len__(self):\n num_x, num_y = self.conv_dims()\n return num_x * num_y", "def _output_size_conv2d(conv, size):\n o_size = np.array(size) + 2 * np.array(conv.padding)\n o_size -= np.array(conv.dilation) * (np.array(conv.kernel_size) - 1)\n o_size -= 1\n o_size = o_size / np.array(conv.stride) + 1\n return np.floor(o_size)", "def upperLayersSize(self):\n return sys.getsizeof(self.segment)", "def output_shape(self) ->torch.Size:\n input_shape = self.input_shape\n if self._reduce_mode in {None, 'none', 'None'}:\n return input_shape\n elif self._reduce_mode == 'concat':\n if len(input_shape) > 1:\n return input_shape[:-2] + (input_shape[-1] * input_shape[-2],)\n return input_shape\n else:\n return input_shape[1:]", "def output_size(self) -> int:\n return self.out_sz", "def get_model_output_size(self) -> int:\n pass", "def output_size(self):\n return self._output_size", "def output_size(self):\n return self._output_size", "def get_conv1d_output_size(input_size, kernel_size, stride):\n return ((input_size - kernel_size)//stride) + 1", "def get_conv1d_output_size(input_size, kernel_size, stride):\n return ((input_size - kernel_size)//stride) + 1", "def get_output_dim(self) -> int:\n raise NotImplementedError", "def output_dim(self) -> int:\n return 2 * self._hidden_dim", "def output_dim(self):\n return self._output_dim", "def output_dim(self) -> int:\n return (\n self.mlp_hidden_dims[-1]\n if self.mlp_hidden_dims is not None\n else self.blocks_dims[-1]\n )", "def _total_chunk_size_left(self):\n if self.streaming_type == 'reshape':\n return self.N_l // self.conv_factor\n elif self.streaming_type == 'mask':\n return self.N_l // self.conv_factor * self.n_layers\n elif self.unidir:\n return 10000 // self.conv_factor\n else:\n return 10000 // self.conv_factor", "def get_layer_shape(self,layer_id):\n return self.net.blobs[layer_id].data.shape[1:] # Chop off batch size", "def get_output_dim(self) -> int:\n raise NotImplementedError", "def output_size(self) -> int:\n return self.win_length", "def batch_size(self):\n return self._first_rgb.shape[0]", "def get_output_tensor_size(self, index):\n return self._engine.get_output_tensor_size(index)", "def batch_size(self):\n self.validate_shape_and_dtype()\n return self.rgb.shape[0]", "def outputSize(in_size, kernel_size, stride, padding):\n output = int((in_size - kernel_size + 2 * padding) / stride) + 1\n return output", "def num_layers(self): # 
-> int:\n ...", "def getOutputLength(self):\n return len(self.Y[0])", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n if self not in prev_layers:\n prev_layers += [self]\n incoming = self.incoming(prev_layers=prev_layers, **kwargs)\n with tf.variable_scope(self.layer_scope):\n if self.incoming_shape == self.scale_size:\n self.out = incoming\n else:\n self.out = resize2d(incoming, size=self.scale_size, method=self.method,\n align_corners=self.align_corners)\n if self.method_name == 'AREA':\n self.out = tf.stop_gradient(self.out)\n \n return self.out", "def output_shape(self):\r\n return self.detector.output_shape", "def num_layers(self):\n\n return 2 + self.num_hidden_layers", "def layer_size(self, layer_id): # -> int:\n ...", "def get_output_shape(self):\n return self.output_shape", "def get_output_shape(self):\n return self.output_shape", "def get_output_shape(self):\n return self.output_shape", "def get_output_shape(self):\n return self.output_shape", "def get_output_shape(self):\n return self.output_shape", "def output_width(self):\n\t\treturn self.output_shape_param('W')", "def l_out_conv(layer_num, kernel_size, pool=False):\n l_out_list = []\n l_in = constants.SHAPE_OF_ONE_DATA_POINT[1]\n for i in range(layer_num):\n l_out = l_out_conv1d(l_in, kernel_size, stride=2)\n l_out = l_out_conv1d(l_out, kernel_size, stride=2)\n\n l_out_list.append(l_out)\n\n if pool:\n pool_size = 3\n l_out = l_out_pool(l_out, pool_size)\n l_out_list.append(l_out)\n l_in = l_out\n\n # make a copy and reverse for decoder size def\n\n l_out_list_copy = copy.deepcopy(l_out_list)\n l_out_list.append(32)\n encoder_sizes = l_out_list\n l_out_list_copy.reverse()\n l_out_list_copy.append(constants.SHAPE_OF_ONE_DATA_POINT[1])\n decoder_sizes = l_out_list_copy\n return encoder_sizes, decoder_sizes", "def get_hidden_layer_size(self):\r\n return self.hidden_layer_size", "def get_output_shape(self):\n return self.shape", "def get_out_dim(self) -> int:\n return self.out_dim", "def _input_size(self):\n return self.embedding_size + self.hidden_size", "def size_out(self):\n if isinstance(self.ensemble.neuron_type, Direct):\n # This will prevent users from connecting/probing Direct neurons\n # (since there aren't actually any neurons being simulated).\n return 0\n return self.ensemble.n_neurons", "def outdim(self):\n return len(self.getSensors())", "def set_output_shape(self):\n self.output_shape = ((self.input_shape[0] // self.stride[0],\n self.input_shape[1] // self.stride[1],\n self.input_shape[2]\n ))", "def __len__(self):\n return math.ceil(self.number_of_images / self.batch_size)", "def output_length(self,\n inp_len: Optional[th.Tensor]) -> Optional[th.Tensor]:\n if self.last_choice is None:\n return inp_len\n if inp_len is None:\n return None\n return th.div(inp_len,\n self.src_sr[self.last_choice],\n rounding_mode=\"trunc\") * self.dst_sr[self.last_choice]", "def get_output_shape(self):\n # Get shape of output tensor(s), which will be [samples, n_seq_pos+n_tickersteps, n_units]\n return [s if isinstance(s, int) and s >= 0 else -1 for s in self.incoming_shape[0:2] + [self.n_units]]", "def get_output_shape(self):\n # Get shape of output tensor(s), which will be [samples, n_seq_pos+n_tickersteps, n_units]\n return [s if isinstance(s, int) and s >= 0 else -1 for s in self.incoming_shape[0:2] + [self.n_units]]", "def get_output_shape(self):\n # Get shape of output tensor(s), which will be [samples, n_seq_pos+n_tickersteps, n_units]\n return [s if isinstance(s, int) and s 
>= 0 else -1 for s in self.incoming_shape[0:2] + [self.n_units]]", "def output_height(self):\n\t\treturn self.output_shape_param('H')", "def get_output_shape(self):\n # Get shape of output tensor(s), which will be [samples, n_seq_pos+n_tickersteps, x, y, n_units]\n return [s if isinstance(s, int) and s >= 0 else -1\n for s in self.incoming_shape[0:2] + self.incoming_shape[2:-1] + [self.n_units]]", "def conv_to_fc_size(\n input_shape, conv_depth, pools,\n stride=[2, 2, 2], padding='SAME',\n dropout_keep_prob=1.0):\n h, w, d = input_shape\n if padding == 'SAME':\n for i in range(pools):\n h = math.ceil(float(h) / float(stride[0]))\n w = math.ceil(float(w) / float(stride[1]))\n d = math.ceil(float(d) / float(stride[2])) \n else:\n # 'VALID' padding\n pass\n \n return conv_depth * h * w * d", "def __len__(self):\n return int(np.ceil(self.total_frame_count / self.batch_size))", "def __len__(self):\n _, timesteps, height, width = self.data.shape\n height //= self.size\n width //= self.size\n\n if self.subset == 'train':\n out = self.length\n elif self.subset == 'all':\n out = height * width\n else:\n out = (height // 2) * (width // 2)\n\n if not self.time:\n out *= timesteps\n\n return out", "def _n_features_out(self):\n return self.components_.shape[0]", "def layers_compressed_size(self):\n # don't have this information at this point\n return None", "def layers_compressed_size(self):\n # don't have this information at this point\n return None", "def num_layers(self):\n return self._num_layers", "def output_size(self):\n raise NotImplementedError('This is an interface class, please use a derived instance')", "def calculate_shape_decreases_3D_Net(self, input_crop_size):\n cropsize_x, cropsize_y, cropsize_z = input_crop_size\n input_crop = torch.ones((1, cropsize_z, cropsize_x, cropsize_y))\n net_output, _ = self.forward_net(input_crop)\n _, outsize_z, outsize_y, outsize_x = net_output.size()\n\n return cropsize_x-outsize_x, cropsize_y-outsize_y, cropsize_z-outsize_z", "def output_dims(self) -> Optional[Tuple[int]]:\n return None", "def size(self):\n return self.num_inputs, self.num_outputs", "def compute_output_shape(self, input_shape):\n output_shape = [0] * self.rank\n for d in range(self.rank):\n output_shape[d] = sum(self.paddings[d]) + input_shape[d]\n return tf.TensorShape(output_shape)", "def get_layer_size(self, layer_ind):\n assert(layer_ind < self.num_layers)\n return self._layer_sizes[layer_ind]", "def output_mb(self):\n total_output_size = sum([t.shuffle_mb_written for t in self.tasks])\n return total_output_size", "def get_num_of_output_tensors(self):\n return self._engine.get_num_of_output_tensors()", "def compute_output_shape(self, input_shape):\n batch_size = input_shape[0]\n sequence_length = input_shape[1]\n return (batch_size, sequence_length)", "def size(self):\n\n frame = self.get_frame()\n\n # Unpack array dimensions\n height, width, layers = np.array(frame).shape\n\n return width, height", "def get_output_shape(self):\n return self.out.shape.as_list()", "def compute_output_shape(self, input_shape):\n if tf.keras.backend.image_data_format() == 'channels_first':\n return (input_shape[0][0], input_shape[0][1]) + input_shape[1][2:4]\n\n return (input_shape[0][0],) + input_shape[1][1:3] + (input_shape[0][-1],)", "def get_model_output_dimension(self):\r\n raise NotImplementedError()", "def __len__(self):\n # print(\"len: \" + str(math.floor(len([name for name in os.listdir(self.imgs_dir) if os.path.isfile(self.imgs_dir+'//'+name)])/self.batch_size)-1)\n return 
math.floor(len([name for name in os.listdir(self.imgs_dir) if\n os.path.isfile(self.imgs_dir + '//' + name)]) / self.batch_size)", "def get_frame_size(self):\n return self._frames.shape[-1]", "def __len__(self):\n return self.flat_image.size", "def size_out(self):\n return self.dimensions", "def size(self) -> tf.Tensor:", "def compute_output_shape(self, input_shape):\r\n return input_shape", "def __len__(self):\n\n return math.ceil(len(self.img_files) * self.gen_count / self.batch_size)", "def get_num_hidden(self) -> int:\n return self.output_dim", "def __find_net_dims(self):\n\n input_params = INPUT_CHANNELS * INPUT_SIZE ** 2\n net_dims = [input_params]\n for w in self._conv_weights + self._lin_weights:\n net_dims.append(w.shape[0])", "def __len__(self):\n return int(np.ceil(len(self.image_filenames) / (self.batch_size)))", "def get_size(self):\n return self.get_params().shape[0]", "def target_size(self) -> pulumi.Output[float]:\n return pulumi.get(self, \"target_size\")", "def set_output_shape(self):\n self.output_shape = (reduce(mul, self.input_shape),)", "def get_height(self):\n height = 0\n for layer, ldata in self.conf['Layers'].items():\n layer_t = ldata['params']['thickness']\n height += layer_t\n return height", "def estimate_cudnn_parameter_size(input_size, hidden_size, direction):\n single_rnn_size = 8 * hidden_size + 4 * (hidden_size * input_size) + 4 * (hidden_size * hidden_size)\n return direction * single_rnn_size", "def num_channels_per_output(cls) -> list[tuple[int, ...]]:\n return [\n (16, 24, 40, 112, 320),\n (16, 24, 40, 112, 320),\n (16, 24, 48, 120, 352),\n (24, 32, 48, 136, 384),\n (24, 32, 56, 160, 448),\n (24, 40, 64, 176, 512),\n (32, 40, 72, 200, 576),\n (32, 48, 80, 224, 640),\n (32, 56, 88, 248, 704),\n (72, 104, 176, 480, 1376),\n ]", "def compute_output_shape(self, input_shape):\n \n assert input_shape and len(input_shape) == 2\n return input_shape[0], self.n_clusters", "def layers_sizes(self):\n return iter([self.delta_h*l for l in range(int(self.h/self.delta_h)-1)])", "def get_output_shape(self):\n return self.incoming_shape", "def get_output_shape(self):\n return self.incoming_shape", "def get_output_shape(self):\n return self.incoming_shape", "def convert_size(g, op, block):\n\n input_x = g.get_node(op.input(\"Input\")[0])\n out = _op.ndarray_size(input_x, dtype=\"int64\")\n out = _op.expand_dims(out, axis=0)\n g.add_node(op.output(\"Out\")[0], out)", "def compute_level_output_shape(n_filters, depth, pool_size, image_shape):\n output_image_shape = np.asarray(np.divide(image_shape, np.power(pool_size, depth)), dtype=np.int32).tolist()\n return tuple([None, n_filters] + output_image_shape)", "def compute_level_output_shape(n_filters, depth, pool_size, image_shape):\n output_image_shape = np.asarray(np.divide(image_shape, np.power(pool_size, depth)), dtype=np.int32).tolist()\n return tuple([None, n_filters] + output_image_shape)", "def get_output_shape(self):\n return [s if isinstance(s, int) and s >= 0 else -1 for s in self.incoming_shape[:-1]] + [self.n_units]" ]
[ "0.72754544", "0.70678294", "0.70496345", "0.7000903", "0.68878484", "0.6862838", "0.6813435", "0.6802265", "0.67503273", "0.6709293", "0.6696119", "0.6696119", "0.66828674", "0.66828674", "0.66640866", "0.6636642", "0.66120887", "0.65727687", "0.65476394", "0.6546968", "0.649225", "0.6488163", "0.647176", "0.6433944", "0.64070106", "0.63956326", "0.6384113", "0.63741004", "0.6338352", "0.6329885", "0.6321152", "0.6320208", "0.63181686", "0.63181686", "0.63181686", "0.63181686", "0.63181686", "0.6313435", "0.6313048", "0.62926173", "0.6286019", "0.626248", "0.62507516", "0.6235358", "0.6210238", "0.6205025", "0.61896455", "0.618706", "0.6169863", "0.6169863", "0.6169863", "0.61184937", "0.6089494", "0.6081189", "0.6043143", "0.60397446", "0.603698", "0.60339123", "0.60339123", "0.60296345", "0.6029119", "0.60265887", "0.60224426", "0.6015501", "0.60112196", "0.60099113", "0.60058385", "0.59925956", "0.5988687", "0.59501857", "0.5946426", "0.59398085", "0.5938301", "0.5928663", "0.5927673", "0.59269863", "0.5923864", "0.59217745", "0.5900508", "0.590011", "0.5895272", "0.5888301", "0.58877516", "0.5880676", "0.58731276", "0.58724433", "0.58674884", "0.58520764", "0.58492595", "0.5848929", "0.58446085", "0.58377725", "0.58377725", "0.58377725", "0.582256", "0.58151716", "0.58151716", "0.5811239" ]
0.6926029
6
Forward pass through network.
def forward(self, input_x) -> Tensor: conv_out = self.conv(input_x).view(input_x.size()[0], -1) return self.head(conv_out)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward(self):\n pass", "def forward(self):\n pass", "def forward(self, input):\n\n return self.network(input)", "def forward(self, input):\n\n return self.network(input)", "def forward(self, input):\n\n return self.network(input)", "def forward_pass(self):", "def forward(self, forward):\n\n self._forward = forward", "def fastforward(self):\n self.run_command('fastforward')", "def forward(self, x):\n self.save_net()\n self.perturb_tensors()\n out = self.net.forward(x)\n return out", "def _forward(self, z):\n raise NotImplementedError(\"Forward shouldn't be called!\")", "def forward(self, x):\n return self.net(x)", "def forward( self ):\n self._has_change = True\n print( \"Forward\" )", "def forward(self):\n raise NotImplemented", "def forward(self):\n raise NotImplemented", "def forward(self):\n raise NotImplemented", "def forward(self, x):\n pass", "def forward(self, *args, **kwargs):\n pass", "def forward(self, x_in):\r\n # x_out = torch.zeros_like(x_in)\r\n\r\n for layer in self.layers: #Call forward function of each layer in order\r\n x_out = layer.forward(x_in)\r\n # print(\"Forward pass Seq: \", layer, x_in, x_out)\r\n x_in = x_out # output of the layer is passed as input to the next layer\r\n self.temp = x_in\r\n return x_out", "def forward(self, x):\n\t\t#######################################################################\n\t\t# ** START OF YOUR CODE **\n\t\t#######################################################################\n\n\t\toutput = self._layers[0].forward(x)\n\t\tfor i in range(1, len(self._layers)):\n\t\t\toutput = self._layers[i].forward(output)\n\t\treturn output\n\n\t\t#######################################################################\n\t\t# ** END OF YOUR CODE **\n\t\t#######################################################################", "def forward(p):\n try:\n if IP in p and p[IP].dst == RD_ADRRESS and p[Ether].src != GW_MAC_ADRRESS and p[Ether].dst == GW_MAC_ADRRESS:\n if p[IP].src not in black_list:\n send(p[1::], iface=IFACE, verbose=0)\n except:\n print(\"error in forward\")\n finally:\n sys.exit()", "def forward(self, x):\r\n out = x + self.conv_block(x) # add skip connections\r\n return out", "def forward(self, inputs):\n\n down0 = self.layer_0(inputs=inputs)\n down1 = self.layer_1(inputs=down0)\n down2 = self.layer_2(inputs=down1)\n down3 = self.layer_3(inputs=down2)\n down4 = self.layer_4(inputs=down3)\n\n up1 = self.layer_7(down4, down3)\n\n up2 = self.layer_8(up1, down2)\n\n up3 = self.layer_9(up2, down1)\n\n up4 = self.layer_10(up3, down0)\n\n up5 = self.layer_11(up4)\n return up5", "def forward(self, distance):\n self.logger.debug(\"forward \" + str(distance))", "def forward(self, x):\n out = x + self.conv_block(x) # add skip connections\n return out", "def forward(self, x):\n out = x + self.conv_block(x) # add skip connections\n return out", "def forward(self, x):\n out = x + self.conv_block(x) # add skip connections\n return out", "def base_forward(self, x):\r\n pass", "def _forward(self, X, **kwargs):\n raise NotImplementedError()", "def go_forward(self):\n command = _build_robovac_command(RobovacModes.GO_FORWARD, RobovacCommands.MOVE)\n message = self._build_command_user_data_message(command)\n\n self._send_packet(message, False)", "def forward(self) -> None:\n self.system.notify(\"Jarvis::Forward\")\n self.media.fast_forward()", "def forward(self)->None:", "def forward(self, x, **kwargs):\n pass", "def forward_graph(self):\n raise NotImplementedError", "def forward(self, output, target):\n raise NotImplementedError", "def 
forward(self, input):\n raise NotImplementedError", "def forward(self, input):\n raise NotImplementedError", "def forward(self):\n self.position += 1", "def forward(self, *args):\n raise NotImplementedError", "def forward(self, *args):\n raise NotImplementedError", "def network_forward(self, X):\n \n #############################################################################\n # TODO: Perform a forward pass on the network and store the caches of #\n # each layer inside the cache_list #\n #############################################################################\n ActivationFunction = None\n if self.hidden_activation_fn == \"sigmoid\":\n ActivationFunction = lambda x: self.sigmoid_forward(x)\n elif self.hidden_activation_fn == \"tanh\":\n ActivationFunction = lambda x: self.tanh_forward(x)\n elif self.hidden_activation_fn == \"relu\":\n ActivationFunction = lambda x: self.relu_forward(x)\n\n Layer1Value, cacheL1 = self.fully_connected_forward(X, self.params[\"W1\"], self.params[\"b1\"])\n Layer1ValueActivation, cacheL1A = ActivationFunction(Layer1Value)\n scores, cacheL2 = self.fully_connected_forward(Layer1ValueActivation, self.params[\"W2\"], self.params[\"b2\"])\n\n # Cache\n cache_list =[cacheL1, cacheL1A, cacheL2]\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return scores, cache_list", "def forward(self, states):\n raise NotImplementedError()", "def move_forward(power):\n message = \"FORWARD:\" + str(power) + '\\n'\n sock.sendall(message)\n return", "def forward(network, X):\r\n activations = []\r\n input = X\r\n for i in range(len(network)):\r\n activations.append(network[i].forward(X))\r\n X = network[i].forward(X)\r\n \r\n assert len(activations) == len(network)\r\n return activations", "def forward(self, *args, **kwargs):\n raise NotImplementedError", "def forward(self, input_tensor: torch.Tensor):\n self.network_output = self.network.forward(input_tensor.type(self.data_type))\n return self.network_output", "def forward_once(self, x):\n\t\t#x = F.normalize(self.network(x), p=2)\n\t\tx = self.network(x)\n\t\treturn x", "def forward(self):\n self.img_gen, self.loss_reg, self.parsav = self.net_G(self.input_P1, self.input_P2, self.input_BP1, self.input_BP2, self.input_SPL1, self.input_SPL2)", "def _forward(self, x):\n global global_epoch\n global_epoch += 1\n bias = -np.ones((x.shape[0], 1))\n tail = np.zeros((x.shape[0], self.dim_hid+self.dim_out))\n nodes = np.concatenate((bias, x, tail), axis=1)\n weight = self.weight * self.connectivity\n for i in range(self.dim_in, self.dim_in+self.dim_hid+self.dim_out):\n net = nodes.dot(weight[i])\n nodes[:,i] = self.__sigmoid(net)\n nodes[:,self.dim_in:self.dim_in+self.dim_hid] *= self.hidden\n return nodes", "def fastforward_all():\n\twhile _running:\n\t\t_running[0].fastforward(noerror=True)", "def forward(self, input):\n raise NotImplementedError()", "def move_forward(self, dist):\r\n self.send_command_without_response(f'forward {dist}')", "def move_forward():\n twister = Twist(linear=Vector3(x=0.5,y=0,z=0),angular=Vector3(x=0,y=0,z=0))\n pub.publish(twister)", "def forward(self, srcip, packet): #gets entire packet and srcip of that packet\n # get route to send packet\n best_route = self.get_route(srcip, packet[DEST]) #is a socket\n\n sock = best_route\n\n\n jsonpack = json.dumps(packet)\n sock.sendall(jsonpack.encode())\n # TODO fix src and dest\n return True", "def forward(self, x):\n out = 
self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n out = self.maxpool(out)\n out = self.avgpool(out)\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = self.avgpool(out)\n out = torch.flatten(out, 1)\n out = self.fc(out)\n return out", "def forward(self, x):\r\n x = self.conv1(x)\r\n x = self.conv1_BN(x)\r\n x = F.relu(x)\r\n x = self.conv1_dp(x)\r\n x = self.Block2_1(x)\r\n x = self.Block2_2(x)\r\n x = self.Block3_1(x)\r\n x = self.Block3_2(x)\r\n x = self.Block3_3(x)\r\n x = self.Block3_4(x)\r\n x = self.Block4_1(x)\r\n x = self.Block4_2(x)\r\n x = self.Block4_3(x)\r\n x = self.Block4_4(x)\r\n x = self.Block5_1(x)\r\n x = self.Block5_2(x)\r\n x = self.MP(x)\r\n x = x.view(x.size(0),-1)\r\n x = self.fc(x)\r\n \r\n return x", "def forward(self, x):\n raise NotImplementedError", "def forward(self, x):\n raise NotImplementedError", "def test_forward(self):\n validate_forward()", "def forward(self, *args, **kwargs):\n\n raise NotImplementedError()", "def forward(self, srcif, packet):\n # packet is already decoded\n def send_no_route():\n send_src = srcif[:-1]\n send_src += '1'\n self.sockets[srcif].send(json.dumps({\n SRCE: send_src,\n DEST: packet[SRCE],\n TYPE: NRTE,\n MESG: {}\n }).encode())\n # GEt correct route.\n sock_addr = self.get_route(srcif, packet[DEST])\n\n # If no route available, send no route message back\n if sock_addr == None:\n send_no_route()\n else:\n sock = self.sockets[sock_addr]\n # If socket is available, send to proper neighbor.\n sock.send(json.dumps(packet).encode())\n return False", "def forward(self, speed):\n self.controller.forward(speed)", "def forward(self, srcif, packet) -> bool:\n chosen_route = self.get_route(srcif, packet[DEST])\n if chosen_route is None:\n return False\n self.sockets[chosen_route[PEER]].sendall(json.dumps(packet).encode())\n return True", "def forward(self, inputs, outputs):\n super(copy, self).adjoint(inputs, outputs)", "def forward(self, s):", "def forward(self, srcif, packet): \n # TODO: will need to massively update this \n #print(\"PACKET FROM DATA: {0}\".format(packet))\n #print(\"ROUTING TABLE IS: {0}\".format(self.routes))\n dest = packet[\"dst\"]\n chosen_router = self.get_route(srcif, dest)\n if chosen_router is None:\n return False\n\n #TODO implement most specific route and business routes\n outroutes = []\n #print(\"CHOSEN ROUTER ISSSSSSSSSSSSSSSSSS\", chosen_router) \n #print(\"THIS IS FOR FORWARD:\", json.dumps(packet).encode(\"ascii\"))\n chosen_router.send(json.dumps(packet).encode(\"ascii\"))\n #return may need to be changed \n return True", "def forward(self, batch):\n raise NotImplementedError", "def forward(self, batch):\n raise NotImplementedError", "def forward(self, input, context, state):\n raise NotImplementedError", "def forward(self, x):\n x = self.efficient_net(x)\n return x", "def forward(self):\n print('forward')\n self.linearVector = Vector3(x=1.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=0.0)", "def forward(self, x):\n x = self.conv1(x)\n if self.use_bn:\n x = self.batchnorm(x)\n if self.use_dropout:\n x = self.dropout(x)\n x = self.activation(x)\n x = self.conv2(x)\n if self.use_bn:\n x = self.batchnorm(x)\n if self.use_dropout:\n x = self.dropout(x)\n x = self.activation(x) \n x = self.maxpool(x) \n return x", "def forward_train(self, *args, **kwargs):\n pass", "def step_forward(self):\n if self.state_num < len(self.steps):\n print(\"\\nStepping forward to state %d.\" % int(self.state_num + 1))\n self.state_string[0] 
= \"Stepping forward to state \" + str(self.state_num + 1) + \".\"\n # Get process and resource involved.\n process = self.steps[self.state_num][0]\n resource = self.steps[self.state_num][2]\n # Is this a request?\n if self.steps[self.state_num][1]:\n print(\"Process %d requests resource %d.\" % (process, resource))\n self.state_string[1] = \"Process \" + str(process) + \" requests resource \" + str(resource) + \".\"\n # Is the resource not being used by a process?\n if self.available[resource] > 0:\n # Mark in hold matrix the relationship between resource and process.\n self.hold_edges[resource][process] += 1\n # Make resource unavailabe.\n self.available[resource] -= 1\n # Store the process ID that holds the resource.\n self.connected_v[resource] = process\n else:\n # Mark in request matrix the relationship between resource and process.\n self.request_edges[resource][process] += 1\n # Add our process to the graph and make a directed edge.\n if process not in self.graph:\n self.graph.add_vertex(process)\n if self.connected_v[resource] not in self.graph:\n self.graph.add_vertex(self.connected_v[resource])\n if not self.graph.does_edge_exist(process, self.connected_v[resource]):\n self.graph.add_edge(process, self.connected_v[resource])\n print(\"p{:d} --> p{:d}\".format(process, self.connected_v[resource]))\n else:\n print(\"Process %d releases resource %d.\" % (process, resource))\n self.state_string[0] = \"Process \" + str(process) + \" releases resource \" + str(resource) + \".\"\n # Remove connection in hold matrix.\n self.hold_edges[resource][process] -= 1\n # Does another process want this resource?\n if np.count_nonzero(self.request_edges[resource]) > 0:\n # Get next process that wants the resource.\n new_process = self.request_edges[resource].index(1)\n # Mark in hold matrix the relationship between resource and process.\n self.hold_edges[resource][new_process] += 1\n # Store the process ID that holds the resource.\n self.connected_v[resource] = new_process\n # Remove connection in request matrix.\n self.request_edges[resource][new_process] -= 1\n # Delete edge if it exists.\n if self.graph.does_edge_exist(new_process, self.connected_v[resource]):\n self.graph.delete_edge(new_process, self.connected_v[resource])\n print(\"Process %d now has resource %d.\" % (new_process, resource))\n self.state_string[1] = \"Process \" + str(new_process) + \" now has resource \" + str(resource) + \".\"\n else:\n print(\"Resource %d is now available.\" % resource)\n self.state_string[1] = \"Resource \" + str(resource) + \" is now available.\"\n # Mark resource as unowned by a process.\n self.available[resource] += 1\n # Empty process that owned the resource previously.\n self.connected_v[resource] = None\n # Advance the state.\n self.state_num += 1", "def forward(self, adj, z, n_nodes):\n x = z.repeat(n_nodes, 1)\n sequence = self.gcn(x, adj)\n\n return sequence", "def forward(self, x, mask):\n \"Follow Figure 1 for connections.\"\n x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))\n return self.sublayer[1](x, self.feed_forward)", "def test_propagate_forward(self):\n nn = NeuralNet(0, 0, '', '', blank=True)\n nn.create_net(2, 1, 2, 2)\n\n # Override weights to static value for reproducibility\n for node in nn.layers[2].nodes:\n node.weights = [0.6, 0.6]\n\n for node in nn.layers[3].nodes:\n node.weights = [1.0, 1.0]\n\n nn.propagate_forward([2, 3], test=True)\n model_output = nn.layers[-1].nodes[0].value\n\n self.assertEqual(round(model_output, 3), 0.823)", "def forward(self, x):\n 
residues = []\n # Downward Pass\n x = self.layers[0](x.unsqueeze(1))\n for layer in self.layers[1:self.half]:\n x = layer(x)\n residues.insert(0, x)\n\n # Upward Pass\n for idx, layer in enumerate(self.layers[self.half:(len(self.layers)-1)]):\n x = layer(x, residues[idx])\n x = self.layers[-1](x)\n\n return(x)", "def forward(self):\n #print('forward\\r')\n self.linearVector = Vector3(x=1.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=0.0)", "def forward(self, *inputs):\n raise NotImplementedError", "def move_forward():\n pass", "def step_forward(self):", "def forward(self, x):\n x = self._activation(self.fully_connected_1(x))\n x = self._activation(self.fully_connected_2(x))\n x = self.dropout(x)\n x = self._activation(self.fully_connected_3(x))\n x = self._activation(self.fully_connected_4(x))\n x = self.dropout(x)\n x = self._activation(self.fully_connected_5(x))\n return self.fully_connected_out(x)", "def forward_train(self, *args, **kwargs):\n raise NotImplementedError('This interface should not be used in current training schedule. Please use `train_step` for training.')", "def feed_forward(self):\n pre = self.pre_layer.o\n self.post_layer.i = torch.matmul(pre, self.weight)", "def move_forward(self):\n self.x, self.y = self.compute_positions()", "def _forward(self, x, X, upto=None):\n if upto is not None: # cannot use 'if upto' here since it is 0-indexed\n # and layer0 is the first layer\n assert 0<=upto<=self._layer_counter\n counter = upto + 1\n else: counter = self._layer_counter\n\n y_previous, Y_previous = x, X\n # TODO: because we always need to compute F_i(X) at each layer i, this\n # is a huge overhead\n # feedforward\n for i in range(counter):\n layer = getattr(self, 'layer'+str(i))\n y, Y = layer(y_previous, Y_previous), layer(Y_previous, Y_previous)\n y_previous, Y_previous = y, Y\n\n return y", "def forward(source, destination):\n string = b' '\n while string:\n string = source.recv(10240)\n if string:\n destination.sendall(string)\n else:\n destination.shutdown(socket.SHUT_WR)\n try:\n source.shutdown(socket.SHUT_RD)\n except socket.error as ex:\n if ex.errno not in (57, 107): # pragma: no cover\n # socket.error: [Errno 57] Socket is not connected\n # error: [Errno 107] Transport endpoint is not connected\n raise", "def forward(self, inputs):\r\n #print (len(inputs))\r\n out = self.fc1(inputs)\r\n out = self.fc2(out)\r\n self.out = out\r\n return out\r\n #raise NotImplementedError('Implement the forward method of the model')\r", "def __call__(self, *args, **kwargs):\n return self.forward(*args, **kwargs)", "def go_forward(net):\n global w, back_loss, loss, l2_loss\n start_forward_time = time.time()\n\n # feed in data\n P = net(w).t()\n\n # calculate loss\n Y = P.mv(X)\n Ybar = Y.mean()\n back_loss = (Y - Ybar).norm(1) / (J)\n loss = back_loss / Ybar\n l2_loss = ((Y - Ybar).norm(2) ** 2) / (J * Ybar)\n\n return time.time() - start_forward_time", "def forward_pass(self, inputs):\n self._rbf_forward(inputs)\n self._slp_forward()\n return self.slp_outputs", "def forward(self, inputs):\n raise NotImplementedError", "def forward_batch(self,batcher, phase=0):\n pass", "def forward(self, x):\n # define feedforward behavior, applying activations as necessary\n out = self.leaky_relu(self.conv1(x))\n out = self.leaky_relu(self.conv2(out))\n out = self.leaky_relu(self.conv3(out))\n out = self.leaky_relu(self.conv4(out))\n\n out = self.res_blocks(out)\n\n out = self.leaky_relu(self.deconv1(out))\n out = self.leaky_relu(self.deconv2(out))\n out = 
self.leaky_relu(self.deconv3(out))\n\n # tanh applied to last layer\n out = F.tanh(self.out_layer(out))\n out = torch.clamp(out, min=-0.5, max=0.5)\n\n return out", "def __feed_forward(self, X):\n # go over all layers\n for layer in self.__layers:\n X = layer.compute_act(X)\n\n return X", "def forward(self, x):\n c_out = self.conv_net.forward(x)\n\n c_out_flat = c_out.flatten(start_dim=1)\n \n \n return self.linear.forward(c_out_flat)", "def adjoint(self, inputs, outputs):\n super(copy, self).forward(inputs, outputs)", "def feedForward(self, inputs):\n\n\t\tinputs = np.atleast_1d(inputs)\n\n\t\tif not len(inputs) == self.nInputs:\n\n\t\t\traise ValueError(\"The input vector is the wrong length for this network\")\n\n\t\t#don't forget we have a bias unit in here too\n\t\tfor i in range(1,self.nInputs+1):\n\t\t\tself.inputLayer[i].activation = inputs[i-1]\n\t\t\tself.inputLayer[i].output = inputs[i-1]\t\t\t\n\n\t\tfor layer in self.hiddenLayers:\n\n\t\t\tfor unit in layer:\n\n\t\t\t\tunit.forwardValue()\n\n\t\tfor unit in self.outputLayer:\n\t\n\t\t\tunit.forwardValue()", "def forward(self, X, training=False):\n pass", "def forward(self, state, action):\n x = torch.cat((state, action), dim=1)\n return self.net(x)", "def forward(self, x):\n x = self.input(x)\n x = self.in0(x)\n x = self.block0(x) + x\n x = self.block1(x) + x\n x = self.block2(x) + x\n x = self.block3(x) + x\n x = self.block4(x) + x\n x = self.in0(x)\n\n out = self.out(x)\n\n return out" ]
[ "0.7486405", "0.7486405", "0.72931826", "0.72931826", "0.72931826", "0.72568643", "0.71754724", "0.70931304", "0.70689535", "0.7054133", "0.69913656", "0.6969786", "0.69356275", "0.69356275", "0.69356275", "0.6921335", "0.6920985", "0.6747466", "0.6711534", "0.67010707", "0.66525286", "0.6641545", "0.6623176", "0.6617483", "0.6617483", "0.6617483", "0.66074246", "0.6604059", "0.6589724", "0.6578781", "0.6560978", "0.6535647", "0.65293694", "0.65143543", "0.65041804", "0.65041804", "0.64726806", "0.6471294", "0.6471294", "0.6459609", "0.6454982", "0.6453049", "0.6440824", "0.6428473", "0.6410913", "0.6408422", "0.64007837", "0.63997585", "0.63960415", "0.63766634", "0.63728476", "0.6368438", "0.636231", "0.63491744", "0.63488925", "0.6317315", "0.6317315", "0.63163906", "0.63152647", "0.6309884", "0.6307033", "0.62857485", "0.6271954", "0.62711734", "0.62630504", "0.6253357", "0.6253357", "0.62506914", "0.6249723", "0.6248265", "0.62186116", "0.620725", "0.6206563", "0.619249", "0.61901283", "0.61656743", "0.61607224", "0.6154688", "0.6152447", "0.6138764", "0.6131553", "0.61183935", "0.61165875", "0.6115646", "0.61136824", "0.61108863", "0.61102486", "0.6110234", "0.6106236", "0.6099183", "0.6097814", "0.6097745", "0.6095042", "0.6089835", "0.6052115", "0.6044877", "0.60431135", "0.60409063", "0.6040325", "0.6039296", "0.603644" ]
0.0
-1
Initializes or resets the parameters of the layer.
def reset_parameters(self) -> None: std = math.sqrt(3 / self.in_features) self.weight.data.uniform_(-std, std) self.bias.data.uniform_(-std, std)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset_parameters(self, param_init):\n logger.info('===== Initialize %s with lecun style =====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n init_with_lecun_normal(n, p, param_init)", "def reset_parameters(self):\n init_method = getattr(init, self.initialization)\n for layer in range(self.num_layers):\n fc = self.get_fc(layer)\n init_method(fc.weight.data)\n if self.use_bias:\n init.constant(fc.bias.data, val=0)\n init_method(self.out.weight.data)\n init.constant(self.out.bias.data, val=0)", "def reset_parameters_lecun(self, param_init=0.1):\n logger.info('===== Initialize %s with lecun style =====' % self.__class__.__name__)\n for conv_layer in [self.pointwise_conv1, self.pointwise_conv2, self.depthwise_conv]:\n for n, p in conv_layer.named_parameters():\n init_with_lecun_normal(n, p, param_init)", "def reset_parameters_lecun(self, param_init=0.1):\n logger.info('===== Initialize %s with lecun style =====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n init_with_lecun_normal(n, p, param_init)", "def initialize(self):\n self.conv1.reset_parameters()\n self.conv2.reset_parameters()", "def reset_parameters(self):\n\n for layer in self.layers:\n layer.reset_parameters()", "def reset_parameters(self):\n self.conv_in.reset_parameters()\n self.conv_out.reset_parameters()\n if self.lin is not None:\n self.lin.reset_parameters()", "def reset_parameters(self):\n # for item in self.layer_dict.children():\n self.encoder.reset_parameters()\n self.vq.reset_parameters()\n self.generator.reset_parameters()\n\n self.speaker_dict.reset_parameters()\n self.speaker_dense.reset_parameters()", "def reset_parameters(self, param_init):\n logger.info('===== Initialize %s =====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n if p.dim() == 1:\n nn.init.constant_(p, 0.0)\n logger.info('Initialize %s with %s / %.3f' % (n, 'constant', 0.0))\n elif p.dim() == 2:\n nn.init.uniform_(p, a=-param_init, b=param_init)\n logger.info('Initialize %s with %s / %.3f' % (n, 'uniform', param_init))\n else:\n raise ValueError(n)", "def reset_params(self):\n for pp in self.params:\n if 'optimizer_param' in pp.tags:\n pp.set_value(np.zeros(pp.get_value(borrow=True).shape, dtype=theano.config.floatX))", "def _reset_parameters(self) -> None:\n self._setup_input = {\n \"P\": csc_matrix(2.0 * self.opt.P(self.p).toarray()),\n \"q\": self.opt.q(self.p).toarray().flatten(),\n }\n if self.opt_type in CONSTRAINED_OPT:\n A = self.opt.A(self.p)\n b = self.opt.b(self.p)\n self._setup_input[\"A\"] = csc_matrix(\n cs.vertcat(self.opt.M(self.p), A, -A).toarray()\n )\n self._setup_input[\"l\"] = (\n cs.vertcat(-self.opt.c(self.p), -b, b).toarray().flatten()\n )", "def reset_parameters(self, param_init):\n logger.info('===== Initialize %s with uniform distribution =====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n if 'conv' in n:\n continue\n init_with_uniform(n, p, param_init)", "def restore_init_param_dict(self):\n self.param_dict = self._init_param_dict\n self._set_primary_behaviors()", "def __init__( self, parameters={} ):\n self.params = {}\n self.reset(parameters)", "def init_params(self):\n self.clear()\n self._init_load_data()\n self._init_net_delay_data()", "def resetParams(self):\n self.prediction = cons.init_pred # Classifier payoff - initialized to a constant initial payoff value\n self.error = cons.init_err # Classifier error - initialized to a constant initial error value\n self.fitness = cons.init_fit # Classifier fitness - initialized to a 
constant initial fitness value", "def reset_parameters(self):\n self.embedding.reset_parameters()\n self.init_embedding()", "def set_params(self):\r\n pass", "def reset_parameters(self, param_init):\n logger.info('===== Initialize %s with uniform distribution =====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n init_with_uniform(n, p, param_init)", "def reset_state(self):\n \n dvals = [float(val) for val in self.base_param.values()]\n self._parent.set_parameters(dvals)\n super(type(self._parent), self._parent).run_iteration()", "def reset_parameters(self,z0):\n self._set_z0(z0)\n self._set_parameters()", "def reset_parameters(self, param_init):\n logger.info('===== Initialize %s with kaiming_uniform style =====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n if p.dim() == 1:\n nn.init.constant_(p, 0.0)\n logger.info('Initialize %s with %s / %.3f' % (n, 'constant', 0.0))\n elif p.dim() in [2, 4]:\n nn.init.kaiming_uniform_(p, mode='fan_in', nonlinearity='relu')\n logger.info('Initialize %s with %s / %.3f' % (n, 'kaiming_uniform', param_init))\n else:\n raise ValueError(n)", "def set_params(self):\n raise NotImplementedError", "def reset_parameters(self):\n logger.info('===== Initialize %s =====' % self.__class__.__name__)\n nn.init.normal_(self.embed.weight, mean=0.0, std=self.d_model ** -0.5)\n nn.init.constant_(self.embed.weight[self.pad], 0)\n if self.output is not None and not self.tie_embedding:\n nn.init.xavier_uniform_(self.output.weight)\n nn.init.constant_(self.output.bias, 0.0)", "def reset_parameters(self):\n self.lin.reset_parameters()\n self.att.reset_parameters()\n self.gnn_score.reset_parameters()\n if self.gnn_intra_cluster is not None:\n self.gnn_intra_cluster.reset_parameters()\n self.select.reset_parameters()", "def reset_parameters(self, param_init):\n logger.info('===== Initialize %s with uniform distribution =====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n if 'score.monotonic_energy.v.weight_g' in n or 'score.monotonic_energy.r' in n:\n logger.info('Skip initialization of %s' % n)\n continue\n if 'score.monotonic_energy.conv1d' in n:\n logger.info('Skip initialization of %s' % n)\n continue\n if 'score.chunk_energy.v.weight_g' in n or 'score.chunk_energy.r' in n:\n logger.info('Skip initialization of %s' % n)\n continue\n if 'linear_lm_gate.fc.bias' in n and p.dim() == 1:\n nn.init.constant_(p, -1.0)\n logger.info('Initialize %s with %s / %.3f' % (n, 'constant', -1.0))\n continue\n init_with_uniform(n, p, param_init)", "def restore_parameters(self):\n for p in self.parameters:\n setattr(self, p, self.parameters[p].init_value)\n self.set_symmetry()", "def _reset_params(self):\n for p in self.parameters():\n if p.dim() > 1:\n torch.nn.init.xavier_normal_(p)", "def resetparams(self, parameters):\n self.weights = None\n try:\n self.params = parameters\n except AttributeError:\n # Variable self.params does not exist, so not updated\n # Create an empty set of params for future reference\n self.params = {}", "def reset_parameters(self):\n ih = (param for name, param in self.named_parameters() if 'weight_ih' in name)\n hh = (param for name, param in self.named_parameters() if 'weight_hh' in name)\n b = (param for name, param in self.named_parameters() if 'bias' in name)\n for t in ih:\n torch.nn.init.xavier_uniform_(t)\n for t in hh:\n torch.nn.init.orthogonal_(t)\n for t in b:\n torch.nn.init.constant_(t, 0)", "def reset_parameters(self):\n ih = (param for name, param in self.named_parameters() if 
'weight_ih' in name)\n hh = (param for name, param in self.named_parameters() if 'weight_hh' in name)\n b = (param for name, param in self.named_parameters() if 'bias' in name)\n for t in ih:\n torch.nn.init.xavier_uniform_(t)\n for t in hh:\n torch.nn.init.orthogonal_(t)\n for t in b:\n torch.nn.init.constant_(t, 0)", "def reset_parameters(self):\n ih = (param for name, param in self.named_parameters() if 'weight_ih' in name)\n hh = (param for name, param in self.named_parameters() if 'weight_hh' in name)\n b = (param for name, param in self.named_parameters() if 'bias' in name)\n for t in ih:\n torch.nn.init.xavier_uniform_(t)\n for t in hh:\n torch.nn.init.orthogonal_(t)\n for t in b:\n torch.nn.init.constant_(t, 0)", "def set_parameters(self):\n\n if self.model_with_set_params:\n return\n\n self._model_with_set_params = self._parameter_values.process_model(\n self._unprocessed_model, inplace=False\n )\n self._parameter_values.process_geometry(self.geometry)\n self.model = self._model_with_set_params", "def __init__(self):\n self.layer_scope = None\n self.out = None", "def initializeParameters(self):\n\n self.params[2].value = False\n self.params[3].enabled = False\n self.params[7].value = True\n self.params[7].enabled = False\n self.params[8].value = None\n self.params[8].enabled = False", "def reset_parameters(self):\n self.lstm.reset_parameters()", "def _reset_parameters(self):\n self._solver_input[\"P\"] = cvxopt.matrix(2.0 * self.opt.P(self.p).toarray())\n self._solver_input[\"q\"] = cvxopt.matrix(self.opt.q(self.p).toarray().flatten())\n if self.opt_type in CONSTRAINED_OPT:\n if self.opt.nk > 0:\n self._solver_input[\"G\"] = cvxopt.matrix(-self.opt.M(self.p).toarray())\n self._solver_input[\"h\"] = cvxopt.matrix(\n self.opt.c(self.p).toarray().flatten()\n )\n if self.opt.na > 0:\n self._solver_input[\"A\"] = cvxopt.matrix(self.opt.A(self.p).toarray())\n self._solver_input[\"b\"] = cvxopt.matrix(-self.opt.b(self.p).toarray())", "def _reset_parameters(self):\n\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)", "def _set_init_param_dict(self):\n\n self.param_dict = {}\n\n try:\n suppress_warning = self._suppress_repeated_param_warning\n except AttributeError:\n suppress_warning = False\n msg = (\"\\n\\nThe param_dict key %s appears in more than one component model.\\n\"\n \"This is permissible, but if you are seeing this message you should be sure you \"\n \"understand it.\\nIn particular, double-check that this parameter does not have \"\n \"conflicting meanings across components.\\n\"\n \"\\nIf you do not wish to see this message every time you instantiate, \\n\"\n \"simply attach a _suppress_repeated_param_warning attribute \\n\"\n \"to any of your component models and set this variable to ``True``.\\n\")\n\n for component_model in self.model_dictionary.values():\n\n if not hasattr(component_model, 'param_dict'):\n component_model.param_dict = {}\n intersection = set(self.param_dict) & set(component_model.param_dict)\n if intersection != set():\n for key in intersection:\n if suppress_warning is False:\n warn(msg % key)\n\n for key, value in component_model.param_dict.iteritems():\n self.param_dict[key] = value\n\n self._init_param_dict = copy(self.param_dict)", "def init_params(self):\n blah", "def reset_parameters(self):\n for item in self.components.values():\n try:\n item.reset_parameters()\n except:\n pass", "def _set_params(self,x):\r\n self.k._set_params(x)", "def initialize_params(self, params):\n pass", "def reset_parameters(self):\n 
self.rnn.reset_parameters()\n self.action_lookup.reset_parameters()\n\n # self.state_dict_lookup.reset_parameters()\n self.own_c_lookup.reset.parameters()\n self.own_s_lookup.reset_parameters()\n\n self.th_1_lookup.reset_parameters()\n self.th_2_lookup.reset_parameters()\n self.th_3_lookup.reset_parameters()\n self.f_1_lookup.reset.parameters()\n self.f_2_lookup.reset_parameters()\n self.f_3_lookup.reset_parameters()\n self.f_4_lookup.reset_parameters()\n\n self.bu_msg_lookup.reset_parameters()\n\n self.i_t_lookup.reset_parameters()\n self.lives_lookup.reset_parameters()\n\n self.prev_action_lookup.reset_parameters()\n # self.message.apply(weight_reset)\n self.output.apply(weight_reset)\n for p in self.rnn.parameters():\n p.data.uniform_(*self.init_param_range)", "def __init__(self):\n self._params = None", "def reset_parameters(self) -> None:\n for name, param in self.named_parameters():\n if not (name == 'word_embedding.weight' and self.use_pretrained_embeddings):\n nn.init.normal(param, std=0.1)", "def set_default_parameters(self):\n super().set_default_parameters()", "def set_params(self, params: Dict):\n\n if params['training_instances'] is not None:\n self.training_instances = params['training_instances']\n if params['n'] is not None:\n self.n = params['n']\n if params['lda'] is not None:\n self.lda = params['lda']\n if params['verbose'] is not None:\n self.verbose = params['verbose']\n\n self.num_features = self.training_instances[0].get_feature_count()\n self.w = None\n self.b = None", "def reset_layer(self):\n if self.W is None:\n if self.sparse_initialize:\n W_values = self.sparse_initialize_weights()\n else:\n if self.activation == theano.tensor.tanh:\n born = np.sqrt(6. / (self.n_in + self.n_out))\n else:\n born = 4 * np.sqrt(6. / (self.n_in + self.n_out))\n W_values = np.asarray(self.rng.uniform(\n low=-born,\n high=born,\n size=(self.n_in, self.n_out)),\n dtype=theano.config.floatX)\n\n self.W = theano.shared(value=W_values, name='W', borrow=True)\n\n if self.b is None:\n b_values = np.zeros(int(self.n_out/self.num_pieces),\n dtype=theano.config.floatX)\n self.b = theano.shared(value=b_values, name='b', borrow=True)\n\n if self.sparser is None:\n s_values = np.ones(\n int(self.n_out/self.num_pieces), dtype=theano.config.floatX)\n self.sparser = theano.shared(value=s_values, name='sparser',\n borrow=True)\n # The layer parameters\n self.params = [self.W, self.b]", "def _resetParam(self,scn,context):\n\n\t\tif self.camipo:\n\t\t\tself.cam.ipo = self.camipo\n\n\t\tcontext.renderPath = self.path\n\t\tself.cam.lens = self.lens\n\t\tself.cam.scale = self.scale\n\t\tself.cam.shiftX = self.shiftX\n\t\tself.cam.shiftY = self.shiftY\n\t\tself.scn.update()\n\t\treturn", "def gather_params(self):\n for layer in self.layers:\n for name, value in layer.params.iteritems():\n self.params[name] = value", "def reset_parameters(self):\n self.apply(ixvr)", "def reset_parameters(self):\n self.apply(ixvr)", "def reset(self):\n self.params.resetParams()", "def reset_parameters(self, reset_mode='glorot_uniform'):\n\n if reset_mode == 'glorot_uniform':\n if self.weight_decomp == 'block':\n nn.init.xavier_uniform_(self.blocks, gain=nn.init.calculate_gain('relu'))\n elif self.weight_decomp == 'basis':\n nn.init.xavier_uniform_(self.bases, gain=nn.init.calculate_gain('relu'))\n nn.init.xavier_uniform_(self.comps, gain=nn.init.calculate_gain('relu'))\n else:\n nn.init.xavier_uniform_(self.weights, gain=nn.init.calculate_gain('relu'))\n\n if self.bias is not None:\n torch.nn.init.zeros_(self.bias)\n elif 
reset_mode == 'schlichtkrull':\n if self.weight_decomp == 'block':\n nn.init.xavier_uniform_(self.blocks, gain=nn.init.calculate_gain('relu'))\n elif self.weight_decomp == 'basis':\n nn.init.xavier_uniform_(self.bases, gain=nn.init.calculate_gain('relu'))\n nn.init.xavier_uniform_(self.comps, gain=nn.init.calculate_gain('relu'))\n else:\n nn.init.xavier_uniform_(self.weights, gain=nn.init.calculate_gain('relu'))\n\n if self.bias is not None:\n torch.nn.init.zeros_(self.bias)\n elif reset_mode == 'uniform':\n stdv = 1.0 / math.sqrt(self.weights.size(1))\n if self.weight_decomp == 'block':\n self.blocks.data.uniform_(-stdv, stdv)\n elif self.weight_decomp == 'basis':\n self.bases.data.uniform_(-stdv, stdv)\n self.comps.data.uniform_(-stdv, stdv)\n else:\n self.weights.data.uniform_(-stdv, stdv)\n\n if self.bias is not None:\n self.bias.data.uniform_(-stdv, stdv)\n else:\n raise NotImplementedError(f'{reset_mode} parameter initialisation method has not been implemented')", "def setParameters(self, params):\n self.module._setParameters(params)\n # update parameters for learner\n self.learner.setModule(self.module)", "def set_params(self, **parameters):\n for parameter, value in parameters.items():\n if parameter == 'predictor':\n if isinstance(value, chainer.Link):\n del self.predictor\n with self.init_scope():\n self.predictor = value\n else:\n assert False, 'predictor is not Chain instance'\n elif parameter in ['lossfun', 'accfun', 'device']:\n setattr(self, parameter, value)\n else:\n self.sk_params.update({parameter: value})\n return self", "def set_parameters(self, **kwargs):\n self.__multi_layer_perceptron.set_params(**kwargs)", "def reset_params(self):\n self.blur = -1\n self.closing = -1\n self.thresh = -1", "def _set_training_params(self, params):\n self.lyapunov_hybrid_system.lyapunov_relu.load_state_dict(\n params[\"lyap_relu_params\"])\n if not self.R_options.fixed_R:\n self.R_options._variables = params[\"R_params\"].clone()\n if isinstance(self.lyapunov_hybrid_system.system,\n feedback_system.FeedbackSystem):\n self.lyapunov_hybrid_system.system.controller_network.\\\n load_state_dict(params[\"controller_params\"])", "def _set_parameters(self, parameters):\n self.parameters = parameters\n self._set_points_and_weights()", "def set_hyperparams(self, params):", "def setParams(self, paramSet):\r\n pass", "def reset_parameters(self):\n \n for i in range(self.num_layers):\n getattr(self, 'LSTMCell%i'%(i+1)).reset_parameters()", "def _reset(self):\n self._logger.info(\"Parameters before saving.\")\n self.inspect_state()\n self._alpha_guide_prior_params = None\n self._param_store = None\n self._logger = None", "def params_init(self) -> None:\n # Initialize weights and biases with uniform distribution.\n nn.init.uniform_(self.emb.weight, self.init_lower, self.init_upper)\n nn.init.uniform_(self.fc_e2h[1].weight, self.init_lower, self.init_upper)\n nn.init.uniform_(self.fc_e2h[1].bias, self.init_lower, self.init_upper)\n for lyr in range(self.n_lyr):\n self.stack_rnn[2 * lyr].params_init()\n nn.init.uniform_(self.fc_h2e[0].weight, self.init_lower, self.init_upper)\n nn.init.uniform_(self.fc_h2e[0].bias, self.init_lower, self.init_upper)", "def reset_parameters(self, initializer=torch.nn.init.normal_):\n for p in self.parameters():\n if p.requires_grad:\n initializer(p)\n return self", "def params(self,new):\n self._params = new\n self._config_set()\n self._make_model()", "def resetparams(self, parameters):\n try:\n utils.update_dictionary_items(self.params,parameters)\n except 
AttributeError:\n # Variable self.params does not exist, so not updated\n # Create an empty set of params for future reference\n self.params = {}", "def reset_parameters(self) -> None:\n for gnn_block in self.gnn_blocks:\n gnn_block.reset_parameters()", "def reset_parameters(self, p: Dict[str, ArrayType]) -> None:\n self.p = self.opt.parameters.dict2vec(p)\n self._p_dict = self.opt.parameters.vec2dict(self.p)", "def __init__( self, parameters={} ):\n self.params = {}\n self.reset(parameters)\n self.cost_data = None", "def set_parameters(self,params):\n K3Supervisor.set_parameters(self,params)\n self.blending.set_parameters(self.parameters)", "def reset() -> None:\n Parameter.by_name = {}", "def _reset_parameters(self):\n if self.cfg.initial_forget_bias is not None:\n self.hindcast_lstm.bias_hh_l0.data[self.cfg.hidden_size:2 * self.cfg.hidden_size] = self.cfg.initial_forget_bias", "def _init_layers(self) -> None:\n weight_nums, bias_nums = [], []\n for i in range(self.num_layers):\n if i == 0:\n weight_nums.append((self.in_channels + 2) * self.feat_channels)\n bias_nums.append(self.feat_channels)\n elif i == self.num_layers - 1:\n weight_nums.append(self.feat_channels * 1)\n bias_nums.append(1)\n else:\n weight_nums.append(self.feat_channels * self.feat_channels)\n bias_nums.append(self.feat_channels)\n\n self.weight_nums = weight_nums\n self.bias_nums = bias_nums\n self.num_params = sum(weight_nums) + sum(bias_nums)", "def reset(self):\n ih = (param for name, param in self.named_parameters() if 'weight_ih' in name)\n hh = (param for name, param in self.named_parameters() if 'weight_hh' in name)\n b = (param for name, param in self.named_parameters() if 'bias' in name)\n for t in ih:\n torch.nn.init.xavier_uniform_(t)\n for t in hh:\n torch.nn.init.orthogonal_(t)\n for t in b:\n torch.nn.init.constant_(t, 0)", "def _initialize_parameters(self):\n self.ent_emb = tf.get_variable('ent_emb', shape=[len(self.ent_to_idx), self.k],\n initializer=self.initializer)\n self.rel_emb = tf.get_variable('rel_emb', shape=[len(self.rel_to_idx), self.k],\n initializer=self.initializer)", "def initialise_parameters(self):\n # Weights\n init = select_w_init(self.w_init)\n if self.w_gain:\n gain = nn.init.calculate_gain('relu')\n init(self.relations, gain=gain)\n else:\n init(self.relations)\n\n # Biases\n if self.b_init:\n init = select_b_init(self.b_init)\n init(self.sbias)\n init(self.pbias)\n init(self.obias)", "def __init__(self, **kwargs):\n base.Layer.__init__(self, **kwargs)\n self._num_output = self.spec.get('num_output', 0)\n if self._num_output <= 0:\n raise base.InvalidLayerError(\n 'Incorrect or unspecified num_output for %s' % self.name)\n self._reg = self.spec.get('reg', None)\n self._filler = self.spec.get('filler', None)\n self._weight = base.Blob(filler=self._filler)\n self._has_bias = self.spec.get('bias', True)\n if self._has_bias:\n self._bias_filler = self.spec.get('bias_filler', None)\n self._bias = base.Blob(filler=self._bias_filler)\n self._param = [self._weight, self._bias]\n else:\n self._param = [self._weight]", "def _reset_parameters(self):\r\n\t\tfor p in self.parameters():\r\n\t\t\tif p.dim() > 1:\r\n\t\t\t\txavier_uniform_(p)", "def _initialize_parameters(self):\n self.ent_emb = tf.get_variable('ent_emb', shape=[len(self.ent_to_idx), self.k * 2],\n initializer=self.initializer)\n self.rel_emb = tf.get_variable('rel_emb', shape=[len(self.rel_to_idx), self.k * 2],\n initializer=self.initializer)", "def set_parameters(self, params):\n self.kp = params.pgain", "def 
init_params(self):\n self.conv = Conv(self.conv_layers[0][-1], self.out_channels, padding=self.padding,stride=self.stride)\n self.W = torch.randn(self.num_labels, self.cout_numel, requires_grad=True)\n self.T = torch.randn(self.num_labels, self.num_labels, requires_grad=True)", "def set_params(self, **kwargs):\n ...", "def set_parameters(self,params):\n K3Supervisor.set_parameters(self,params)\n self.gtg.set_parameters(self.parameters)\n self.avoidobstacles.set_parameters(self.parameters)\n self.wall.set_parameters(self.parameters)", "def init_params(self):\n self.params = Parameters()\n self.params.add('qoff', self.qoff, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('yscale', self.yscale, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('int_bg', self.int_bg, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('Rc', self.Rc, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('sur_den', self.sur_den, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('ion_depth', self.ion_depth, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)", "def reset_parameters(self):\n logger.info('===== Initialize %s with normal distribution =====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n init_like_transformer_xl(n, p, std=0.02)", "def set_params(self, params):", "def preinitialize(self):\n for group in self.param_groups:\n for p in group['params']:\n if group['momentum'] != 0:\n self.state[p][\"momentum_buffer\"] = torch.zeros_like(\n p, device=\"cpu\"\n ).to(p.device)", "def reset_parameters(self):\n logger.info('===== Initialize %s with Xavier uniform distribution =====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n init_with_xavier_uniform(n, p)", "def reset_parameters(self):\n logger.info('===== Initialize %s with Xavier uniform distribution =====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n init_with_xavier_uniform(n, p)", "def set_params(self, **kwargs) -> NoReturn:\n pass", "def _init_layers(self):\n self._init_predictor()\n if self.use_edge_fusion:\n self._init_edge_module()", "def __init__(self):\n self.layers = []\n self.best_loss = None", "def initialize_parameters(self):\n for i in range(1, self.L):\n self.W[i - 1] = np.random.randn(self.layer_dims[i], self.layer_dims[i - 1]) * 0.01\n self.b[i - 1] = np.zeros((self.layer_dims[i], 1))", "def set_parameters(self, **kwargs):\n kwargs.pop('population_size', None)\n super().set_parameters(population_size=1, **kwargs)\n self.candidates = None", "def init_parameters(self):\n stdv = 1. 
/ math.sqrt(self.weight.data.size(1))\n self.weight.data.uniform_(-stdv, stdv)\n if self.bias is not None:\n self.bias.data.uniform_(-stdv, stdv)", "def add_reset(self, name):\n if not 'reset_dict' in self.__dict__.keys():\n self.reset_dict = {}\n current_params = [p.get_value() for p in self.params]#lasagne.layers.get_all_param_values(self.layer)\n updates = {p:p0 for p, p0 in zip(self.params,current_params)}\n reset_fn = theano.function([],None, updates=updates)\n # \n self.reset_dict[name] = reset_fn", "def _init_model_params(self):\n super()._init_model_params()\n\n if 'e' in self.init_params:\n if self.init_type == 'uniform':\n if self.nr_no_train_de == 0:\n self.B = [\n np.full(\n (self.n_states, self.n_features[i]), 1.0 / self.n_features[i])\n for i in range(self.n_emissions)\n ]\n else:\n check_if_attributes_set(self, attr='e')\n else:\n if self.nr_no_train_de == 0:\n self.B = [\n np.random.rand(self.n_states, self.n_features[i])\n for i in range(self.n_emissions)\n ]\n for i in range(self.n_emissions):\n normalise(self.B[i], axis=1)\n\n else:\n check_if_attributes_set(self, attr='e')" ]
[ "0.75554377", "0.7497733", "0.74958974", "0.73909175", "0.73410285", "0.7286859", "0.7208283", "0.71635944", "0.7134587", "0.71306086", "0.71203345", "0.71100825", "0.71006113", "0.70917517", "0.7006945", "0.69947183", "0.69425595", "0.69156086", "0.6810136", "0.6797556", "0.6797429", "0.67745006", "0.6760542", "0.67542905", "0.6747125", "0.6746427", "0.67459446", "0.6726911", "0.67085314", "0.6708472", "0.6708472", "0.6708472", "0.6699847", "0.669609", "0.665998", "0.66129434", "0.6609195", "0.6583799", "0.65659916", "0.6565928", "0.65546876", "0.65473944", "0.65459263", "0.65455216", "0.65391874", "0.6538698", "0.6513188", "0.65084815", "0.6500688", "0.64980876", "0.64793843", "0.647749", "0.647749", "0.6451865", "0.6445374", "0.6439186", "0.6413901", "0.64102304", "0.63909024", "0.63891304", "0.638592", "0.6360318", "0.63568527", "0.6353663", "0.6353059", "0.6334986", "0.63349086", "0.6333789", "0.6331906", "0.63304496", "0.63261265", "0.6326106", "0.6316334", "0.6310132", "0.6308181", "0.63047993", "0.6304705", "0.63034", "0.6298586", "0.6276693", "0.62742186", "0.62684655", "0.62664574", "0.62594706", "0.6256923", "0.6242214", "0.6237409", "0.6232792", "0.6224828", "0.62247455", "0.6217275", "0.6217275", "0.6210258", "0.6207675", "0.62075055", "0.62069553", "0.62032026", "0.6201406", "0.6172868", "0.6165452" ]
0.6573624
38
Forward pass of the layer.
def forward(self, input_x: Tensor) -> Tensor:
    self.epsilon_weight.normal_()
    bias = self.bias
    if bias is not None:
        self.epsilon_bias.normal_()
        bias = bias + self.sigma_bias * self.epsilon_bias.data
    noisy_weights = self.sigma_weight * self.epsilon_weight.data + self.weight
    return F.linear(input_x, noisy_weights, bias)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward(self, x):\n return self.layers(x)", "def _forward(self, z):\n raise NotImplementedError(\"Forward shouldn't be called!\")", "def __feed_forward(self, X):\n # go over all layers\n for layer in self.__layers:\n X = layer.compute_act(X)\n\n return X", "def forward(self, input):\n return self.layers(input)", "def forward(self, x):\n\t\t#######################################################################\n\t\t# ** START OF YOUR CODE **\n\t\t#######################################################################\n\n\t\toutput = self._layers[0].forward(x)\n\t\tfor i in range(1, len(self._layers)):\n\t\t\toutput = self._layers[i].forward(output)\n\t\treturn output\n\n\t\t#######################################################################\n\t\t# ** END OF YOUR CODE **\n\t\t#######################################################################", "def forward(self, x):\n pass", "def forward(self, curr_layer):\n if self.cin == self.cout and self.stride == 1:\n return self.path(curr_layer) + curr_layer\n else:\n return self.path(curr_layer)", "def forward(self, curr_layer):\n if self.cin == self.cout and self.stride == 1:\n return self.path(curr_layer) + curr_layer\n else:\n return self.path(curr_layer)", "def forward(self, curr_layer):\n if self.cin == self.cout and self.stride == 1:\n return self.path(curr_layer) + curr_layer\n else:\n return self.path(curr_layer)", "def forward(self, x_in):\r\n # x_out = torch.zeros_like(x_in)\r\n\r\n for layer in self.layers: #Call forward function of each layer in order\r\n x_out = layer.forward(x_in)\r\n # print(\"Forward pass Seq: \", layer, x_in, x_out)\r\n x_in = x_out # output of the layer is passed as input to the next layer\r\n self.temp = x_in\r\n return x_out", "def forward(self, x):\n residues = []\n # Downward Pass\n x = self.layers[0](x.unsqueeze(1))\n for layer in self.layers[1:self.half]:\n x = layer(x)\n residues.insert(0, x)\n\n # Upward Pass\n for idx, layer in enumerate(self.layers[self.half:(len(self.layers)-1)]):\n x = layer(x, residues[idx])\n x = self.layers[-1](x)\n\n return(x)", "def forward(self, x): # pylint: disable=invalid-name\n x = self.layer4(self.layer3(self.layer2(self.layer1(x))))\n return x.mean((-2, -1))", "def layer_forward_prop(self):\n raise NotImplementedError(\"Must be created in child classes\")", "def _layer_forward(self, z_prev, layer, use_relu=True):\n\n self.__dict__['z_prev_'+layer] = z_prev\n b = self.__getattribute__('b_'+layer)\n w = self.__getattribute__('w_'+layer)\n\n dim_out = w.shape[0]\n\n # simplification due to np broadcasting\n a = z_prev@w.T + b\n\n z = relu(a) if use_relu else a\n\n return (a, z)", "def forward(self, x):\n raise NotImplementedError", "def forward(self, x):\n raise NotImplementedError", "def forward(self, input):\n raise NotImplementedError", "def forward(self, input):\n raise NotImplementedError", "def forward(self, x):\n for i, layer in enumerate(self.layers):\n x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\n return x.squeeze(0)", "def forward(self, x):\n\n assert(len(x.shape) == 4)\n\n x_out = self.layers(x) + x\n\n return x_out", "def _forward(self, X, **kwargs):\n raise NotImplementedError()", "def forward(self, x, **kwargs):\n pass", "def forward(self, input: torch.Tensor) -> torch.Tensor:\n\n x = self.layers(input)\n\n return x", "def base_forward(self, x):\r\n pass", "def forward(self):\n raise NotImplemented", "def forward(self):\n raise NotImplemented", "def forward(self):\n raise NotImplemented", "def forward(self, input):\n raise 
NotImplementedError()", "def forward(self, x): \n out = self.layer1(x)\n out = self.layer2(out)\n\n out = out.reshape(out.size(0), -1)\n \n out = self.dropout(out)\n out = self.fc1(out)\n out = self.fc2(out)\n \n return out", "def _forward(self):\n\n tf.summary.image(\"image\", tensor=tf.reshape(self.x, (self.batch_size, 28, 28, 1)), max_outputs=10)\n x = self.x\n\n # x = layers.dropout(self.x, keep_prob=0.7)\n # with tf.variable_scope(\"layer1\") as scope:\n h = tf.nn.relu(layers.fully_connected(x, num_outputs=self.input_size // 2, activation_fn=None))\n # tf.summary.histogram(\"moving_mean1\", tf.get_variable(scope + \"moving_mean\"))\n # with tf.variable_scope(\"layer2\") as scope:\n # h = tf.nn.relu(layers.fully_connected(h, num_outputs=32, activation_fn=None))\n # tf.summary.histogram(\"moving_mean2\", tf.get_variable(\"moving_mean\"))\n # with tf.variable_scope(\"layer3\") as scope:\n self.logits = layers.fully_connected(h, num_outputs=10, activation_fn=None)\n # tf.summary.histogram(\"moving_mean3\", tf.get_variable(\"moving_mean\"))\n\n self.probability = tf.nn.softmax(self.logits)\n self.prediction = tf.argmax(self.probability, axis=1)", "def forward(self, x):\n X = np.concatenate((np.ones((x.shape[0],1)), x), axis = 1)\n W = np.concatenate((self.b, self.w), axis = 0)\n self.a = np.dot(X,W)\n return self.a\n raise NotImplementedError(\"Layer forward pass not implemented.\")", "def forward(self, x):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n for l in range(len(self.layers)):\n if l == 0:\n z = self.layers[l].forward(x)\n else:\n z = self.layers[l].forward(a)\n a = self.activations[l].forward(z)\n\n # output from softmax layer\n out = a\n\n ########################\n # END OF YOUR CODE #\n #######################\n\n return out", "def forward(self, batch):\n raise NotImplementedError", "def forward(self, batch):\n raise NotImplementedError", "def zStoreForwardPropagate(self, inputVector):\r\n # Preform the forward propagation through the layers\r\n # setting the output of one layer to the input of the next\r\n for layer in self.layers:\r\n inputVector = layer.zStoreForwardPropagate(inputVector)\r\n # The output of the last layer is returned \r\n return inputVector", "def feed_forward(self):\n pre = self.pre_layer.o\n self.post_layer.i = torch.matmul(pre, self.weight)", "def forward(self, observation: Tensor) -> Tensor:\n pass", "def forward_pass(self):", "def _forward(self, x, X, upto=None):\n if upto is not None: # cannot use 'if upto' here since it is 0-indexed\n # and layer0 is the first layer\n assert 0<=upto<=self._layer_counter\n counter = upto + 1\n else: counter = self._layer_counter\n\n y_previous, Y_previous = x, X\n # TODO: because we always need to compute F_i(X) at each layer i, this\n # is a huge overhead\n # feedforward\n for i in range(counter):\n layer = getattr(self, 'layer'+str(i))\n y, Y = layer(y_previous, Y_previous), layer(Y_previous, Y_previous)\n y_previous, Y_previous = y, Y\n\n return y", "def forward(self)->None:", "def forward(self, inputs):\n raise NotImplementedError", "def feedForward(self):\n # Calculate the current values of the first layer\n self.layer1 = sigmoid(np.dot(self.input, self.weights1))\n\n # Calculate the sigmoid of the second layer which is the output\n self.output = sigmoid(np.dot(self.layer1, self.weights2))", "def forward(self, inp):\n return inp.dot(self.W) + self.b", "def forward(self, *inputs):\n raise NotImplementedError", "def forward_pass_on_convolutions(x, target_layer):\n 
net.features[-1].register_forward_hook(save_target_output)", "def _forward(self, X):\n firstLayer = True\n for layer, fcn in self.model.named_children():\n if 'recurrent' in layer:\n if firstLayer:\n Y, hidden = fcn(X)\n else:\n Y, hidden = fcn(Y)\n elif 'dropout' in layer:\n Y = fcn(Y)\n elif 'linear' in layer:\n Y = fcn(Y.view((Y.shape[1], Y.shape[0]*Y.shape[-1])))\n else:\n Y = fcn(Y)\n\n firstLayer = False\n\n return Y", "def forward(self, x: Tensor) -> Tensor: # type: ignore\n x = self.backbone(x)\n x = x.view(x.size(0), -1)\n if self.head_layers is not None:\n out = self.imagehead(x)\n return out\n else:\n return x", "def forward(self, x):\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n out = self.maxpool(out)\n out = self.avgpool(out)\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = self.avgpool(out)\n out = torch.flatten(out, 1)\n out = self.fc(out)\n return out", "def forward(self, x):\n self.save_net()\n self.perturb_tensors()\n out = self.net.forward(x)\n return out", "def forward(self, x):\n return x", "def forward(self, inputs):\n\n down0 = self.layer_0(inputs=inputs)\n down1 = self.layer_1(inputs=down0)\n down2 = self.layer_2(inputs=down1)\n down3 = self.layer_3(inputs=down2)\n down4 = self.layer_4(inputs=down3)\n\n up1 = self.layer_7(down4, down3)\n\n up2 = self.layer_8(up1, down2)\n\n up3 = self.layer_9(up2, down1)\n\n up4 = self.layer_10(up3, down0)\n\n up5 = self.layer_11(up4)\n return up5", "def forward(self, x):\n # define feedforward behavior, applying activations as necessary\n out = self.leaky_relu(self.conv1(x))\n out = self.leaky_relu(self.conv2(out))\n out = self.leaky_relu(self.conv3(out))\n out = self.leaky_relu(self.conv4(out))\n\n out = self.res_blocks(out)\n\n out = self.leaky_relu(self.deconv1(out))\n out = self.leaky_relu(self.deconv2(out))\n out = self.leaky_relu(self.deconv3(out))\n\n # tanh applied to last layer\n out = F.tanh(self.out_layer(out))\n out = torch.clamp(out, min=-0.5, max=0.5)\n\n return out", "def forward(self):\n self.value = np.dot(self.x_node.value, self.w_node.value) + self.b_node.value", "def forward(self, x):\n for task_module_name in self.task_module_name_path[self.task_idx]:\n for layer in self.task_modules[task_module_name]:\n x = layer(x)\n #x = self.task_modules[task_module_name](x)\n x = x.view(x.size(0), -1)\n x = self.classification_layers[str(self.task_idx)](x)\n return x", "def forward(self):\n pass", "def forward(self):\n pass", "def feature_forward(self, x):\n raise NotImplementedError", "def forward(self, x):\n return self.net(x)", "def forward(self, input=None):\n if (input is not None) and (self.result is None):\n self.result = self.act(input)\n\n # Pull the input from previous network layers\n elif self.result is None:\n in_result = []\n\n # Apply a separate activation to each resulting input if applicable\n if self.G.in_activation:\n for i, n in enumerate(self.input):\n in_result.append( self.G.in_activation[i](n()).type(_tensor(\"FloatTensor\")) )\n\n else:\n for n in self.input:\n in_result.append( n() )\n\n # Concatenate input along the lat dim\n self.result = self.act(torch.cat(in_result, in_result[0].dim() - 1))\n\n return self.result.view(*self.G.d_out)", "def forward(self, curr_layer):\n if self.cin == self.cout and self.stride == 1:\n out = [path(curr_layer) for path in self.pathes]\n out.append(curr_layer)\n return sum(out) / len(out)\n else:\n out = [path(curr_layer) for path in self.pathes]\n return sum(out) / len(out)", "def 
forward(self, curr_layer):\n if self.cin == self.cout and self.stride == 1:\n out = [path(curr_layer) for path in self.pathes]\n out.append(curr_layer)\n return sum(out) / len(out)\n else:\n out = [path(curr_layer) for path in self.pathes]\n return sum(out) / len(out)", "def forward(self, curr_layer):\n if self.cin == self.cout and self.stride == 1:\n out = [path(curr_layer) for path in self.pathes]\n out.append(curr_layer)\n return sum(out) / len(out)\n else:\n out = [path(curr_layer) for path in self.pathes]\n return sum(out) / len(out)", "def forward(self, x):\n x = self.pool(x)\n x = self.conv(x)\n x = x.reshape(x.shape[0], -1)\n x = self.relu(self.fc1(x))\n x = self.dropout1(x)\n x = self.fc2(x)\n x = self.dropout2(x)\n x = self.fc3(x)\n x = self.dropout3(x)\n x = self.fc4(x)\n\n return x", "def forward(self, input, context, state):\n raise NotImplementedError", "def forward(self, x):\n # Pass the input through all the layers apllying ReLU activation, but the last\n for layer in self.fc_layers[:-1]:\n x = F.relu(layer(x))\n # Pass the result through the output layer apllying hyperbolic tangent function\n x = torch.tanh(self.fc_layers[-1](x))\n # Return the better action for the input state\n return x", "def forward(self, x):\n for l in self.layers:\n w = l.weights\n b = l.biases\n x = self.sigmoid(np.dot(x, w) + b)\n return x", "def forward(self, x):\n previous_batch, current_batch = x\n previous_batch_pc, previous_batch_f = previous_batch[0], previous_batch[1]\n current_batch_pc, current_batch_f = current_batch[0], current_batch[1]\n\n f1 = previous_batch_pc[:, :, 3:]\n pc1 = previous_batch_pc[:, :, :3]\n\n f2 = current_batch_pc[:, :, 3:]\n pc2 = current_batch_pc[:, :, :3]\n\n batch_size, n_points_prev, _ = previous_batch_pc.shape\n batch_size, n_points_cur, _ = current_batch_pc.shape\n\n # All outputs of the following layers are tuples of (pos, features)\n # --- Point Feature Part ---\n pf_prev_1, pf_prev_2, pf_prev_3 = self._point_feature_net(pc1.float(), f1.float())\n pf_curr_1, pf_curr_2, pf_curr_3 = self._point_feature_net(pc2.float(), f2.float())\n\n # --- Flow Embedding / Point Mixture Part ---\n _, fe_2, fe_3 = self._point_mixture(x1=pf_prev_3, x2=pf_curr_3)\n\n # --- Flow Refinement Part ---\n x = self._flow_refinement(pf_curr_1=pf_curr_1, pf_curr_2=pf_curr_2, pf_curr_3=pf_curr_3, fe_2=fe_2, fe_3=fe_3)\n\n # --- Final fully connected layer ---\n pos, features = x\n features = features.transpose(1, 2)\n x = self._fc(features)\n return x", "def forward(self, x: torch.Tensor, dim: int = 0, p: int = 1):\n raise NotImplementedError", "def forward(self, x):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n x = self.pool1(F.relu(self.batch1(self.conv1(x))))\n x = self.pool2(F.relu(self.batch2(self.conv2(x))))\n x = F.relu(self.batch3a(self.conv3a(x)))\n x = self.pool3(F.relu(self.batch3b(self.conv3b(x))))\n x = F.relu(self.batch4a(self.conv4a(x)))\n x = self.pool4(F.relu(self.batch4b(self.conv4b(x))))\n x = F.relu(self.batch5a(self.conv5a(x)))\n x = self.pool5(F.relu(self.batch5b(self.conv5b(x))))\n x = self.avgpool(x)\n x = x.reshape(x.shape[0], -1)\n out = self.fc1(x)\n\n# raise NotImplementedError\n ########################\n # END OF YOUR CODE #\n #######################\n\n return out", "def forward(self, x):\n # Convolutional Layers\n ## add pooling layers\n x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))\n x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))\n x = x.view(-1, 256) # flatten to pass to fully connected layers\n\n # fully connected 
layers\n ## and dropout layers\n x = F.relu(self.dropout(self.fc1(x)))\n x = F.relu(self.dropout(self.fc2(x)))\n x = self.fc3(x)\n\n return x", "def __feedforward(self, X):\n A = X\n for layer in self.layers:\n layer._Dense__forward(A)\n A = layer.A\n return A", "def forward(self, state):\n output = self.conv_layers(state)\n output = output.view(-1, 7*7*64)\n output = self.fc(output)\n return output", "def forward(self, X, training=True):\n self.layer_input = X\n return np.dot(X, self.W) + self.b", "def forward(self, state):\n x = state\n for layer in self.linear_layers[:-1]:\n x = F.relu(layer(x))\n x = self.linear_layers[-1](x)\n return x", "def forward( self, x ):\n x = x + self.pe[ :x.size(0), : ]\n return self.dropout( x )", "def forward(self, x):\n return self.activation_function(self.backbone_model(x))", "def forward(self, x: Tensor) -> Any: # type: ignore[override]\n return self.model(x)", "def forward(self, input_var: Tensor, hidden: Optional[HiddenDict] = None, **additional: Dict) -> RecurrentOutput:\n ...", "def forward(self, x):\n x = self.conv1(x)\n if self.use_bn:\n x = self.batchnorm(x)\n if self.use_dropout:\n x = self.dropout(x)\n x = self.activation(x)\n x = self.conv2(x)\n if self.use_bn:\n x = self.batchnorm(x)\n if self.use_dropout:\n x = self.dropout(x)\n x = self.activation(x) \n x = self.maxpool(x) \n return x", "def forward(self, x):\n self.activations[0] = np.dot(x,self.weights[0]) + self.biases[0]\n self.zetas[0] = self.activation_f(self.activations[0])\n for i in range(1, self.n_layers-1):\n self.activations[i] = np.dot(self.zetas[i-1],self.weights[i]) \\\n + self.biases[i]\n self.zetas[i] = self.activation_f(self.activations[i])\n self.activations[-1] = np.dot(self.zetas[-2],self.weights[-1]) \\\n + self.biases[-1]\n self.zetas[-1] = self.activation_out_f(self.activations[-1])\n if self.activation_out_function == 'softmax':\n z = np.sum(self.zetas[-1], axis=1)\n z = np.reshape(z,(-1,1))\n self.zetas[-1] = np.divide(self.zetas[-1],z)\n return self.zetas[-1]", "def forward(self, x):\n # x = state\n \n x = F.relu(self.input(x))\n x = self.output(x)\n \n return x", "def forward(self, x):\n return self.relu(self.conv(x))", "def forward(self, x):\n x, self.hidden = self.gru(x, self.hidden)\n self.detach_hidden()\n x = self.dropout(x)\n x = self.out(x)\n return x", "def forward(self, X):\n self._X = X # For backprop later on.\n self._z = np.dot(X, self._W) + self._b\n a = self._act.a(self._z)\n return a", "def forward_pass(self, x):\r\n self.a = (self.w.T @ x.T).T + self.b # Weighted sum of x with weight matrix(augmented with bias)\r\n self.x = x\r\n return self.a", "def forward(self):\n print('forward')\n self.linearVector = Vector3(x=1.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=0.0)", "def forward(self, x):\n #print('output of fetures.children() : %s'%str([i for i in self.features.children()]))\n #print(\"shape of input is %s\" % str(x.size()))\n for layer_no, layer in enumerate(self.features.children()):\n\n if layer_no is 23:\n y = layer(x)\n if layer_no is 33:\n z = layer(x)\n x = layer(x)\n\n #print('debug')\n #print('layer info: %s'%str(layer))\n #print(\"shape of x is %s\" % str(x.size()))\n\n x = self.conv1D_downstream1(x)\n x = self.conv1D_downstream2(x)\n x = self.upsample_1(x)\n\n z = self.conv1D_pool4(z)\n y = self.conv1D_pool3(y)\n #print('debug')\n #print(\"shape of x is %s\"%str(x.size()))\n #print(\"shape of z is %s\" % str(z.size()))\n\n if x.size() is not z.size():\n x = nn.functional.interpolate(x,size = 
(z.size()[2],z.size()[3]), mode = 'nearest')\n x = x+ z\n x = self.upsample_2(x)\n x = x+y\n x = self.upsample_3(x)\n\n return x", "def forward(self, x):\n x = self.conv(x)\n return x", "def forward(self, input, target):\n\n #return self.bce(input_, target)\n return self.bce(input, target)", "def forward(self, x):\n #batch_size = x.shape[0]\n out = self.model(x)\n return out", "def forward(self, x):\r\n x = self.conv1(x)\r\n x = self.conv1_BN(x)\r\n x = F.relu(x)\r\n x = self.conv1_dp(x)\r\n x = self.Block2_1(x)\r\n x = self.Block2_2(x)\r\n x = self.Block3_1(x)\r\n x = self.Block3_2(x)\r\n x = self.Block3_3(x)\r\n x = self.Block3_4(x)\r\n x = self.Block4_1(x)\r\n x = self.Block4_2(x)\r\n x = self.Block4_3(x)\r\n x = self.Block4_4(x)\r\n x = self.Block5_1(x)\r\n x = self.Block5_2(x)\r\n x = self.MP(x)\r\n x = x.view(x.size(0),-1)\r\n x = self.fc(x)\r\n \r\n return x", "def forward(self, input_):\n out = self.fc(input_)\n out = self.bn(out)\n out = self.relu(out)\n return torch.cat([out, input_], dim=1)", "def forwardPropagate(self, inputVector):\r\n # Preform the forward propagation through the layers\r\n # setting the output of one layer to the input of the next\r\n for layer in self.layers:\r\n inputVector = layer.forwardPropagate(inputVector)\r\n # The output of the last layer is returned \r\n return inputVector", "def forward(self, x):\n\n # 2.2 BUG: Did Bob do anything wrong in the forward method?\n # HINT: Usually a CNN would expect correctly normalized data.\n # Roughly make input to be within -1 to 1 range\n x = (x - 127.5) / 127.5\n\n # Apply conv layers\n x = self.convs(x)\n\n # Global average pooling\n x = x.mean(-1).mean(-1)\n\n # Output layer\n x = self.output(x)\n\n return x", "def forward(self, x):\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x", "def forward_train(self, *args, **kwargs):\n pass", "def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n x = self.act(x)\n return x", "def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n x = self.act(x)\n return x", "def forward(self, states):\n raise NotImplementedError()", "def forward(self, x):\n\n out = self.model(x)\n\n return out", "def forward(self, X):\n X = np.asarray(X)\n \n if (len(X.shape) == 1):\n X = np.reshape(X, (1, len(X)))\n \n if (self.inputBias == True):\n # add a bias unit to each row\n rows = []\n \n for i in range(0, X.shape[0]):\n rows.append(np.append(X[i],1))\n \n X = np.asarray(rows)\n \n \n if (len(self.hlayers) == 0):\n print(\"No hidden layers yet! Please add hidden layers.\")\n return 0\n \n z = np.matmul(X, self.weights[0]) # result of inputlayer x weights\n a = self.hlayers[0].activate(z) # apply activation function at first hidden layer\n \n if (len(self.hlayers) > 1):\n for i in range(1, len(self.hlayers)):\n z = np.matmul(a, self.weights[i])\n a = self.hlayers[i].activate(z)\n return a" ]
[ "0.7267547", "0.7236484", "0.72101915", "0.72012955", "0.7173363", "0.7161442", "0.71286094", "0.71286094", "0.71286094", "0.7043126", "0.70097", "0.6996241", "0.69847035", "0.69749874", "0.6956434", "0.6956434", "0.694666", "0.694666", "0.6942731", "0.6934393", "0.69069153", "0.6899355", "0.68955344", "0.6851149", "0.68389964", "0.68389964", "0.68389964", "0.6828289", "0.6811425", "0.68074214", "0.68024224", "0.6780486", "0.67699623", "0.67699623", "0.676735", "0.6762519", "0.67537796", "0.67428374", "0.67003036", "0.6679613", "0.66739434", "0.6672479", "0.6662067", "0.6660923", "0.6657849", "0.6654442", "0.66339654", "0.66323274", "0.66305304", "0.66215485", "0.6621198", "0.6618724", "0.6614478", "0.66134465", "0.66103166", "0.66103166", "0.66102606", "0.66086286", "0.660695", "0.6599531", "0.6599531", "0.6599531", "0.659792", "0.65854806", "0.6584228", "0.6581178", "0.6567659", "0.65667033", "0.65569746", "0.65526974", "0.65502083", "0.65499353", "0.65494967", "0.6543542", "0.651621", "0.65136003", "0.6513041", "0.6510266", "0.6506664", "0.6500169", "0.6494587", "0.648797", "0.64873654", "0.64832294", "0.64814466", "0.64790815", "0.6478109", "0.6473618", "0.6469466", "0.64648294", "0.64629996", "0.6460714", "0.6457317", "0.6456387", "0.6455129", "0.6454289", "0.6450736", "0.6450736", "0.64501053", "0.6449292", "0.6446627" ]
0.0
-1
Takes in a distribution and actions and returns log prob of actions under the distribution.
def get_log_prob(self, pi: Categorical, actions: Tensor):
    return pi.log_prob(actions)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_log_prob(self, states, actions):\n dist, _ = self.get_dist_and_mode(states)\n log_probs = dist.log_prob(actions)\n log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting\n return log_probs", "def action_log_probs(self, state):\n dist = self.action_distribution(state)\n raw_action = dist.rsample() # reparametrization trick\n\n # enforcing action bounds\n tanh_action = torch.tanh(raw_action) # prevent recomputation later.\n action = tanh_action * self.action_scale + self.action_bias\n\n # change of variables for log prob\n raw_log_prob = dist.log_prob(raw_action)\n log_prob = raw_log_prob - torch.log(\n self.action_scale * (1 - tanh_action.pow(2)) + FEPS\n )\n log_prob = log_prob.sum(1, keepdim=True)\n\n return action, log_prob", "def compute_policy_log_probs(available_actions, policy, actions):\n def compute_log_probs(probs, labels):\n # Select arbitrary element for unused arguments (log probs will be masked)\n labels = tf.maximum(labels, 0)\n indices = tf.stack([tf.range(tf.shape(labels)[0]), labels], axis=1)\n # TODO tf.log should suffice\n return safe_log(tf.gather_nd(probs, indices))\n\n\n fn_id, arg_ids = actions\n fn_pi, arg_pis = policy\n # TODO: this should be unneccessary\n fn_pi = mask_unavailable_actions(available_actions, fn_pi)\n fn_log_prob = compute_log_probs(fn_pi, fn_id)\n tf.summary.scalar('log_prob/fn', tf.reduce_mean(fn_log_prob))\n\n log_prob = fn_log_prob\n for arg_type in arg_ids.keys():\n arg_id = arg_ids[arg_type]\n arg_pi = arg_pis[arg_type]\n arg_log_prob = compute_log_probs(arg_pi, arg_id)\n arg_log_prob *= tf.to_float(tf.not_equal(arg_id, -1))\n log_prob += arg_log_prob\n tf.summary.scalar('log_prob/arg/%s' % arg_type.name,\n tf.reduce_mean(arg_log_prob))\n\n return log_prob", "def get_log_prob(self, states, actions):\n\n mean, log_std = self.__network.forward(tr.from_numpy(states).float())\n\n actions = tr.from_numpy(actions).float()\n log_prob = - (actions - mean) ** 2\n log_prob /= (2.0 * tr.exp(log_std) ** 2 + 1e-10)\n log_prob -= log_std + 0.5 * self.__output_dim * np.log(2 * np.pi)\n return log_prob.sum(1, keepdim=True)", "def get_log_prob(self, pi: Normal, actions: Tensor):\n return pi.log_prob(actions).sum(axis=-1)", "def log_prob(self, state, time_step, rec_states_p, action, return_entropy=True):\n action_index = int(tf.argmax(action, axis=-1).numpy())\n #print(\"action to get prob\")\n #print(action)\n\n probs, _ = self.get_action(state, time_step, rec_states_p)\n log_prob = tf.math.log(probs[0][action_index])\n log_prob = tf.expand_dims(log_prob, -1)\n #print(\"action index\", action_index)\n #print(\"original probs\")\n #print(probs)\n #print(\"logs\")\n #print(log_prob)\n if return_entropy:\n entropy = -tf.reduce_sum(probs * tf.math.log(probs), axis=-1)\n entropy = tf.expand_dims(entropy, -1)\n return log_prob, entropy\n else: return log_prob", "def _graph_fn_get_distribution_log_probs(self, key, parameters, actions):\n # For bounded continuous action spaces, need to unscale (0.0 to 1.0 for beta distribution).\n if self.bounded_action_space[key] is True:\n actions = (actions - self.action_space.low) / (self.action_space.high - self.action_space.low)\n return self.distributions[key].log_prob(parameters, actions)", "def logprob(self, actions, action_logits):\n neg_log_prob = F.nll_loss(action_logits, actions, reduction='none')\n return -neg_log_prob", "def logprob(self, action_sample, policy_params):\n return self.head.logprob(action_sample, policy_params)", "def logp(self, args):\n mean, stddev, action = args\n dist = 
tfp.distributions.Normal(loc=mean, scale=stddev)\n logp = dist.log_prob(action)\n return logp", "def log_prob(actions, logits, reduction=\"none\"):\n # Equivalent to tf.sparse_softmax_cross_entropy_with_logits.\n\n loss = torch.nn.CrossEntropyLoss(reduction=reduction)\n\n # logits: shape [BATCH_SIZE, CLASS_SIZE]\n # actions: shape [BATCH_SIZE]\n neg_log_prob = loss(logits, torch.squeeze(actions, dim=-1))\n\n log_prob = -neg_log_prob\n\n return log_prob", "def act(self, state):\n state = torch.from_numpy(state).float().unsqueeze(0)\n logits = self.forward(state)\n distribution = torch.distributions.Categorical(logits=logits)\n action = distribution.sample()\n log_prob = distribution.log_prob(action).unsqueeze(-1)\n entropy = distribution.entropy().unsqueeze(-1)\n return action.item(), log_prob, entropy", "def produce_action_and_action_info(self, state):\n action_probabilities = self.actor_local(state)\n max_probability_action = torch.argmax(action_probabilities, dim=-1)\n action_distribution = create_actor_distribution(self.action_types, action_probabilities, self.action_size)\n action = action_distribution.sample().cpu()\n # Have to deal with situation of 0.0 probabilities because we can't do log 0\n z = action_probabilities == 0.0\n z = z.float() * 1e-8\n log_action_probabilities = torch.log(action_probabilities + z)\n return action, (action_probabilities, log_action_probabilities), max_probability_action", "def act(self, state):\n state = torch.from_numpy(state).float()\n logits, values = self.forward(state)\n distribution = torch.distributions.Categorical(logits=logits)\n action = distribution.sample()\n log_prob = distribution.log_prob(action).unsqueeze(-1)\n entropy = distribution.entropy().unsqueeze(-1)\n action = action.item() if len(action) == 1 else action.data.numpy()\n return action, log_prob, entropy, values", "def log_prob(target_distribution, x0, xs, accepteds):\n return np.mean([target_distribution.log_probability(x) for x in xs])", "def get_action_log_probs(self, nn_input, actions, internal_states=None):\n out = self.get_logits_parameters_log_probs(nn_input, internal_states)\n\n # Probabilities under current action.\n action_log_probs = self._graph_fn_get_distribution_log_probs(out[\"parameters\"], actions)\n\n return dict(action_log_probs=action_log_probs, logits=out[\"logits\"],\n last_internal_states=out[\"last_internal_states\"])", "def get_log_prob(self, state: rlt.FeatureData, squashed_action: torch.Tensor):\n if self.use_l2_normalization:\n # TODO: calculate log_prob for l2 normalization\n # https://math.stackexchange.com/questions/3120506/on-the-distribution-of-a-normalized-gaussian-vector\n # http://proceedings.mlr.press/v100/mazoure20a/mazoure20a.pdf\n pass\n\n loc, scale_log = self._get_loc_and_scale_log(state)\n raw_action = torch.atanh(squashed_action)\n r = (raw_action - loc) / scale_log.exp()\n log_prob = self._normal_log_prob(r, scale_log)\n squash_correction = self._squash_correction(squashed_action)\n if SummaryWriterContext._global_step % 1000 == 0:\n SummaryWriterContext.add_histogram(\n \"actor/get_log_prob/loc\", loc.detach().cpu()\n )\n SummaryWriterContext.add_histogram(\n \"actor/get_log_prob/scale_log\", scale_log.detach().cpu()\n )\n SummaryWriterContext.add_histogram(\n \"actor/get_log_prob/log_prob\", log_prob.detach().cpu()\n )\n SummaryWriterContext.add_histogram(\n \"actor/get_log_prob/squash_correction\", squash_correction.detach().cpu()\n )\n return torch.sum(log_prob - squash_correction, dim=1).reshape(-1, 1)", "def get_action(self, state, 
action=None):\n logits = self.actor(state)\n\n # Multinomial Distribution (to sample from action spaces with probabilities governed by logits)\n probs = Categorical(logits=logits)\n if action is None:\n action = probs.sample()\n return action, probs.log_prob(action), probs.entropy()", "def log_prob_increase(target_distribution, x0, xs, accepteds):\n return target_distribution.log_probability(xs[-1]) - target_distribution.log_probability(x0)", "def get_probs(self, states, actions):\n # YOUR CODE HERE\n \n # So we need to determine for every input state-action pair, what the resulting policy distribution is\n # This means that the input will be a single state and a single action per index. \n # We then need to determine if, according to our policy, the action should be taken (prob=1) \n # or not (prob=0)\n \n # state is a tuple of (player's current sum, dealer's single showing card, boolean for usable ace)\n probs = []\n for index, (state, action) in enumerate(zip(states, actions)):\n chosen_action = self.sample_action(state)\n if action == chosen_action:\n probs.append(1)\n else:\n probs.append(0)\n \n \n return np.array(probs)", "def log_prob(self):", "def get_action(self, s):\n probs = self.predict(s)\n action = torch.multinomial(probs, 1).item()\n log_prob = torch.log(probs[action])\n return action, log_prob", "def select_action(policy, state):\n state = torch.from_numpy(state).long().unsqueeze(0)\n state = torch.zeros(3,9).scatter_(0,state,1).view(1,27)\n pr = policy(Variable(state))\n m = torch.distributions.Categorical(pr)\n action = m.sample()\n log_prob = torch.sum(m.log_prob(action))\n return action.data[0], log_prob", "def compute_unclipped_logrho(behavior_logits, target_logits, actions):\n target_log_prob = log_prob(actions, target_logits, reduction=\"none\")\n behavior_log_prob = log_prob(actions, behavior_logits, reduction=\"none\")\n\n return target_log_prob - behavior_log_prob", "def distribution_probability(self, game):\n dist_probability = {}\n\n total_visits = sum(self.root.n_a.values())\n\n for action, visits in self.root.n_a.items():\n dist_probability[action] = visits/total_visits\n return dist_probability", "def pathProb(self, path):\n # Establish initial state distribution.\n estState = []\n for s in range(self.P):\n estState.append(self.initial(path[0][0], s))\n logProb = 0\n for step in range(1, len(path)):\n # Calculate a softmax probability that the agent uses each alpha\n # vector, then sort by action.\n lastF = path[step-1][0]\n lastP = path[step-1][1]\n thisF = path[step][0]\n thisP = path[step][1]\n\n # These are log probs.\n actionProbs = [0.0]*self.A\n totalWeight = float('-inf')\n maxScore = float('-inf')\n for action in range(self.A):\n score = self.valueLookAhead(lastF, estState, action)\n maxScore = max(score, maxScore)\n actionProbs[action] = self.tau * score\n totalWeight = logAdd(totalWeight, self.tau * score)\n # Tally up the probability that the agent goes to the correct state.\n pTrans = 0\n actionTable = {}\n for action in range(self.A):\n nextSTable = self.trans(lastF, lastP)[action]\n if not (thisF, thisP) in nextSTable:\n continue\n pThisAction = nextSTable[(thisF, thisP)] * \\\n math.exp(actionProbs[action] - totalWeight)\n actionTable[action] = pThisAction\n pTrans += pThisAction\n if pTrans == 0:\n return float('-inf')\n logProb += math.log(pTrans)\n\n # Choose which action we are taking.\n for action in actionTable:\n actionTable[action] /= pTrans\n thisAction = randomSample(actionTable) #random!\n\n # Update the agent's guess of the hidden 
states.\n nextEstState = [0.0]*self.P\n thisObs = randomSample(self.obs(lastF, lastP)) #random!\n for guessP in range(self.P):\n # What is the probability we are in state guessP?\n pGuessP = estState[guessP] * self.obs(lastF, guessP)[thisObs]\n # Given that we are in state guessP, what is the probability that\n # we move to each new state in P?\n newStates = self.trans(lastF, guessP)[thisAction]\n for newState, prob in newStates.iteritems():\n if newState[0] == thisF:\n nextEstState[newState[1]] += pGuessP * prob\n # Normalize nextEstState.\n estState = [i/sum(nextEstState) for i in nextEstState]\n return logProb", "def target_log_prob_fn(self, *args, **kwargs): # pylint: disable=unused-argument\n\n def log_joint_fn(*args, **kwargs): # pylint: disable=unused-argument\n states = dict(zip(self.unobserved.keys(), args))\n states.update(self.observed)\n interceptor = interceptors.CollectLogProb(states)\n with ed.interception(interceptor):\n self._f(self._cfg)\n\n log_prob = sum(interceptor.log_probs)\n return log_prob\n return log_joint_fn", "def sample(self, state):\n dist = self.action_distribution(state)\n raw_action = dist.rsample() # reparametrization trick\n\n # enforcing action bounds\n tanh_action = torch.tanh(raw_action) # prevent recomputation later.\n action = tanh_action * self.action_scale + self.action_bias\n\n # change of variables for log prob\n raw_log_prob = dist.log_prob(raw_action)\n log_prob = raw_log_prob - torch.log(\n self.action_scale * (1 - tanh_action.pow(2)) + FEPS\n )\n log_prob = log_prob.sum(1, keepdim=True)\n\n return action, log_prob, dist", "def mlp_categorical_policy(x, a, hidden_sizes, activation, output_activation, action_space):\n\n # number of actions possible...they are numbered 0 through n-1\n act_dim = action_space.n\n\n # get a tensorflow neural network to give us a vector output\n # of pre-normalized log probabilities of each action\n logits = mlp(x, list(hidden_sizes)+[act_dim], activation, None)\n\n # then do a softmax to normalize the probabilities\n # so logp_all is the log of the normalized probabilities of each action\n logp_all = tf.nn.log_softmax(logits)\n\n\n # now, create `pi`,\n # which will be a tensor containing the index\n # of the action we have selected (randomly, according to the\n # probabilities implied by the neural network)\n\n # the line that does this is dense, so here is some commentary:\n # squeeze removes all dimensions of size one, and\n # multinomial draws samples according to the multinomial distribution,\n # ie. 
according to the probabilities implied by the logits\n # https://www.tensorflow.org/api_docs/python/tf/random/multinomial\n # TODO: tf is deprecating multinomial;\n # we should probably change this to tf.random.categorical instead\n pi = tf.squeeze(tf.multinomial(logits, 1), axis=1)\n\n # calculate the log of the probability of selecting the specific\n # actions (pi / a) given states x\n # to do this, use a one_hot on the action index to get a vector\n # with a one in that slot and 0s elsewhere,\n # then dot with logp_all (which we already constructed)\n # to get a the value of the probability of that specific action\n # reduce_sum will give us a tensor which is just a number with this value\n # (or the sum of the log probs of multiple actions, if we used this\n # function to calculate probabilities over a trajectory, ie.\n # x and a/pi both contain several elements, representing different\n # actions to take in different states.\n # in this case, by summing the log probs, we essentially\n # log the product of individual probabilities, ie. finding\n # the log prob of the entire trajectory)\n logp = tf.reduce_sum(tf.one_hot(a, depth=act_dim) * logp_all, axis=1)\n logp_pi = tf.reduce_sum(tf.one_hot(pi, depth=act_dim) * logp_all, axis=1)\n\n return pi, logp, logp_pi", "def prob(self, state, action):\n if state + action == 100:\n reward = 1\n else:\n reward = 0\n\n return [(state + action, self._p_head, reward), (state - action, 1 - self._p_head, 0)]", "def log_probability(self, samples):\n pass", "def get_probs(self, states, actions):\n # YOUR CODE HERE\n \n probs = np.ones(len(states))/2\n return probs", "def evaluate_actions(self, obs: torch.Tensor, actions: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n latent_pi, latent_vf = self._get_latent(obs, pi=True, vf=True)\n distribution = self._get_action_dist_from_latent(latent_pi)\n log_prob = distribution.log_prob(actions)\n values = self.value_net(latent_vf)\n return values, log_prob, distribution.entropy()", "def evaluate_actions(self, obs: th.Tensor, actions: th.Tensor) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:\n obs = self.bodynet(obs)\n latent_pi, latent_vf, latent_sde = self._get_latent(obs)\n distribution = self._get_action_dist_from_latent(latent_pi, latent_sde)\n log_prob = distribution.log_prob(actions)\n values = self.value_net(latent_vf)\n return values, log_prob, distribution.entropy()", "def _evaluate_policy(self, state, legal_actions, step_rewards=None, action=None):\n assert step_rewards is not None\n probabilities = torch.exp(torch.tensor(step_rewards, dtype=self.dtype))\n probabilities = probabilities / torch.sum(probabilities)\n\n if action is not None:\n return probabilities[action]\n else:\n return probabilities", "def get_action_probability_dict(self, state):\n pass", "def calculateLogJointProbabilities(self, datum):\n\tlogJoint = util.Counter()\n\t#want to calculate log(P(y)) + log(sum(P(fi|y)))\n\t#where y is a label\n\tfor label in self.legalLabels:\n\t\tlogJoint[label] = math.log(self.prior_distribution_prob[label])\n\t\tfor feature, value in datum.items():\n\t\t\tcp = self.conditional_prob[label][feature][value]\n\t\t\tif cp > 0: #condition check for values < 0 because log(0) is undefined and math domain error occurs\n\t\t\t\tlogJoint[label] += math.log(cp) #summing up\n\t\t\t\t\n\treturn logJoint", "def sample_actions(self, agent_outputs):\n logits, state_values = agent_outputs\n policy = np.exp(logits) / np.sum(np.exp(logits), axis=-1, keepdims=True)\n return np.array([np.random.choice(len(p), 
p=p) for p in policy])", "def log_prob(self, sents):\n log_prob = 0\n for sent in sents:\n log_prob += self.sent_log_prob(sent)\n return log_prob", "def log_prob(self, scores : torch.Tensor, permutations):\n s = torch.log(select_indices(scores, permutations))\n n = len(scores)\n p = self.upto if self.upto is not None else n - 1\n return -sum(\n torch.log(torch.exp((s[k:] - s[k]) * self.shape).sum(dim=0))\n for k in range(p))", "def log_probability(self, sequence):\n sequence = self._transform(sequence)\n\n T = len(sequence)\n\n if T > 0 and sequence[0][_TAG]:\n last_state = sequence[0][_TAG]\n p = self._priors.logprob(last_state) + self._output_logprob(\n last_state, sequence[0][_TEXT]\n )\n for t in range(1, T):\n state = sequence[t][_TAG]\n p += self._transitions[last_state].logprob(\n state\n ) + self._output_logprob(state, sequence[t][_TEXT])\n last_state = state\n return p\n else:\n alpha = self._forward_probability(sequence)\n p = logsumexp2(alpha[T - 1])\n return p", "def summarize_action_dist(action_distributions,\n action_specs,\n name=\"action_dist\"):\n import tensorflow_probability as tfp\n from tf_agents.distributions.utils import SquashToSpecNormal\n action_specs = tf.nest.flatten(action_specs)\n actions = tf.nest.flatten(action_distributions)\n\n for i, (dist, action_spec) in enumerate(zip(actions, action_specs)):\n if isinstance(dist, SquashToSpecNormal):\n dist = dist.input_distribution\n if not isinstance(dist, tfp.distributions.Normal):\n # Only support Normal currently\n continue\n action_dim = action_spec.shape[-1]\n log_scale = tf.math.log(dist.scale)\n for a in range(action_dim):\n tf.summary.histogram(\n name=\"%s_log_scale/%s/%s\" % (name, i, a),\n data=log_scale[..., a])\n tf.summary.histogram(\n name=\"%s_loc/%s/%s\" % (name, i, a), data=dist.loc[..., a])", "def __sample_policy_action(probs):\n # Subtract a tiny value from probabilities in order to avoid\n # \"ValueError: sum(pvals[:-1]) > 1.0\" in numpy.multinomial\n probs = probs - np.finfo(np.float32).epsneg\n\n action_indexes = [int(np.nonzero(np.random.multinomial(1, p))[0]) for p in probs]\n############################################################################################\n # action_indexes = [np.argmax(p) for p in probs] #select the action with the highest probability instead of randomly sampling\n # print(action_indexes)\n # print('++++++++++++++++++++++++')\n############################################################################################\n return action_indexes", "def compute_policy_entropy(available_actions, policy, actions):\n _,arg_ids = actions\n\n fn_pi, arg_pis = policy\n fn_pi = mask_unavailable_actions(available_actions, fn_pi)\n entropy = tf.reduce_mean(compute_entropy(fn_pi))\n tf.summary.scalar('entropy/fn', entropy)\n\n for arg_type in arg_ids.keys():\n arg_id = arg_ids[arg_type]\n arg_pi = arg_pis[arg_type]\n batch_mask = tf.to_float(tf.not_equal(arg_id, -1))\n arg_entropy = safe_div(\n tf.reduce_sum(compute_entropy(arg_pi) * batch_mask),\n tf.reduce_sum(batch_mask))\n entropy += arg_entropy\n tf.summary.scalar('used/arg/%s' % arg_type.name,\n tf.reduce_mean(batch_mask))\n tf.summary.scalar('entropy/arg/%s' % arg_type.name, arg_entropy)\n\n return entropy", "def entropy(probabilities):\n return -(sum([p * log(p, 2) if p > 0 else 0 for p in probabilities]))", "def priorProb(self, state):\n actions = []\n for i in range(0, 10):\n actions.append(((i, i+1), random.uniform(0, 1))) \n \n return actions", "def compute_log_prob(self,params: ndarray) -> float:\n return 
self.compute_log_prior(params) + self.compute_log_likelihood(params)", "def _logprob(\n op: Op,\n values: Sequence[TensorVariable],\n *inputs: TensorVariable,\n **kwargs,\n):\n raise NotImplementedError(f\"Logprob method not implemented for {op}\")", "def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n \"*** YOUR CODE HERE ***\"\n\t#Adds log(P(y)) to calculate P(y|f1,f2...)\n for label in self.legalLabels:\n\t\tlogJoint[label] += math.log(self.prior[label])\n\t#Adds log(P(f1|y)), log(P(f2|y))... to calculate P(y|f1, f2...)\n for key in datum:\n\t\t#if key == (7, 3):\n\t\t\t#print self.condprobs[key, 0]\n\t\tfor label in self.legalLabels:\n\t\t\t#print str(key) + str(datum[key])\n\t\t\tlogJoint[label] += math.log(self.condprobs[key, label][datum[key]])\n return logJoint", "def _logp(self, trace, **inputs):\n def calc_log(step):\n exp_pred = np.dot(inputs['gwas_gen'],\n step['beta_med'].T).ravel()\n phen_pred = step['alpha'] * exp_pred\n phen_prob = norm.logpdf(x=inputs['gwas_phen'],\n loc=phen_pred,\n scale=step['phenotype_sigma'])\n return phen_prob\n\n phen_probs = [calc_log(trace[idx])\n for idx in np.random.randint(0, len(self.trace), 500)]\n phen_probs = np.asmatrix(phen_probs)\n mc_logp = phen_probs.sum(axis=1).mean()\n return mc_logp", "def action_distribution(self, state):\n means, stds = self.__call__(state)\n dist = Normal(means, torch.exp(stds))\n\n return dist", "def entropyDistributed(distribution):\n return -sum(map(lambda p : p * log(p, 2), distribution))", "def select_action(policy, state):\n #torch.manual_seed(RAND_SEED) # Seed here is causing kernel to crash\n state = torch.from_numpy(state).long().unsqueeze(0)\n state = torch.zeros(3,9).scatter_(0,state,1).view(1,27)\n #print(state) # for 2b\n pr = policy(Variable(state))\n #print(pr) # for 2c\n m = torch.distributions.Categorical(pr)\n action = m.sample()\n log_prob = torch.sum(m.log_prob(action))\n return action.data[0], log_prob", "def __logprob__(self, cv, vsense):\n return 1.0 / (1.0 + np.exp(-np.dot(cv, vsense)))", "def _logprob(self, sample):\n return 0, 0", "def test_posterior_logprobs(self):\n x = list(product([True, False], repeat=2))\n xs = list(e for e in product(x, repeat=3))\n all_obs = list(o for o in xs\n if all(any(e) and not all(e) for e in o))\n total = logsumexp(list(posterior_logprobs(np.array(obs), self.S, self.A, self.E)[1]\n for obs in all_obs))\n assert_allclose(total, np.log(1))", "def call(self, states):\n dist, mode = self.get_dist_and_mode(states)\n samples = dist.sample()\n log_probs = dist.log_prob(samples)\n log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting\n return mode, samples, log_probs", "def get_action_from_logits_and_probabilities(self, logits, probabilities, deterministic=None):\n self.logger.warn(\"Deprecated API method `get_action_from_logits_and_probabilities` used!\"\n \"Use `get_action_from_logits_and_parameters` instead.\")\n\n deterministic = self.deterministic if deterministic is None else deterministic\n\n action = self._graph_fn_get_action_components(logits, probabilities, deterministic)\n\n return dict(action=action)", "def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n \n \"*** YOUR CODE HERE ***\"\n \n # -- OUR CODE HERE\n \n \n import math\n for label in self.legalLabels:\n sumThing = 0.0\n for pixel in self.conditionalProb[label]:\n if datum[pixel] is 1:\n #assert self.conditionalProb[label][pixel] < 1.0 # -- sanity check that the probability is valid\n sumThing += 
math.log((self.conditionalProb[label][pixel]*1.0))\n else:\n sumThing+=math.log(1-self.conditionalProb[label][pixel]*1.0)\n logJoint[label] = math.log(self.prior[label]*1.0) + sumThing*1.0\n \n\n \n \n import time\n #print \"logJoint is :: \", logJoint\n #time.sleep(2)\n \n \n # -- uses the conditional probability tables computed in the current iteration\n # -- in train and tune\n \n return logJoint", "def calculate_entropy(prob):\n return -(prob * math.log(prob,2))", "def render_action_distribution(name,\n act_dist,\n action_spec,\n n_samples=500,\n n_bins=20,\n **kwargs):\n\n def _approximate_probs(dist, x_range):\n \"\"\"Given a 1D continuous distribution, sample a bunch of points to\n form a histogram to approximate the distribution curve. The values of\n the histogram are densities (integral equal to 1 over the bin range).\n\n Args:\n dist (Distribution): action distribution whose param is rank-2\n x_range (tuple[float]): a tuple of ``(min_x, max_x)`` for the domain\n of the distribution.\n\n Returns:\n np.array: a 2D matrix where each row is a prob hist for a dim\n \"\"\"\n mode = dist_utils.get_mode(dist)\n assert len(\n mode.shape) == 2, \"Currently only support rank-2 distributions!\"\n dim = mode.shape[-1]\n points = dist.sample(sample_shape=(n_samples, )).cpu().numpy()\n points = np.reshape(points, (-1, dim))\n probs = []\n for d in range(dim):\n hist, _ = np.histogram(\n points[:, d], bins=n_bins, density=True, range=x_range)\n probs.append(hist)\n return np.stack(probs)\n\n def _render_act_dist(path, dist, spec):\n if spec.is_discrete:\n assert isinstance(dist, td.categorical.Categorical)\n probs = dist.probs.reshape(-1).cpu().numpy()\n x_range, legends = None, None\n else:\n x_range = (np.min(spec.minimum), np.max(spec.maximum))\n probs = _approximate_probs(dist, x_range)\n legends = [\"d%s\" % i for i in range(probs.shape[0])]\n\n name_ = name if path == '' else name + '/' + path\n return render_curve(\n name=name_, data=probs, legends=legends, x_range=x_range)\n\n return nest.py_map_structure_with_path(_render_act_dist, act_dist,\n action_spec)", "def sumLogProb(a, b):\n if a > b:\n return a + log1p(exp(b - a))\n else:\n return b + log1p(exp(a - b))", "def getPolicy(self, state):\n \"*** YOUR CODE HERE ***\"\n # OUR CODE HERE\n possibleActions = self.mdp.getPossibleActions(state)\n #checking for terminal state (no possible actions)\n if len(possibleActions) is 0: \n return None\n \n #attempt at using the Counter\n eValsActions = util.Counter()\n for action in possibleActions:\n for transitionState, probability in self.mdp.getTransitionStatesAndProbs(state, action):\n eValsActions[action] += probability * (self.mdp.getReward( state, action, transitionState) + self.discount * self.values[transitionState])\n \n return eValsActions.argMax()\n \n #fail attempt using lists :(\n \"\"\"\n #list to hold the expected value of the actions\n eValsActions = []\n #iterate through all actions and their transtion states\n for action in possibleActions:\n for transitionState, probability in self.mdp.getTransitionStatesAndProbs(state, action):\n #expected value of reward with discount * the value of the transitions\n eValsActions[action] += probability * (self.mdp.getReward( state, action, transitionState) + self.discount * self.values[transitionState])\n \n #now iterate through and find the action with the best value\n #(that will be the best action)\n maxVal = -float(\"inf\")\n bestAction = None\n for action in possibleActions:\n if eValsActions[action] > maxVal:\n maxVal = 
eValsAction[action]\n bestAction = action\n \"\"\"\n return action\n # END OUR CODE", "def logits_expert_is_high(\n self,\n state: th.Tensor,\n action: th.Tensor,\n next_state: th.Tensor,\n done: th.Tensor,\n log_policy_act_prob: Optional[th.Tensor] = None,\n ) -> th.Tensor:\n if log_policy_act_prob is None:\n raise TypeError(\n \"Non-None `log_policy_act_prob` is required for this method.\",\n )\n reward_output_train = self._reward_net(state, action, next_state, done)\n return reward_output_train - log_policy_act_prob", "def assignProbablities(self, gameState):\n legalActions = gameState.getLegalActions()\n numDiceActive = sum(gameState.numDicePerPlayer)\n probActionTuples = []\n\n for action in legalActions:\n currentHand = gameState.hands[self.agentIndex]\n currentAction = action\n remainingTotalDice = gameState.totalNumDice - gameState.numDicePerPlayer[self.agentIndex]\n assert remainingTotalDice > 0\n remainingActionCount = currentAction[2] - currentHand[currentAction[1]]\n if remainingActionCount > remainingTotalDice:\n if action[0] == 'deny':\n probActionTuples.append((1, action))\n else:\n probActionTuples.append((0, action))\n elif remainingActionCount > 0:\n # or (action[0] == \"confirm\" and remainingActionCount == 0)\n if action[0] == \"bid\":\n probActionTuples.append((self.bidProbability(remainingTotalDice, remainingActionCount), action))\n elif action[0] == \"deny\":\n probActionTuples.append((1 - self.bidProbability(remainingTotalDice, remainingActionCount), action))\n else:\n probActionTuples.append((self.confirmProbability(remainingTotalDice, remainingActionCount), action))\n elif remainingActionCount == 0:\n if action[0] == \"bid\":\n probActionTuples.append((1, action))\n elif action[0] == \"deny\":\n probActionTuples.append((0, action))\n else:\n probActionTuples.append((self.confirmProbability(remainingTotalDice, remainingActionCount), action))\n else:\n if action[0] == \"bid\":\n probActionTuples.append((1, action))\n else:\n probActionTuples.append((0, action))\n\n return probActionTuples", "def act(self, state, action=None, calc_ent=False):\r\n #state = torch.FloatTensor(state / 255).to(self.device)\r\n assert state.dtype == 'uint8'\r\n state = torch.tensor(state / 255., dtype=torch.float, device=self.device)\r\n #state = torch.from_numpy(state /255).float().to(self.device)\r\n\r\n action_probs, value_ext, value_int = self.model(state)\r\n dist = Categorical(action_probs)\r\n if action is None:\r\n action = dist.sample()\r\n log_prob = dist.log_prob(action)\r\n entropy = dist.entropy() if calc_ent else None\r\n\r\n return {'a': action,\r\n 'log_pi_a': log_prob,\r\n 'ent': entropy,\r\n 'v_ext': value_ext.squeeze(),\r\n 'v_int': value_int.squeeze()}", "def logits_to_log_prob(self, logits):\n\n reduction_indices = len(logits.shape.as_list()) - 1\n max_logits = tf.math.reduce_max(\n logits, axis=reduction_indices, keepdims=True)\n safe_logits = tf.subtract(logits, max_logits)\n sum_exp = tf.math.reduce_sum(\n tf.exp(safe_logits), axis=reduction_indices, keepdims=True)\n log_probs = tf.math.subtract(safe_logits, tf.math.log(sum_exp))\n return log_probs", "def choose_action(self, features_all_arms) -> Tuple[torch.Tensor, torch.Tensor]:\n actor_output = self.policy.act(obs=features_all_arms)\n chosen_action = torch.argmax(actor_output.action, dim=1)\n log_prob = actor_output.log_prob\n return torch.unsqueeze(chosen_action, 1), log_prob", "def mlp_categorical_policy(x, a, action_space, hidden_sizes=[64], activation=tf.tanh,\n output_activation=None):\n act_dim = 
get_dim_from_space(action_space)\n logits = mlp(x, act_dim, hidden_sizes, activation, output_activation)\n # random action selection based off raw probabilities\n actions = tf.squeeze(tf.multinomial(logits, 1), axis=1, name=\"pi\")\n action_mask = tf.one_hot(a, act_dim)\n # Calculate the log probability for each action taken in trajectory\n # log probability = log_prob of action if action taken otherwise 0 (hence action mask)\n log_probs = action_mask * tf.nn.log_softmax(logits)\n # sum log probs for a given trajectory\n log_probs_sum = tf.reduce_sum(log_probs, axis=1)\n return actions, log_probs_sum", "def _evaluate_policy(self, state, legal_actions, step_rewards=None, action=None):\n if action is not None:\n return torch.tensor(1.0 / len(legal_actions), dtype=self.dtype)\n else:\n return 1.0 / len(legal_actions) * torch.ones(len(legal_actions), dtype=self.dtype)", "def _update_distribution(self, trajectories):\n costs = trajectories[\"costs\"].copy()\n actions = trajectories[\"actions\"].copy()\n Q = cost_to_go(costs, self.gamma_seq)\n best_id = np.argmin(Q, axis = 0)[0]\n self.mean_action = (1.0 - self.step_size) * self.mean_action +\\\n self.step_size * actions[best_id]", "def log_likelihood(self, state, obs, act):\n indices = np.array([self.Gittins[state['successes'][i], state['failures'][i]] for i in range(self.env.n_arms)])\n greedy_arms = np.where(np.isclose(indices,indices.max()))[0]\n return np.log(1/len(greedy_arms)) if act in greedy_arms else -1e8", "def calculate_policy(self, state):\n # short aliases\n s = state # s stands for state\n g = self.config['gamma'] # g stands for gamma\n n = self.action_space.n # n stands for the number of actions\n pi_s = self.policy[state] # pi_s stands for the policy in state s\n\n sum_weights = sum(self.weights[s])\n\n # the policy is a probability vector, giving the probability of each action\n pi_s = [((1 - g) * w / sum_weights) + (g / n) for w in self.weights[s]]\n # print(state, pi_s)\n return pi_s", "def prob_logit(x):\n try:\n if len(x.shape) != 1:\n raise ValueError(\"unexpected shape of input vector\\nexpected:\" + str(1) + \", actual: \" + str(len(x.shape)))\n except ValueError as e:\n print(e)\n print()\n raise\n\n x = 1.0 * np.exp(-x)\n\n probability = np.concatenate(\n (\n (x / (1.0 + x)).reshape(x.shape[0], 1),\n (1.0 / (1.0 + x)).reshape(x.shape[0], 1)\n ),\n axis=1\n )\n\n return probability", "def calculate_policy(self, state):\n # short aliases\n s = state # s stands for state\n g = self.config['gamma'] # g stands for gamma\n n = self.action_space.n # n stands for the number of actions\n a = self.config['alpha']\n pi_s = self.policy[state] # pi_s stands for the policy in state s\n weights = self.weights[state]\n # print(weights)\n\n\n # obtains the probability vector from Hedge: p_i(t) = (1+alpha)^s_i(t) / sum_{j \\in K} (1+alpha)^s_j(t)\n sum_weights_exponentials = sum([(1 + a) ** w for w in weights])\n pre_prob = [(((1 + a) ** w) / sum_weights_exponentials) for w in weights]\n\n # the policy is a probability vector, giving the probability of each action\n pi_s = [((1 - g) * p) + (g / n) for p in pre_prob]\n\n return pi_s", "def logprob(predictions, labels):\n # prevent negative probability\n predictions[predictions < 1e-10] = 1e-10\n return np.sum(np.multiply(labels, -np.log(predictions))) / labels.shape[0]", "def logits_expert_is_high(\n self,\n state: th.Tensor,\n action: th.Tensor,\n next_state: th.Tensor,\n done: th.Tensor,\n log_policy_act_prob: Optional[th.Tensor] = None,\n ) -> th.Tensor:\n del log_policy_act_prob\n 
logits = self._reward_net(state, action, next_state, done)\n assert logits.shape == state.shape[:1]\n return logits", "def predict_log_prob(self, stories, queries):\n feed_dict = {self._stories: stories, self._queries: queries}\n return self._sess.run(self.predict_log_prob_op, feed_dict=feed_dict)", "def test_log_prob_fn(task_name, jit_compile, batch_size, implementation, posterior):\n task = sbibm.get_task(task_name)\n prior = task.get_prior()\n prior_dist = task.get_prior_dist()\n posterior_dist = task._get_reference_posterior(num_observation=1)\n\n log_prob = task._get_log_prob_fn(\n num_observation=1,\n implementation=implementation,\n jit_compile=jit_compile,\n posterior=posterior,\n )\n\n parameters = prior(num_samples=batch_size)\n\n # Test whether batching works\n if batch_size > 1:\n for b in range(batch_size):\n torch.allclose(\n log_prob(parameters)[b], log_prob(parameters[b, :].reshape(1, -1))\n )\n torch.allclose(\n posterior_dist.log_prob(parameters)[b],\n posterior_dist.log_prob(parameters[b, :].reshape(1, -1)),\n )\n\n # Test whether proportionality holds\n diff_ref = log_prob(parameters) - posterior_dist.log_prob(parameters)\n if not posterior:\n diff_ref += prior_dist.log_prob(parameters)\n for _ in range(10):\n parameters = prior(num_samples=batch_size)\n diff = log_prob(parameters) - posterior_dist.log_prob(parameters)\n if not posterior:\n diff += prior_dist.log_prob(parameters)\n assert torch.allclose(diff, diff_ref)", "def logprob_dc(counts, prior, axis=None):\n # Note that this excludes the factorial(counts) term, since we explicitly\n # track permutations in assignments.\n return gammaln(np.add(counts, prior, dtype=np.float32)).sum(axis)", "def initialize_distribution(states, actions):\n dist = {}\n\n for i in states:\n dist[i] = {}\n for j in actions:\n dist[i][j] = [0.0]\n\n return dist", "def proba_from_log_odds(self, log_odds):\n return (1/(1 + math.exp(log_odds)))", "def probability(self, sequence):\n return 2 ** (self.log_probability(self._transform(sequence)))", "def log_prob(self, samples):\n return -0.5 * sum_except_batch(\n np.log(2 * np.pi) + self.logstd + \\\n tf.exp(-2 * self.logstd) * tf.square(samples - self.mean))", "def log_prob(self, x):\n z, log_det = self.backward_p(x)\n return self.prior.log_prob(z) + log_det", "def compute_importance_weights(behavior_logits, target_logits, actions):\n logrho = compute_unclipped_logrho(behavior_logits, target_logits, actions)\n print(\"logrho:\", logrho) if debug else None\n print(\"logrho.shape:\", logrho.shape) if debug else None\n\n # change to pytorch version\n return torch.clamp(torch.exp(logrho), max=1.)", "def log_prob_from_logits(x):\n axis = len(x.shape) - 1\n m = x.max(dim=axis, keepdim=True)[0]\n return x - m - torch.log(torch.exp(x - m).sum(dim=axis, keepdim=True))", "def scalar_log_prob(distribution, val):\n log_prob_val = distribution.log_prob(val)\n if len(log_prob_val.shape) == 1:\n return log_prob_val\n elif len(log_prob_val.shape) > 2:\n raise ValueError('log_prob_val has unexpected shape {}.'.format(\n log_prob_val.shape))\n return jnp.sum(log_prob_val, axis=1)", "def get_entropy(distribution, samples):\n entropy = -tf.reduce_sum(distribution.log_prob(samples), axis=1)\n return entropy", "def probability_fn(args: StepFunctionArgs) -> SingleScorePerStepTensor:\n logits = args.attribution_model.output2logits(args.forward_output)\n target_ids = args.target_ids.reshape(logits.shape[0], 1)\n logits = logits.softmax(dim=-1)\n # Extracts the ith score from the softmax output over the vocabulary (dim 
-1 of the logits)\n # where i is the value of the corresponding index in target_ids.\n return logits.gather(-1, target_ids).squeeze(-1)", "def Probability(rating1, rating2):\n return 1.0 * 1.0 / (1 + 1.0 * math.pow(10, 1.0 * (rating1 - rating2) / 400))", "def prob_2_entropy(prob):\r\n n, c, h, w = prob.size()\r\n return -torch.mul(prob, torch.log2(prob + 1e-30)) / np.log2(c)", "def crossentropy_fn(args: StepFunctionArgs) -> SingleScorePerStepTensor:\n return -torch.log2(probability_fn(args))", "def logprob(self):\n assert len(self._added_rows) == self._num_rows\n TODO('https://github.com/posterior/treecat/issues/27')", "def logprob(self):\n assert len(self._added_rows) == self._num_rows\n TODO('https://github.com/posterior/treecat/issues/26')", "def act(self, state: State) -> Distribution:\n return self._gen_behaviour(self._gen_policy_params(state))", "def get_action_prob(self, game, probabilistic=True):\n for _ in range(Config.numMCTSSims):\n self.search(game)\n\n state = game.string_representation()\n counts = [\n self.Nsa.get((state, action), 0) for action in range(game.get_action_size())\n ]\n\n if probabilistic:\n if sum(counts) != 0:\n return [x / sum(counts) for x in counts]\n # TODO: understand this case (no valid actions)\n\n probs = [0] * len(counts)\n probs[np.argmax(counts)] = 1\n return probs", "def log_prob(self, th):\n\n\t\tif len(th.shape) == 2:\n\t\t\tth0, th1 = th[:,0], th[:,1]\n\t\t\tmask = (th0 > 0.) * (th1 > 0.)\n\t\telif len(th.shape) == 1:\n\t\t\tth0, th1 = float(th[0]), float(th[1])\n\t\t\tmask = torch.tensor([th0 > 0., th1 > 0.])\n\t\telse:\n\t\t\traise IndexError(\"This class is only for 2D Gamma prior for GSE model\")\n\t\tth0, th1 = torch.as_tensor(th0), torch.as_tensor(th1)\n\t\tvals = (self.beta_prior.log_prob(th0) + self.gamma_prior.log_prob(th1)).reshape(-1)\n\t\tvals = vals.numpy()\n\t\tvals[~mask] = -float('inf')\n\t\treturn vals", "def compute_probability_of_state(state):\n p = compute_log_probability_of_text(state[\"text\"], state[\"char_to_ix\"], \n state[\"frequency_statistics\"], state[\"transition_matrix\"])\n \n return p", "def _get_multinomial_logits(self, action, params):\n c_0 = params.shelf_life_at_arrival_distribution_c_0\n c_1 = params.shelf_life_at_arrival_distribution_c_1\n # Assume logit for useful_life=1 is 0, concatenate with logits\n # for other ages using provided coefficients and order size action\n\n # Parameters are provided in ascending remaining shelf life\n # So reverse to match ordering of stock array which is in\n # descending order of remaining useful life so that oldest\n # units are on the RHS\n return jnp.hstack([0, c_0 + (c_1 * action)])[::-1]" ]
[ "0.78015953", "0.76985294", "0.74659884", "0.7390132", "0.7234296", "0.718054", "0.71776885", "0.71425647", "0.7029758", "0.694614", "0.6867766", "0.67514867", "0.67146116", "0.6675534", "0.66467613", "0.6508409", "0.647576", "0.6389438", "0.6382274", "0.63791734", "0.6377181", "0.6375707", "0.63297397", "0.62908536", "0.6285766", "0.6279738", "0.62551045", "0.6234557", "0.62043434", "0.6159739", "0.61508495", "0.6141987", "0.6130724", "0.6126621", "0.61012626", "0.6087458", "0.6071404", "0.6060294", "0.6039263", "0.6031196", "0.6023759", "0.6019619", "0.60098654", "0.59944224", "0.5980989", "0.59439707", "0.59254366", "0.59097165", "0.59095937", "0.5907104", "0.5897519", "0.5892748", "0.5884274", "0.58732694", "0.58681875", "0.585025", "0.5836464", "0.5829168", "0.5827178", "0.5792258", "0.5791624", "0.57659495", "0.5760683", "0.5756402", "0.57480866", "0.5739269", "0.5730925", "0.57234627", "0.5719165", "0.57139474", "0.57109517", "0.5689098", "0.56734824", "0.56591326", "0.565728", "0.56444556", "0.56370693", "0.56336033", "0.563052", "0.56107444", "0.5610629", "0.5598624", "0.5594092", "0.55853075", "0.55847913", "0.5580518", "0.55697274", "0.5566737", "0.5553388", "0.5545977", "0.554016", "0.5539805", "0.55376995", "0.55326325", "0.55318916", "0.5531869", "0.5530374", "0.55273753", "0.55218995", "0.5514399" ]
0.72526956
4
Takes in a distribution and actions and returns log prob of actions under the distribution.
def get_log_prob(self, pi: Normal, actions: Tensor): return pi.log_prob(actions).sum(axis=-1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_log_prob(self, states, actions):\n dist, _ = self.get_dist_and_mode(states)\n log_probs = dist.log_prob(actions)\n log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting\n return log_probs", "def action_log_probs(self, state):\n dist = self.action_distribution(state)\n raw_action = dist.rsample() # reparametrization trick\n\n # enforcing action bounds\n tanh_action = torch.tanh(raw_action) # prevent recomputation later.\n action = tanh_action * self.action_scale + self.action_bias\n\n # change of variables for log prob\n raw_log_prob = dist.log_prob(raw_action)\n log_prob = raw_log_prob - torch.log(\n self.action_scale * (1 - tanh_action.pow(2)) + FEPS\n )\n log_prob = log_prob.sum(1, keepdim=True)\n\n return action, log_prob", "def compute_policy_log_probs(available_actions, policy, actions):\n def compute_log_probs(probs, labels):\n # Select arbitrary element for unused arguments (log probs will be masked)\n labels = tf.maximum(labels, 0)\n indices = tf.stack([tf.range(tf.shape(labels)[0]), labels], axis=1)\n # TODO tf.log should suffice\n return safe_log(tf.gather_nd(probs, indices))\n\n\n fn_id, arg_ids = actions\n fn_pi, arg_pis = policy\n # TODO: this should be unneccessary\n fn_pi = mask_unavailable_actions(available_actions, fn_pi)\n fn_log_prob = compute_log_probs(fn_pi, fn_id)\n tf.summary.scalar('log_prob/fn', tf.reduce_mean(fn_log_prob))\n\n log_prob = fn_log_prob\n for arg_type in arg_ids.keys():\n arg_id = arg_ids[arg_type]\n arg_pi = arg_pis[arg_type]\n arg_log_prob = compute_log_probs(arg_pi, arg_id)\n arg_log_prob *= tf.to_float(tf.not_equal(arg_id, -1))\n log_prob += arg_log_prob\n tf.summary.scalar('log_prob/arg/%s' % arg_type.name,\n tf.reduce_mean(arg_log_prob))\n\n return log_prob", "def get_log_prob(self, states, actions):\n\n mean, log_std = self.__network.forward(tr.from_numpy(states).float())\n\n actions = tr.from_numpy(actions).float()\n log_prob = - (actions - mean) ** 2\n log_prob /= (2.0 * tr.exp(log_std) ** 2 + 1e-10)\n log_prob -= log_std + 0.5 * self.__output_dim * np.log(2 * np.pi)\n return log_prob.sum(1, keepdim=True)", "def get_log_prob(self, pi: Categorical, actions: Tensor):\n return pi.log_prob(actions)", "def log_prob(self, state, time_step, rec_states_p, action, return_entropy=True):\n action_index = int(tf.argmax(action, axis=-1).numpy())\n #print(\"action to get prob\")\n #print(action)\n\n probs, _ = self.get_action(state, time_step, rec_states_p)\n log_prob = tf.math.log(probs[0][action_index])\n log_prob = tf.expand_dims(log_prob, -1)\n #print(\"action index\", action_index)\n #print(\"original probs\")\n #print(probs)\n #print(\"logs\")\n #print(log_prob)\n if return_entropy:\n entropy = -tf.reduce_sum(probs * tf.math.log(probs), axis=-1)\n entropy = tf.expand_dims(entropy, -1)\n return log_prob, entropy\n else: return log_prob", "def _graph_fn_get_distribution_log_probs(self, key, parameters, actions):\n # For bounded continuous action spaces, need to unscale (0.0 to 1.0 for beta distribution).\n if self.bounded_action_space[key] is True:\n actions = (actions - self.action_space.low) / (self.action_space.high - self.action_space.low)\n return self.distributions[key].log_prob(parameters, actions)", "def logprob(self, actions, action_logits):\n neg_log_prob = F.nll_loss(action_logits, actions, reduction='none')\n return -neg_log_prob", "def logprob(self, action_sample, policy_params):\n return self.head.logprob(action_sample, policy_params)", "def logp(self, args):\n mean, stddev, action = args\n dist = 
tfp.distributions.Normal(loc=mean, scale=stddev)\n logp = dist.log_prob(action)\n return logp", "def log_prob(actions, logits, reduction=\"none\"):\n # Equivalent to tf.sparse_softmax_cross_entropy_with_logits.\n\n loss = torch.nn.CrossEntropyLoss(reduction=reduction)\n\n # logits: shape [BATCH_SIZE, CLASS_SIZE]\n # actions: shape [BATCH_SIZE]\n neg_log_prob = loss(logits, torch.squeeze(actions, dim=-1))\n\n log_prob = -neg_log_prob\n\n return log_prob", "def act(self, state):\n state = torch.from_numpy(state).float().unsqueeze(0)\n logits = self.forward(state)\n distribution = torch.distributions.Categorical(logits=logits)\n action = distribution.sample()\n log_prob = distribution.log_prob(action).unsqueeze(-1)\n entropy = distribution.entropy().unsqueeze(-1)\n return action.item(), log_prob, entropy", "def produce_action_and_action_info(self, state):\n action_probabilities = self.actor_local(state)\n max_probability_action = torch.argmax(action_probabilities, dim=-1)\n action_distribution = create_actor_distribution(self.action_types, action_probabilities, self.action_size)\n action = action_distribution.sample().cpu()\n # Have to deal with situation of 0.0 probabilities because we can't do log 0\n z = action_probabilities == 0.0\n z = z.float() * 1e-8\n log_action_probabilities = torch.log(action_probabilities + z)\n return action, (action_probabilities, log_action_probabilities), max_probability_action", "def act(self, state):\n state = torch.from_numpy(state).float()\n logits, values = self.forward(state)\n distribution = torch.distributions.Categorical(logits=logits)\n action = distribution.sample()\n log_prob = distribution.log_prob(action).unsqueeze(-1)\n entropy = distribution.entropy().unsqueeze(-1)\n action = action.item() if len(action) == 1 else action.data.numpy()\n return action, log_prob, entropy, values", "def log_prob(target_distribution, x0, xs, accepteds):\n return np.mean([target_distribution.log_probability(x) for x in xs])", "def get_action_log_probs(self, nn_input, actions, internal_states=None):\n out = self.get_logits_parameters_log_probs(nn_input, internal_states)\n\n # Probabilities under current action.\n action_log_probs = self._graph_fn_get_distribution_log_probs(out[\"parameters\"], actions)\n\n return dict(action_log_probs=action_log_probs, logits=out[\"logits\"],\n last_internal_states=out[\"last_internal_states\"])", "def get_log_prob(self, state: rlt.FeatureData, squashed_action: torch.Tensor):\n if self.use_l2_normalization:\n # TODO: calculate log_prob for l2 normalization\n # https://math.stackexchange.com/questions/3120506/on-the-distribution-of-a-normalized-gaussian-vector\n # http://proceedings.mlr.press/v100/mazoure20a/mazoure20a.pdf\n pass\n\n loc, scale_log = self._get_loc_and_scale_log(state)\n raw_action = torch.atanh(squashed_action)\n r = (raw_action - loc) / scale_log.exp()\n log_prob = self._normal_log_prob(r, scale_log)\n squash_correction = self._squash_correction(squashed_action)\n if SummaryWriterContext._global_step % 1000 == 0:\n SummaryWriterContext.add_histogram(\n \"actor/get_log_prob/loc\", loc.detach().cpu()\n )\n SummaryWriterContext.add_histogram(\n \"actor/get_log_prob/scale_log\", scale_log.detach().cpu()\n )\n SummaryWriterContext.add_histogram(\n \"actor/get_log_prob/log_prob\", log_prob.detach().cpu()\n )\n SummaryWriterContext.add_histogram(\n \"actor/get_log_prob/squash_correction\", squash_correction.detach().cpu()\n )\n return torch.sum(log_prob - squash_correction, dim=1).reshape(-1, 1)", "def get_action(self, state, 
action=None):\n logits = self.actor(state)\n\n # Multinomial Distribution (to sample from action spaces with probabilities governed by logits)\n probs = Categorical(logits=logits)\n if action is None:\n action = probs.sample()\n return action, probs.log_prob(action), probs.entropy()", "def log_prob_increase(target_distribution, x0, xs, accepteds):\n return target_distribution.log_probability(xs[-1]) - target_distribution.log_probability(x0)", "def get_probs(self, states, actions):\n # YOUR CODE HERE\n \n # So we need to determine for every input state-action pair, what the resulting policy distribution is\n # This means that the input will be a single state and a single action per index. \n # We then need to determine if, according to our policy, the action should be taken (prob=1) \n # or not (prob=0)\n \n # state is a tuple of (player's current sum, dealer's single showing card, boolean for usable ace)\n probs = []\n for index, (state, action) in enumerate(zip(states, actions)):\n chosen_action = self.sample_action(state)\n if action == chosen_action:\n probs.append(1)\n else:\n probs.append(0)\n \n \n return np.array(probs)", "def log_prob(self):", "def get_action(self, s):\n probs = self.predict(s)\n action = torch.multinomial(probs, 1).item()\n log_prob = torch.log(probs[action])\n return action, log_prob", "def select_action(policy, state):\n state = torch.from_numpy(state).long().unsqueeze(0)\n state = torch.zeros(3,9).scatter_(0,state,1).view(1,27)\n pr = policy(Variable(state))\n m = torch.distributions.Categorical(pr)\n action = m.sample()\n log_prob = torch.sum(m.log_prob(action))\n return action.data[0], log_prob", "def compute_unclipped_logrho(behavior_logits, target_logits, actions):\n target_log_prob = log_prob(actions, target_logits, reduction=\"none\")\n behavior_log_prob = log_prob(actions, behavior_logits, reduction=\"none\")\n\n return target_log_prob - behavior_log_prob", "def distribution_probability(self, game):\n dist_probability = {}\n\n total_visits = sum(self.root.n_a.values())\n\n for action, visits in self.root.n_a.items():\n dist_probability[action] = visits/total_visits\n return dist_probability", "def pathProb(self, path):\n # Establish initial state distribution.\n estState = []\n for s in range(self.P):\n estState.append(self.initial(path[0][0], s))\n logProb = 0\n for step in range(1, len(path)):\n # Calculate a softmax probability that the agent uses each alpha\n # vector, then sort by action.\n lastF = path[step-1][0]\n lastP = path[step-1][1]\n thisF = path[step][0]\n thisP = path[step][1]\n\n # These are log probs.\n actionProbs = [0.0]*self.A\n totalWeight = float('-inf')\n maxScore = float('-inf')\n for action in range(self.A):\n score = self.valueLookAhead(lastF, estState, action)\n maxScore = max(score, maxScore)\n actionProbs[action] = self.tau * score\n totalWeight = logAdd(totalWeight, self.tau * score)\n # Tally up the probability that the agent goes to the correct state.\n pTrans = 0\n actionTable = {}\n for action in range(self.A):\n nextSTable = self.trans(lastF, lastP)[action]\n if not (thisF, thisP) in nextSTable:\n continue\n pThisAction = nextSTable[(thisF, thisP)] * \\\n math.exp(actionProbs[action] - totalWeight)\n actionTable[action] = pThisAction\n pTrans += pThisAction\n if pTrans == 0:\n return float('-inf')\n logProb += math.log(pTrans)\n\n # Choose which action we are taking.\n for action in actionTable:\n actionTable[action] /= pTrans\n thisAction = randomSample(actionTable) #random!\n\n # Update the agent's guess of the hidden 
states.\n nextEstState = [0.0]*self.P\n thisObs = randomSample(self.obs(lastF, lastP)) #random!\n for guessP in range(self.P):\n # What is the probability we are in state guessP?\n pGuessP = estState[guessP] * self.obs(lastF, guessP)[thisObs]\n # Given that we are in state guessP, what is the probability that\n # we move to each new state in P?\n newStates = self.trans(lastF, guessP)[thisAction]\n for newState, prob in newStates.iteritems():\n if newState[0] == thisF:\n nextEstState[newState[1]] += pGuessP * prob\n # Normalize nextEstState.\n estState = [i/sum(nextEstState) for i in nextEstState]\n return logProb", "def target_log_prob_fn(self, *args, **kwargs): # pylint: disable=unused-argument\n\n def log_joint_fn(*args, **kwargs): # pylint: disable=unused-argument\n states = dict(zip(self.unobserved.keys(), args))\n states.update(self.observed)\n interceptor = interceptors.CollectLogProb(states)\n with ed.interception(interceptor):\n self._f(self._cfg)\n\n log_prob = sum(interceptor.log_probs)\n return log_prob\n return log_joint_fn", "def sample(self, state):\n dist = self.action_distribution(state)\n raw_action = dist.rsample() # reparametrization trick\n\n # enforcing action bounds\n tanh_action = torch.tanh(raw_action) # prevent recomputation later.\n action = tanh_action * self.action_scale + self.action_bias\n\n # change of variables for log prob\n raw_log_prob = dist.log_prob(raw_action)\n log_prob = raw_log_prob - torch.log(\n self.action_scale * (1 - tanh_action.pow(2)) + FEPS\n )\n log_prob = log_prob.sum(1, keepdim=True)\n\n return action, log_prob, dist", "def mlp_categorical_policy(x, a, hidden_sizes, activation, output_activation, action_space):\n\n # number of actions possible...they are numbered 0 through n-1\n act_dim = action_space.n\n\n # get a tensorflow neural network to give us a vector output\n # of pre-normalized log probabilities of each action\n logits = mlp(x, list(hidden_sizes)+[act_dim], activation, None)\n\n # then do a softmax to normalize the probabilities\n # so logp_all is the log of the normalized probabilities of each action\n logp_all = tf.nn.log_softmax(logits)\n\n\n # now, create `pi`,\n # which will be a tensor containing the index\n # of the action we have selected (randomly, according to the\n # probabilities implied by the neural network)\n\n # the line that does this is dense, so here is some commentary:\n # squeeze removes all dimensions of size one, and\n # multinomial draws samples according to the multinomial distribution,\n # ie. 
according to the probabilities implied by the logits\n # https://www.tensorflow.org/api_docs/python/tf/random/multinomial\n # TODO: tf is deprecating multinomial;\n # we should probably change this to tf.random.categorical instead\n pi = tf.squeeze(tf.multinomial(logits, 1), axis=1)\n\n # calculate the log of the probability of selecting the specific\n # actions (pi / a) given states x\n # to do this, use a one_hot on the action index to get a vector\n # with a one in that slot and 0s elsewhere,\n # then dot with logp_all (which we already constructed)\n # to get a the value of the probability of that specific action\n # reduce_sum will give us a tensor which is just a number with this value\n # (or the sum of the log probs of multiple actions, if we used this\n # function to calculate probabilities over a trajectory, ie.\n # x and a/pi both contain several elements, representing different\n # actions to take in different states.\n # in this case, by summing the log probs, we essentially\n # log the product of individual probabilities, ie. finding\n # the log prob of the entire trajectory)\n logp = tf.reduce_sum(tf.one_hot(a, depth=act_dim) * logp_all, axis=1)\n logp_pi = tf.reduce_sum(tf.one_hot(pi, depth=act_dim) * logp_all, axis=1)\n\n return pi, logp, logp_pi", "def prob(self, state, action):\n if state + action == 100:\n reward = 1\n else:\n reward = 0\n\n return [(state + action, self._p_head, reward), (state - action, 1 - self._p_head, 0)]", "def log_probability(self, samples):\n pass", "def get_probs(self, states, actions):\n # YOUR CODE HERE\n \n probs = np.ones(len(states))/2\n return probs", "def evaluate_actions(self, obs: torch.Tensor, actions: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n latent_pi, latent_vf = self._get_latent(obs, pi=True, vf=True)\n distribution = self._get_action_dist_from_latent(latent_pi)\n log_prob = distribution.log_prob(actions)\n values = self.value_net(latent_vf)\n return values, log_prob, distribution.entropy()", "def evaluate_actions(self, obs: th.Tensor, actions: th.Tensor) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:\n obs = self.bodynet(obs)\n latent_pi, latent_vf, latent_sde = self._get_latent(obs)\n distribution = self._get_action_dist_from_latent(latent_pi, latent_sde)\n log_prob = distribution.log_prob(actions)\n values = self.value_net(latent_vf)\n return values, log_prob, distribution.entropy()", "def _evaluate_policy(self, state, legal_actions, step_rewards=None, action=None):\n assert step_rewards is not None\n probabilities = torch.exp(torch.tensor(step_rewards, dtype=self.dtype))\n probabilities = probabilities / torch.sum(probabilities)\n\n if action is not None:\n return probabilities[action]\n else:\n return probabilities", "def get_action_probability_dict(self, state):\n pass", "def calculateLogJointProbabilities(self, datum):\n\tlogJoint = util.Counter()\n\t#want to calculate log(P(y)) + log(sum(P(fi|y)))\n\t#where y is a label\n\tfor label in self.legalLabels:\n\t\tlogJoint[label] = math.log(self.prior_distribution_prob[label])\n\t\tfor feature, value in datum.items():\n\t\t\tcp = self.conditional_prob[label][feature][value]\n\t\t\tif cp > 0: #condition check for values < 0 because log(0) is undefined and math domain error occurs\n\t\t\t\tlogJoint[label] += math.log(cp) #summing up\n\t\t\t\t\n\treturn logJoint", "def sample_actions(self, agent_outputs):\n logits, state_values = agent_outputs\n policy = np.exp(logits) / np.sum(np.exp(logits), axis=-1, keepdims=True)\n return np.array([np.random.choice(len(p), 
p=p) for p in policy])", "def log_prob(self, sents):\n log_prob = 0\n for sent in sents:\n log_prob += self.sent_log_prob(sent)\n return log_prob", "def log_prob(self, scores : torch.Tensor, permutations):\n s = torch.log(select_indices(scores, permutations))\n n = len(scores)\n p = self.upto if self.upto is not None else n - 1\n return -sum(\n torch.log(torch.exp((s[k:] - s[k]) * self.shape).sum(dim=0))\n for k in range(p))", "def log_probability(self, sequence):\n sequence = self._transform(sequence)\n\n T = len(sequence)\n\n if T > 0 and sequence[0][_TAG]:\n last_state = sequence[0][_TAG]\n p = self._priors.logprob(last_state) + self._output_logprob(\n last_state, sequence[0][_TEXT]\n )\n for t in range(1, T):\n state = sequence[t][_TAG]\n p += self._transitions[last_state].logprob(\n state\n ) + self._output_logprob(state, sequence[t][_TEXT])\n last_state = state\n return p\n else:\n alpha = self._forward_probability(sequence)\n p = logsumexp2(alpha[T - 1])\n return p", "def summarize_action_dist(action_distributions,\n action_specs,\n name=\"action_dist\"):\n import tensorflow_probability as tfp\n from tf_agents.distributions.utils import SquashToSpecNormal\n action_specs = tf.nest.flatten(action_specs)\n actions = tf.nest.flatten(action_distributions)\n\n for i, (dist, action_spec) in enumerate(zip(actions, action_specs)):\n if isinstance(dist, SquashToSpecNormal):\n dist = dist.input_distribution\n if not isinstance(dist, tfp.distributions.Normal):\n # Only support Normal currently\n continue\n action_dim = action_spec.shape[-1]\n log_scale = tf.math.log(dist.scale)\n for a in range(action_dim):\n tf.summary.histogram(\n name=\"%s_log_scale/%s/%s\" % (name, i, a),\n data=log_scale[..., a])\n tf.summary.histogram(\n name=\"%s_loc/%s/%s\" % (name, i, a), data=dist.loc[..., a])", "def __sample_policy_action(probs):\n # Subtract a tiny value from probabilities in order to avoid\n # \"ValueError: sum(pvals[:-1]) > 1.0\" in numpy.multinomial\n probs = probs - np.finfo(np.float32).epsneg\n\n action_indexes = [int(np.nonzero(np.random.multinomial(1, p))[0]) for p in probs]\n############################################################################################\n # action_indexes = [np.argmax(p) for p in probs] #select the action with the highest probability instead of randomly sampling\n # print(action_indexes)\n # print('++++++++++++++++++++++++')\n############################################################################################\n return action_indexes", "def compute_policy_entropy(available_actions, policy, actions):\n _,arg_ids = actions\n\n fn_pi, arg_pis = policy\n fn_pi = mask_unavailable_actions(available_actions, fn_pi)\n entropy = tf.reduce_mean(compute_entropy(fn_pi))\n tf.summary.scalar('entropy/fn', entropy)\n\n for arg_type in arg_ids.keys():\n arg_id = arg_ids[arg_type]\n arg_pi = arg_pis[arg_type]\n batch_mask = tf.to_float(tf.not_equal(arg_id, -1))\n arg_entropy = safe_div(\n tf.reduce_sum(compute_entropy(arg_pi) * batch_mask),\n tf.reduce_sum(batch_mask))\n entropy += arg_entropy\n tf.summary.scalar('used/arg/%s' % arg_type.name,\n tf.reduce_mean(batch_mask))\n tf.summary.scalar('entropy/arg/%s' % arg_type.name, arg_entropy)\n\n return entropy", "def entropy(probabilities):\n return -(sum([p * log(p, 2) if p > 0 else 0 for p in probabilities]))", "def priorProb(self, state):\n actions = []\n for i in range(0, 10):\n actions.append(((i, i+1), random.uniform(0, 1))) \n \n return actions", "def compute_log_prob(self,params: ndarray) -> float:\n return 
self.compute_log_prior(params) + self.compute_log_likelihood(params)", "def _logprob(\n op: Op,\n values: Sequence[TensorVariable],\n *inputs: TensorVariable,\n **kwargs,\n):\n raise NotImplementedError(f\"Logprob method not implemented for {op}\")", "def _logp(self, trace, **inputs):\n def calc_log(step):\n exp_pred = np.dot(inputs['gwas_gen'],\n step['beta_med'].T).ravel()\n phen_pred = step['alpha'] * exp_pred\n phen_prob = norm.logpdf(x=inputs['gwas_phen'],\n loc=phen_pred,\n scale=step['phenotype_sigma'])\n return phen_prob\n\n phen_probs = [calc_log(trace[idx])\n for idx in np.random.randint(0, len(self.trace), 500)]\n phen_probs = np.asmatrix(phen_probs)\n mc_logp = phen_probs.sum(axis=1).mean()\n return mc_logp", "def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n \"*** YOUR CODE HERE ***\"\n\t#Adds log(P(y)) to calculate P(y|f1,f2...)\n for label in self.legalLabels:\n\t\tlogJoint[label] += math.log(self.prior[label])\n\t#Adds log(P(f1|y)), log(P(f2|y))... to calculate P(y|f1, f2...)\n for key in datum:\n\t\t#if key == (7, 3):\n\t\t\t#print self.condprobs[key, 0]\n\t\tfor label in self.legalLabels:\n\t\t\t#print str(key) + str(datum[key])\n\t\t\tlogJoint[label] += math.log(self.condprobs[key, label][datum[key]])\n return logJoint", "def action_distribution(self, state):\n means, stds = self.__call__(state)\n dist = Normal(means, torch.exp(stds))\n\n return dist", "def entropyDistributed(distribution):\n return -sum(map(lambda p : p * log(p, 2), distribution))", "def select_action(policy, state):\n #torch.manual_seed(RAND_SEED) # Seed here is causing kernel to crash\n state = torch.from_numpy(state).long().unsqueeze(0)\n state = torch.zeros(3,9).scatter_(0,state,1).view(1,27)\n #print(state) # for 2b\n pr = policy(Variable(state))\n #print(pr) # for 2c\n m = torch.distributions.Categorical(pr)\n action = m.sample()\n log_prob = torch.sum(m.log_prob(action))\n return action.data[0], log_prob", "def __logprob__(self, cv, vsense):\n return 1.0 / (1.0 + np.exp(-np.dot(cv, vsense)))", "def _logprob(self, sample):\n return 0, 0", "def test_posterior_logprobs(self):\n x = list(product([True, False], repeat=2))\n xs = list(e for e in product(x, repeat=3))\n all_obs = list(o for o in xs\n if all(any(e) and not all(e) for e in o))\n total = logsumexp(list(posterior_logprobs(np.array(obs), self.S, self.A, self.E)[1]\n for obs in all_obs))\n assert_allclose(total, np.log(1))", "def call(self, states):\n dist, mode = self.get_dist_and_mode(states)\n samples = dist.sample()\n log_probs = dist.log_prob(samples)\n log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting\n return mode, samples, log_probs", "def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n \n \"*** YOUR CODE HERE ***\"\n \n # -- OUR CODE HERE\n \n \n import math\n for label in self.legalLabels:\n sumThing = 0.0\n for pixel in self.conditionalProb[label]:\n if datum[pixel] is 1:\n #assert self.conditionalProb[label][pixel] < 1.0 # -- sanity check that the probability is valid\n sumThing += math.log((self.conditionalProb[label][pixel]*1.0))\n else:\n sumThing+=math.log(1-self.conditionalProb[label][pixel]*1.0)\n logJoint[label] = math.log(self.prior[label]*1.0) + sumThing*1.0\n \n\n \n \n import time\n #print \"logJoint is :: \", logJoint\n #time.sleep(2)\n \n \n # -- uses the conditional probability tables computed in the current iteration\n # -- in train and tune\n \n return logJoint", "def get_action_from_logits_and_probabilities(self, logits, probabilities, 
deterministic=None):\n self.logger.warn(\"Deprecated API method `get_action_from_logits_and_probabilities` used!\"\n \"Use `get_action_from_logits_and_parameters` instead.\")\n\n deterministic = self.deterministic if deterministic is None else deterministic\n\n action = self._graph_fn_get_action_components(logits, probabilities, deterministic)\n\n return dict(action=action)", "def calculate_entropy(prob):\n return -(prob * math.log(prob,2))", "def render_action_distribution(name,\n act_dist,\n action_spec,\n n_samples=500,\n n_bins=20,\n **kwargs):\n\n def _approximate_probs(dist, x_range):\n \"\"\"Given a 1D continuous distribution, sample a bunch of points to\n form a histogram to approximate the distribution curve. The values of\n the histogram are densities (integral equal to 1 over the bin range).\n\n Args:\n dist (Distribution): action distribution whose param is rank-2\n x_range (tuple[float]): a tuple of ``(min_x, max_x)`` for the domain\n of the distribution.\n\n Returns:\n np.array: a 2D matrix where each row is a prob hist for a dim\n \"\"\"\n mode = dist_utils.get_mode(dist)\n assert len(\n mode.shape) == 2, \"Currently only support rank-2 distributions!\"\n dim = mode.shape[-1]\n points = dist.sample(sample_shape=(n_samples, )).cpu().numpy()\n points = np.reshape(points, (-1, dim))\n probs = []\n for d in range(dim):\n hist, _ = np.histogram(\n points[:, d], bins=n_bins, density=True, range=x_range)\n probs.append(hist)\n return np.stack(probs)\n\n def _render_act_dist(path, dist, spec):\n if spec.is_discrete:\n assert isinstance(dist, td.categorical.Categorical)\n probs = dist.probs.reshape(-1).cpu().numpy()\n x_range, legends = None, None\n else:\n x_range = (np.min(spec.minimum), np.max(spec.maximum))\n probs = _approximate_probs(dist, x_range)\n legends = [\"d%s\" % i for i in range(probs.shape[0])]\n\n name_ = name if path == '' else name + '/' + path\n return render_curve(\n name=name_, data=probs, legends=legends, x_range=x_range)\n\n return nest.py_map_structure_with_path(_render_act_dist, act_dist,\n action_spec)", "def sumLogProb(a, b):\n if a > b:\n return a + log1p(exp(b - a))\n else:\n return b + log1p(exp(a - b))", "def getPolicy(self, state):\n \"*** YOUR CODE HERE ***\"\n # OUR CODE HERE\n possibleActions = self.mdp.getPossibleActions(state)\n #checking for terminal state (no possible actions)\n if len(possibleActions) is 0: \n return None\n \n #attempt at using the Counter\n eValsActions = util.Counter()\n for action in possibleActions:\n for transitionState, probability in self.mdp.getTransitionStatesAndProbs(state, action):\n eValsActions[action] += probability * (self.mdp.getReward( state, action, transitionState) + self.discount * self.values[transitionState])\n \n return eValsActions.argMax()\n \n #fail attempt using lists :(\n \"\"\"\n #list to hold the expected value of the actions\n eValsActions = []\n #iterate through all actions and their transtion states\n for action in possibleActions:\n for transitionState, probability in self.mdp.getTransitionStatesAndProbs(state, action):\n #expected value of reward with discount * the value of the transitions\n eValsActions[action] += probability * (self.mdp.getReward( state, action, transitionState) + self.discount * self.values[transitionState])\n \n #now iterate through and find the action with the best value\n #(that will be the best action)\n maxVal = -float(\"inf\")\n bestAction = None\n for action in possibleActions:\n if eValsActions[action] > maxVal:\n maxVal = eValsAction[action]\n bestAction = 
action\n \"\"\"\n return action\n # END OUR CODE", "def logits_expert_is_high(\n self,\n state: th.Tensor,\n action: th.Tensor,\n next_state: th.Tensor,\n done: th.Tensor,\n log_policy_act_prob: Optional[th.Tensor] = None,\n ) -> th.Tensor:\n if log_policy_act_prob is None:\n raise TypeError(\n \"Non-None `log_policy_act_prob` is required for this method.\",\n )\n reward_output_train = self._reward_net(state, action, next_state, done)\n return reward_output_train - log_policy_act_prob", "def assignProbablities(self, gameState):\n legalActions = gameState.getLegalActions()\n numDiceActive = sum(gameState.numDicePerPlayer)\n probActionTuples = []\n\n for action in legalActions:\n currentHand = gameState.hands[self.agentIndex]\n currentAction = action\n remainingTotalDice = gameState.totalNumDice - gameState.numDicePerPlayer[self.agentIndex]\n assert remainingTotalDice > 0\n remainingActionCount = currentAction[2] - currentHand[currentAction[1]]\n if remainingActionCount > remainingTotalDice:\n if action[0] == 'deny':\n probActionTuples.append((1, action))\n else:\n probActionTuples.append((0, action))\n elif remainingActionCount > 0:\n # or (action[0] == \"confirm\" and remainingActionCount == 0)\n if action[0] == \"bid\":\n probActionTuples.append((self.bidProbability(remainingTotalDice, remainingActionCount), action))\n elif action[0] == \"deny\":\n probActionTuples.append((1 - self.bidProbability(remainingTotalDice, remainingActionCount), action))\n else:\n probActionTuples.append((self.confirmProbability(remainingTotalDice, remainingActionCount), action))\n elif remainingActionCount == 0:\n if action[0] == \"bid\":\n probActionTuples.append((1, action))\n elif action[0] == \"deny\":\n probActionTuples.append((0, action))\n else:\n probActionTuples.append((self.confirmProbability(remainingTotalDice, remainingActionCount), action))\n else:\n if action[0] == \"bid\":\n probActionTuples.append((1, action))\n else:\n probActionTuples.append((0, action))\n\n return probActionTuples", "def act(self, state, action=None, calc_ent=False):\r\n #state = torch.FloatTensor(state / 255).to(self.device)\r\n assert state.dtype == 'uint8'\r\n state = torch.tensor(state / 255., dtype=torch.float, device=self.device)\r\n #state = torch.from_numpy(state /255).float().to(self.device)\r\n\r\n action_probs, value_ext, value_int = self.model(state)\r\n dist = Categorical(action_probs)\r\n if action is None:\r\n action = dist.sample()\r\n log_prob = dist.log_prob(action)\r\n entropy = dist.entropy() if calc_ent else None\r\n\r\n return {'a': action,\r\n 'log_pi_a': log_prob,\r\n 'ent': entropy,\r\n 'v_ext': value_ext.squeeze(),\r\n 'v_int': value_int.squeeze()}", "def logits_to_log_prob(self, logits):\n\n reduction_indices = len(logits.shape.as_list()) - 1\n max_logits = tf.math.reduce_max(\n logits, axis=reduction_indices, keepdims=True)\n safe_logits = tf.subtract(logits, max_logits)\n sum_exp = tf.math.reduce_sum(\n tf.exp(safe_logits), axis=reduction_indices, keepdims=True)\n log_probs = tf.math.subtract(safe_logits, tf.math.log(sum_exp))\n return log_probs", "def choose_action(self, features_all_arms) -> Tuple[torch.Tensor, torch.Tensor]:\n actor_output = self.policy.act(obs=features_all_arms)\n chosen_action = torch.argmax(actor_output.action, dim=1)\n log_prob = actor_output.log_prob\n return torch.unsqueeze(chosen_action, 1), log_prob", "def mlp_categorical_policy(x, a, action_space, hidden_sizes=[64], activation=tf.tanh,\n output_activation=None):\n act_dim = get_dim_from_space(action_space)\n logits = 
mlp(x, act_dim, hidden_sizes, activation, output_activation)\n # random action selection based off raw probabilities\n actions = tf.squeeze(tf.multinomial(logits, 1), axis=1, name=\"pi\")\n action_mask = tf.one_hot(a, act_dim)\n # Calculate the log probability for each action taken in trajectory\n # log probability = log_prob of action if action taken otherwise 0 (hence action mask)\n log_probs = action_mask * tf.nn.log_softmax(logits)\n # sum log probs for a given trajectory\n log_probs_sum = tf.reduce_sum(log_probs, axis=1)\n return actions, log_probs_sum", "def _evaluate_policy(self, state, legal_actions, step_rewards=None, action=None):\n if action is not None:\n return torch.tensor(1.0 / len(legal_actions), dtype=self.dtype)\n else:\n return 1.0 / len(legal_actions) * torch.ones(len(legal_actions), dtype=self.dtype)", "def _update_distribution(self, trajectories):\n costs = trajectories[\"costs\"].copy()\n actions = trajectories[\"actions\"].copy()\n Q = cost_to_go(costs, self.gamma_seq)\n best_id = np.argmin(Q, axis = 0)[0]\n self.mean_action = (1.0 - self.step_size) * self.mean_action +\\\n self.step_size * actions[best_id]", "def log_likelihood(self, state, obs, act):\n indices = np.array([self.Gittins[state['successes'][i], state['failures'][i]] for i in range(self.env.n_arms)])\n greedy_arms = np.where(np.isclose(indices,indices.max()))[0]\n return np.log(1/len(greedy_arms)) if act in greedy_arms else -1e8", "def calculate_policy(self, state):\n # short aliases\n s = state # s stands for state\n g = self.config['gamma'] # g stands for gamma\n n = self.action_space.n # n stands for the number of actions\n pi_s = self.policy[state] # pi_s stands for the policy in state s\n\n sum_weights = sum(self.weights[s])\n\n # the policy is a probability vector, giving the probability of each action\n pi_s = [((1 - g) * w / sum_weights) + (g / n) for w in self.weights[s]]\n # print(state, pi_s)\n return pi_s", "def prob_logit(x):\n try:\n if len(x.shape) != 1:\n raise ValueError(\"unexpected shape of input vector\\nexpected:\" + str(1) + \", actual: \" + str(len(x.shape)))\n except ValueError as e:\n print(e)\n print()\n raise\n\n x = 1.0 * np.exp(-x)\n\n probability = np.concatenate(\n (\n (x / (1.0 + x)).reshape(x.shape[0], 1),\n (1.0 / (1.0 + x)).reshape(x.shape[0], 1)\n ),\n axis=1\n )\n\n return probability", "def calculate_policy(self, state):\n # short aliases\n s = state # s stands for state\n g = self.config['gamma'] # g stands for gamma\n n = self.action_space.n # n stands for the number of actions\n a = self.config['alpha']\n pi_s = self.policy[state] # pi_s stands for the policy in state s\n weights = self.weights[state]\n # print(weights)\n\n\n # obtains the probability vector from Hedge: p_i(t) = (1+alpha)^s_i(t) / sum_{j \\in K} (1+alpha)^s_j(t)\n sum_weights_exponentials = sum([(1 + a) ** w for w in weights])\n pre_prob = [(((1 + a) ** w) / sum_weights_exponentials) for w in weights]\n\n # the policy is a probability vector, giving the probability of each action\n pi_s = [((1 - g) * p) + (g / n) for p in pre_prob]\n\n return pi_s", "def logprob(predictions, labels):\n # prevent negative probability\n predictions[predictions < 1e-10] = 1e-10\n return np.sum(np.multiply(labels, -np.log(predictions))) / labels.shape[0]", "def logits_expert_is_high(\n self,\n state: th.Tensor,\n action: th.Tensor,\n next_state: th.Tensor,\n done: th.Tensor,\n log_policy_act_prob: Optional[th.Tensor] = None,\n ) -> th.Tensor:\n del log_policy_act_prob\n logits = self._reward_net(state, action, 
next_state, done)\n assert logits.shape == state.shape[:1]\n return logits", "def predict_log_prob(self, stories, queries):\n feed_dict = {self._stories: stories, self._queries: queries}\n return self._sess.run(self.predict_log_prob_op, feed_dict=feed_dict)", "def test_log_prob_fn(task_name, jit_compile, batch_size, implementation, posterior):\n task = sbibm.get_task(task_name)\n prior = task.get_prior()\n prior_dist = task.get_prior_dist()\n posterior_dist = task._get_reference_posterior(num_observation=1)\n\n log_prob = task._get_log_prob_fn(\n num_observation=1,\n implementation=implementation,\n jit_compile=jit_compile,\n posterior=posterior,\n )\n\n parameters = prior(num_samples=batch_size)\n\n # Test whether batching works\n if batch_size > 1:\n for b in range(batch_size):\n torch.allclose(\n log_prob(parameters)[b], log_prob(parameters[b, :].reshape(1, -1))\n )\n torch.allclose(\n posterior_dist.log_prob(parameters)[b],\n posterior_dist.log_prob(parameters[b, :].reshape(1, -1)),\n )\n\n # Test whether proportionality holds\n diff_ref = log_prob(parameters) - posterior_dist.log_prob(parameters)\n if not posterior:\n diff_ref += prior_dist.log_prob(parameters)\n for _ in range(10):\n parameters = prior(num_samples=batch_size)\n diff = log_prob(parameters) - posterior_dist.log_prob(parameters)\n if not posterior:\n diff += prior_dist.log_prob(parameters)\n assert torch.allclose(diff, diff_ref)", "def logprob_dc(counts, prior, axis=None):\n # Note that this excludes the factorial(counts) term, since we explicitly\n # track permutations in assignments.\n return gammaln(np.add(counts, prior, dtype=np.float32)).sum(axis)", "def initialize_distribution(states, actions):\n dist = {}\n\n for i in states:\n dist[i] = {}\n for j in actions:\n dist[i][j] = [0.0]\n\n return dist", "def proba_from_log_odds(self, log_odds):\n return (1/(1 + math.exp(log_odds)))", "def probability(self, sequence):\n return 2 ** (self.log_probability(self._transform(sequence)))", "def log_prob(self, samples):\n return -0.5 * sum_except_batch(\n np.log(2 * np.pi) + self.logstd + \\\n tf.exp(-2 * self.logstd) * tf.square(samples - self.mean))", "def log_prob(self, x):\n z, log_det = self.backward_p(x)\n return self.prior.log_prob(z) + log_det", "def compute_importance_weights(behavior_logits, target_logits, actions):\n logrho = compute_unclipped_logrho(behavior_logits, target_logits, actions)\n print(\"logrho:\", logrho) if debug else None\n print(\"logrho.shape:\", logrho.shape) if debug else None\n\n # change to pytorch version\n return torch.clamp(torch.exp(logrho), max=1.)", "def log_prob_from_logits(x):\n axis = len(x.shape) - 1\n m = x.max(dim=axis, keepdim=True)[0]\n return x - m - torch.log(torch.exp(x - m).sum(dim=axis, keepdim=True))", "def scalar_log_prob(distribution, val):\n log_prob_val = distribution.log_prob(val)\n if len(log_prob_val.shape) == 1:\n return log_prob_val\n elif len(log_prob_val.shape) > 2:\n raise ValueError('log_prob_val has unexpected shape {}.'.format(\n log_prob_val.shape))\n return jnp.sum(log_prob_val, axis=1)", "def get_entropy(distribution, samples):\n entropy = -tf.reduce_sum(distribution.log_prob(samples), axis=1)\n return entropy", "def probability_fn(args: StepFunctionArgs) -> SingleScorePerStepTensor:\n logits = args.attribution_model.output2logits(args.forward_output)\n target_ids = args.target_ids.reshape(logits.shape[0], 1)\n logits = logits.softmax(dim=-1)\n # Extracts the ith score from the softmax output over the vocabulary (dim -1 of the logits)\n # where i is the 
value of the corresponding index in target_ids.\n return logits.gather(-1, target_ids).squeeze(-1)", "def Probability(rating1, rating2):\n return 1.0 * 1.0 / (1 + 1.0 * math.pow(10, 1.0 * (rating1 - rating2) / 400))", "def crossentropy_fn(args: StepFunctionArgs) -> SingleScorePerStepTensor:\n return -torch.log2(probability_fn(args))", "def prob_2_entropy(prob):\r\n n, c, h, w = prob.size()\r\n return -torch.mul(prob, torch.log2(prob + 1e-30)) / np.log2(c)", "def act(self, state: State) -> Distribution:\n return self._gen_behaviour(self._gen_policy_params(state))", "def logprob(self):\n assert len(self._added_rows) == self._num_rows\n TODO('https://github.com/posterior/treecat/issues/27')", "def get_action_prob(self, game, probabilistic=True):\n for _ in range(Config.numMCTSSims):\n self.search(game)\n\n state = game.string_representation()\n counts = [\n self.Nsa.get((state, action), 0) for action in range(game.get_action_size())\n ]\n\n if probabilistic:\n if sum(counts) != 0:\n return [x / sum(counts) for x in counts]\n # TODO: understand this case (no valid actions)\n\n probs = [0] * len(counts)\n probs[np.argmax(counts)] = 1\n return probs", "def logprob(self):\n assert len(self._added_rows) == self._num_rows\n TODO('https://github.com/posterior/treecat/issues/26')", "def log_prob(self, th):\n\n\t\tif len(th.shape) == 2:\n\t\t\tth0, th1 = th[:,0], th[:,1]\n\t\t\tmask = (th0 > 0.) * (th1 > 0.)\n\t\telif len(th.shape) == 1:\n\t\t\tth0, th1 = float(th[0]), float(th[1])\n\t\t\tmask = torch.tensor([th0 > 0., th1 > 0.])\n\t\telse:\n\t\t\traise IndexError(\"This class is only for 2D Gamma prior for GSE model\")\n\t\tth0, th1 = torch.as_tensor(th0), torch.as_tensor(th1)\n\t\tvals = (self.beta_prior.log_prob(th0) + self.gamma_prior.log_prob(th1)).reshape(-1)\n\t\tvals = vals.numpy()\n\t\tvals[~mask] = -float('inf')\n\t\treturn vals", "def compute_probability_of_state(state):\n p = compute_log_probability_of_text(state[\"text\"], state[\"char_to_ix\"], \n state[\"frequency_statistics\"], state[\"transition_matrix\"])\n \n return p", "def _get_multinomial_logits(self, action, params):\n c_0 = params.shelf_life_at_arrival_distribution_c_0\n c_1 = params.shelf_life_at_arrival_distribution_c_1\n # Assume logit for useful_life=1 is 0, concatenate with logits\n # for other ages using provided coefficients and order size action\n\n # Parameters are provided in ascending remaining shelf life\n # So reverse to match ordering of stock array which is in\n # descending order of remaining useful life so that oldest\n # units are on the RHS\n return jnp.hstack([0, c_0 + (c_1 * action)])[::-1]" ]
[ "0.78019595", "0.7698052", "0.74672115", "0.7391153", "0.7252926", "0.71811974", "0.71788377", "0.71434426", "0.7029142", "0.69469213", "0.686866", "0.67506903", "0.6712733", "0.6674502", "0.664789", "0.6507693", "0.6476692", "0.6389269", "0.63830537", "0.63797027", "0.63770247", "0.63739526", "0.63304424", "0.62914354", "0.62855715", "0.6279588", "0.6255065", "0.6234224", "0.620668", "0.6160409", "0.6150202", "0.6141891", "0.61305535", "0.61263794", "0.6102398", "0.60865754", "0.6070728", "0.60615855", "0.6039113", "0.60310465", "0.6024393", "0.6020024", "0.6010564", "0.59959924", "0.5980927", "0.59445155", "0.59248775", "0.5910316", "0.5908379", "0.5908214", "0.58981836", "0.58935237", "0.5884897", "0.5874052", "0.5866826", "0.5849584", "0.5835171", "0.5826035", "0.58260006", "0.57924664", "0.579113", "0.5765406", "0.57617056", "0.57572263", "0.5747302", "0.5739034", "0.57306087", "0.57231593", "0.57211375", "0.57151747", "0.57122815", "0.5689766", "0.5674528", "0.56596106", "0.56580377", "0.56443405", "0.5637825", "0.5631598", "0.5630155", "0.561238", "0.5611717", "0.5598927", "0.55940425", "0.55855274", "0.558503", "0.55820626", "0.55699396", "0.5567219", "0.5553323", "0.5546518", "0.55403787", "0.5539404", "0.55387473", "0.55328596", "0.55319583", "0.5531282", "0.55312014", "0.55265766", "0.5522053", "0.55152816" ]
0.72346884
5
Optimizes the distribution of allocations for a set of stock symbols.
def optimize_portfolio(sd=dt.datetime(2008,1,1), ed=dt.datetime(2009,1,1), \
    syms=['GOOG','AAPL','GLD','XOM'], gen_plot=False):

    # Read in adjusted closing prices for given symbols, date range
    dates = pd.date_range(sd, ed)
    prices_all = get_data(syms, dates)  # automatically adds SPY
    prices = prices_all[syms]  # only portfolio symbols
    prices_SPY = prices_all['SPY']  # only SPY, for comparison later

    # find the allocations for the optimal portfolio
    #1 provide an initial guess for x
    allocs = np.ones(len(syms))/len(syms)
    #2 Provide constraints to the optimizer
    bounds = [(0,1) for i in syms]
    constraints = ({ 'type': 'eq', 'fun': lambda inputs: 1.0 - np.sum(inputs) })
    #3 call the optimizer
    res = spo.minimize(get_sharpe_ratio, allocs, args=prices, bounds = bounds, constraints=constraints)
    allocs = res.x

    # Get daily portfolio value
    port_val = get_portfolio_value(prices, allocs, 1.0)

    # Get portfolio statistics
    cr, adr, sddr, sr = get_portfolio_stats(port_val, daily_rf=0.0, samples_per_year=252)

    # Compare daily portfolio value with SPY using a normalized plot
    if gen_plot:
        # add code to plot here
        df_temp = pd.concat([port_val, prices_SPY], keys=['Portfolio', 'SPY'], axis=1)
        plot_normalized_data(df_temp)

    return allocs, cr, adr, sddr, sr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_best_allocation():\n\n # symbols = ['BRCM', 'TXN', 'IBM', 'HNZ'] \n symbols = ['AAPL', 'GOOG', 'IBM', 'MSFT']\n # ['GOOG','AAPL','GLD','XOM']\n basic_portfolio = BasicPortfolio(symbols, dt.datetime(2014, 1, 1), dt.datetime(2014, 12, 31))\n\n alloc = range(4)\n\n sharpe_max = 0\n alloc_max = alloc[:]\n\n for i in range(11):\n alloc[0] = i * 0.1\n for j in range(11 - i):\n alloc[1] = j * 0.1\n for k in range(11 - i - j):\n alloc[2] = k * 0.1\n alloc[3] = (10 - i - j - k) * 0.1\n\n vol, daily_ret, sharpe, cum_ret = \\\n basic_portfolio.analyze(alloc)\n\n if sharpe > sharpe_max:\n sharpe_max = sharpe\n alloc_max = alloc[:]\n\n print 'Best sharpe ratio is ', sharpe_max\n print 'Best allocation is', alloc_max\n\n ref_symbol = '$SPX'\n\n basic_portfolio.plot_with_reference(alloc_max, ref_symbol, source='local')", "def big_analysis(beta0s=[0.5, 0.8, 1.1, 1.4, 1.7], ks=range(6), betaps=[1.2, 1.5, 2, 3]):", "def test_allocation_strategy_opt_allocs():\n prices = np.array([[10, 10], [11, 15], [12, 5], [13, 10]])\n allocs = AllocationStrategy.opt_allocs(prices, neg_sharpe_ratio)\n np.testing.assert_almost_equal(allocs, [1, 0])", "def prepare_optimization(items,schedule,df_pred):\n itemblocks_to_produce = schedule[itemnames()].sum(0).to_dict()\n blocks_available = schedule.blockid.unique()\n block_order = pd.unique(schedule.blockid)\n forecasted_block_prices = df_pred['forecasted_price'].to_dict()\n actual_block_prices = df_pred['price'].to_dict()\n item_consumptions = items.set_index('item').consumption.to_dict()\n return(itemblocks_to_produce,blocks_available,forecasted_block_prices,\n actual_block_prices,item_consumptions,block_order)", "def knapsack(items, capacity):\r\n pass", "def get_new_allocation(self, day, init=False):\n \"\"\n if init and self.data_train is None:\n # Use uniform allocation\n cur_day_op = self.data.get_op(relative=False)[day, :] # opening prices on |cur_day|\n return util.get_uniform_allocation(self.num_stocks, cur_day_op)\n\n predicted_price_rel = self.predict_price_relatives(day)\n\n # Compute mean price relative of available stocks (x bar at t+1)\n today_op = self.data.get_op(relative=False)[day, :]\n avail_stocks = util.get_avail_stocks(today_op)\n avail_idxs = util.get_available_inds(avail_stocks)\n ppr_avail = predicted_price_rel[avail_idxs] # predicted price relatives of available stocks\n mean_price_rel = np.mean(ppr_avail)\n\n lam = self.compute_lambda(ppr_avail, mean_price_rel, avail_idxs) # lambda at t+1\n\n # limit lambda to avoid numerical problems from acting too aggressively.\n # (referenced from marigold's implementation: https://github.com/Marigold/universal-portfolios)\n lam = min(100000, lam)\n\n # Note: we don't perform simplex project b/c negative values (shorting) is allowed.\n new_b = np.zeros(self.num_stocks)\n for i, _ in enumerate(new_b):\n ppr = predicted_price_rel[i]\n if ppr > 0:\n new_b[i] = self.b[i] + lam * (ppr - mean_price_rel)\n\n # Normalize b so that it sums to 1\n sum_b = np.linalg.norm(new_b, ord=1)\n return (1.0 / sum_b) * new_b", "def recalc_stocks(stocks, feature, args):\r\n \r\n for stock in stocks:\r\n expression = 'stock.' 
+ feature + '_calc(' + args + ')'\r\n exec(expression)\r\n \r\n return", "def get_52_week_high_low_for_stocks(stocks):\n print(\"Fetching stock quotes.\")\n # Build a full list of symbols\n symbols = []\n for key in stocks.keys():\n symbols.append(key)\n\n num_of_batches = int(len(symbols)/BATCH_SIZE) + 1\n\n all_stocks_df = pandas.DataFrame()\n\n #all_stocks_df = pandas.DataFrame()\n\n # Get quotes for all the stocks in batches\n for i in range(0, num_of_batches):\n print(\"Fetching quotes in batch: \" + str(i+1) + \"/\" + str(num_of_batches))\n start = i*BATCH_SIZE\n end = start + BATCH_SIZE\n batch_symbols = symbols[start: end]\n batch_symbols_query = '+'.join(batch_symbols)\n request_url = YAHOO_FINANCE_API + \"?\" + YAHOO_FINANCE_SYMBOL_PARAM + \"=\" + batch_symbols_query +\\\n \"&\" + YAHOO_FINANCE_FORMAT_PARAM + \"=\" + YAHOO_FINANCE_SYMBOL_PARAM + YAHOO_FINANCE_52_ASK_PRICE +\\\n YAHOO_FINANCE_BID_PRICE + YAHOO_FINANCE_52_CLOSE_PRICE + YAHOO_FINANCE_52_WEEK_LOW +\\\n YAHOO_FINANCE_52_WEEK_HIGH + YAHOO_FINANCE_52_LOW_CHANGE +\\\n YAHOO_FINANCE_52_HIGH_CHANGE + YAHOO_FINANCE_DIV_YIELD\n r = requests.get(request_url)\n\n # Read the returned CSV as a pandas table\n # Returned format is NAME,ASK,BID,52-wLow,52-wHigh\n df = pandas.read_table(StringIO(r.text), header=None, sep=',')\n all_stocks_df = all_stocks_df.append(df, ignore_index=True)\n\n # Delay to slow down things\n time.sleep(1)\n\n\n # Assign columns\n print(\"Stock quotes have been fetched. Beginning analysis...\")\n all_stocks_df.columns=['symbol', 'ask', 'bid', 'close', '52w-low', '52w-high', '52w-low-change', '52w-high-change', 'div-iteryield']\n\n # Add the percent change columns\n all_stocks_df['52w-%-low-change'] = all_stocks_df['52w-low-change']/all_stocks_df['52w-low']*100\n all_stocks_df['52w-%-high-change'] = all_stocks_df['52w-high-change'] / all_stocks_df['52w-high'] * 100\n\n # Add the names and sectors\n all_stocks_df['name'] = \"\"\n all_stocks_df['sector'] = \"\"\n for index, row in all_stocks_df.iterrows():\n all_stocks_df.loc[index, 'name'] = stocks[row['symbol']][0]\n all_stocks_df.loc[index, 'sector'] = stocks[row['symbol']][1]\n\n\n # Process the received quotes\n sorted_values = all_stocks_df.sort_values('52w-%-low-change')\n\n # Done\n print(\"Analysis completed.\")\n return sorted_values", "def _optimise(self):\n pass", "def portfolio_allocation(self, data, total_risk):\n total_rating = data[\"rating\"].sum()\n shares = {}\n risk_amt = total_risk\n for _, row in data.iterrows():\n numshares = int(float(row[\"rating\"]) / float(total_rating) * float(risk_amt) / float(row[\"price\"]))\n if numshares > 10:\n multiplier = int(numshares / 10)\n numshares = multiplier * 10\n shares[row[\"symbol\"]] = numshares\n\n risk_amt -= numshares * row[\"price\"]\n # debug\n # for k, v in shares.items():\n # print(\"[*] Ticker: {}, Shares: {}\".format(k, v))\n return shares", "def free(amounts: Dict[str, int]) -> None:\n for name, amount in amounts.items():\n assert 0 <= amount <= Resources.total[name] - Resources.available[name]\n Resources.available[name] += amount", "async def _garbage_collect_sim(self, base: str, trade_size: float, reserved: float):\n\n if not config['trade_garbage_collect']:\n return\n\n base_mult = await self.market.get_base_mult(config['trade_base'], base)\n current_balance = self.balancer.sim_balances[base] * base_mult - reserved\n\n if current_balance >= trade_size:\n return\n\n open_trades_by_time = []\n for pair in self.trades:\n if pair.split('-')[0] == base:\n for trade in 
self.trades[pair]['open']:\n open_trades_by_time.append((trade['open_time'], trade))\n\n open_trades_sorted = [trade_tuple[1] for trade_tuple in sorted(open_trades_by_time, key=lambda x: x[0])]\n\n if open_trades_sorted:\n collect_trade = open_trades_sorted[0]\n await self._sell_sim(collect_trade, 'GARBAGE COLLECT SELL', remit=False)\n self.trades[collect_trade['pair']]['open'].remove(collect_trade)", "def optimize_weights(self, generations):\n for gen in range(generations):\n print(\" Generation: %s\" % gen)\n self._pop_f1 = 0\n self._queue_search(self.population)\n self._queue.join()\n self._scores = {}\n while not self._results.empty():\n (index, f1) = self._results.get()\n self._scores[index] = f1\n self._pop_f1 += f1\n ranks = sorted(range(self.population_size), key=lambda s: (self._scores.get(s)))\n self._report(ranks)\n self._next_generation(ranks)", "def improve_population(self):\r\n for index in range(len(self.district_population)):\r\n district = self.district_population[index]\r\n districtsolution = hillclimber.HillClimber(district, self.cable_cost, self.battery_cost)\r\n self.district_population[index] = districtsolution.run(1000, 80000)\r\n self.cost_populations[index] = district.total_cost(self.battery_cost, self.cable_cost)", "def reallocate(banks):\n distributions = dict()\n cycles = 0\n\n while tuple(banks) not in distributions:\n distributions[tuple(banks)] = cycles\n redistribute(banks, banks.index(max(banks)))\n cycles += 1\n\n cycles_in_loop = cycles - distributions[tuple(banks)]\n return cycles, cycles_in_loop", "def find_allocation_with_min_shering(self):\n for consumption_graph in self.graph_generator.generate_all_consumption_graph():\n self.find_allocation_for_graph(consumption_graph)\n return self.min_sharing_allocation", "def minimize(self):\n pass", "def gen_14BQ_OH():\r\n q_smiles_base = {}\r\n q_smiles_mid = {}\r\n q_smiles_base['1,4-BQ,2-OH'] = '[H]OC1=C([H])C(=O)C([H])=C([H])C1=O'\r\n q_smiles_base['1,4-BQ,Full-OH'] = 'OC1=C(O)C(=O)C(O)=C(O)C1=O'\r\n q_smiles_base['1,4-BQ'] = 'O=C1C=CC(=O)C=C1'\r\n\r\n q_smiles_mid['1,4-BQ'] = 'O=C1C=CC(=O)C=C1'\r\n q_smiles_mid['1,4-BQ,2-OH'] = 'OC1=CC(=O)C=CC1=O'\r\n q_smiles_mid['1,4-BQ,2,3-OH'] = 'OC1=C(O)C(=O)C=CC1=O'\r\n q_smiles_mid['1,4-BQ,2,3,5-OH'] = 'OC1=CC(=O)C(O)=C(O)C1=O'\r\n q_smiles_mid['1,4-BQ,Full-OH'] = 'OC1=C(O)C(=O)C(O)=C(O)C1=O' \r\n\r\n return q_smiles_base, q_smiles_mid", "def analyse_rsi(stocks_data, oversold=25, overbought=85):\n print('\\n--- RSI ANALYSIS ---')\n for stock_symbol, df in stocks_data.items():\n rsi = find_rsi(df)\n min_rsi = oversold\n max_rsi = overbought\n for i in range(len(rsi[1])):\n r = rsi[1][i]\n if r < min_rsi:\n min_rsi = r\n elif r > max_rsi:\n max_rsi = r\n\n mins = []\n maxs = []\n for i in range(len(rsi[1])):\n r = rsi[1][i]\n if r <= min_rsi * 1.2:\n mins.append((rsi[0][i], rsi[1][i]))\n elif r >= max_rsi * 0.95:\n maxs.append((rsi[0][i], rsi[1][i]))\n\n if mins:\n print(stock_symbol)\n [print(m[0], m[1]) for m in mins]\n if maxs:\n print(stock_symbol)\n [print(m[0], m[1]) for m in maxs]", "def entrycalc(self, lows, o):\n price = float(self.price)\n \n #print(nextTrade==price,nextTradeSeller==price)\n for i in range(2, self.entries + 1):\n if len(self.entryprices) > 0:\n avgentryprice = sum(self.entryprices) / len(self.entryprices)\n #if previous entry has been placed and current hasn't and other args are met\n if self.dentry[\"placedOrder\" + str(i - 1) + self.chartnumber] and price < avgentryprice and float(price) < lows[-2] and float(price) < float(o) and not 
self.dentry[\"placedOrder\" + str(i) + self.chartnumber]:\n self.dentry[\"placedOrder\" + str(i) + self.chartnumber] = True\n #add these to dict\n print(\"trade number\",str(i))\n self.dentry[\"tradeEntries\" + str(i) + self.chartnumber] += 1\n #self.totalentries += 1\n \n #I changed these from price to nextTrade\n self.dentry[\"orderPrice\" + str(i) + self.chartnumber] = price\n #self.dentry[\"orderPrice\" + str(i) + chartnumber] = self.nextTrade\n \n #altbuy = int(self.dentry[\"buy\" + str(i) + chartnumber] / price)\n altbuy = int(self.dentry[\"buy\" + str(i) + self.chartnumber] / self.nextTrade)\n \n #self.availablebase -= altbuy * price\n self.availablebase -= altbuy * self.nextTrade\n altbuy -= altbuy * .001\n self.amtofalt += altbuy\n ###HOW LONG TO WE WANT ENTRYPRICES TO BE??\n \n #self.entryprices.append(price)\n self.entryprices.append(self.nextTrade)\n if self.graphics:\n self.graph.buy(self.masterDick[\"currentPrice\" + self.chartnumber], self.masterDick[\"count\" + self.chartnumber], self.chartnumber, i)\n #print(\"Fun:\",self.amtofalt)\n print(\"Buy\" + str(i),self.dentry[\"buy\" + str(i) + self.chartnumber])\n break", "def __call__(self):\n gains = []\n numSim = self.nsim\n \n for n in range(numSim):\n if ENTRY_STRATEGY == 'random':\n gains.append(self.runRandomEntryStrat()) \n elif ENTRY_STRATEGY == 'delayed':\n gains.append(self.runDelayedEntryStrat())\n else:\n raise NotImplemented(\n 'Market entry strategy \"%s\" not implemented' % ENTRY_STRATEGY)\n # Average wasted time\n self.wastedTime = float(self.wastedTime) / numSim\n \n means = np.average(gains)\n medians = np.median(gains)\n low_25 = np.percentile(gains, 25)\n high_25 = np.percentile(gains, 75)\n '''\n freq,bins = np.histogram(gains, bins=25)\n freq = freq.astype('f')/sum(freq.astype('f'))\n \n \n if self.cumulative:\n \n bins_cp = np.array([(bins[a] + bins[a+1])/2.0 for a in range(len(bins)-1)])\n pos_freq = freq[bins_cp>=medians]\n neg_freq = freq[bins_cp<medians]\n pos_freq = np.array([sum(pos_freq[q:]) for q in range(len(pos_freq))])\n neg_freq = np.array([sum(neg_freq[q::-1]) for q in range(len(neg_freq))])\n freq = np.hstack([neg_freq, pos_freq])\n '''\n freq = []\n step = 1\n rng = range(0,51,step)[1:]\n freq += rng\n bins = np.percentile(gains, rng)\n \n rng = range(50,101,step)[1:]\n freq += [100 - a for a in rng]\n bins = np.hstack([bins, np.percentile(gains, rng)])\n freq = np.array(freq)\n \n \"\"\"\n for a in range(0,51,step)[1:]:\n freq.append(a)\n bins.append(np.percentile(gains, a))\n for a in range(50,101,step)[1:]:\n freq.append(100 - a)\n bins.append(np.percentile(gains, a)) \n bins = np.array(bins)\n freq = np.array(freq)\n \"\"\"\n \n X = np.ones(len(bins))*self.daysHeld\n Y = bins\n C = freq\n \n return (means, medians, X, Y, C, self.daysHeld, low_25, high_25, self.wastedTime)", "def processMarketOrders(self):\n try:\n nextRound = self.currentRound+1\n resultsList = []\n master = {}\n self.genMarketStat()\n myMarketStat = self.marketStats[str(self.currentRound)]\n \n # sorted lists of market orders\n master['buyAL'] = anwp.func.funcs.sortDictByChildObjValue(self.marketOrders, 'max', True, {'value':'AL', 'min':0})\n master['buyEC'] = anwp.func.funcs.sortDictByChildObjValue(self.marketOrders, 'max', True, {'value':'EC', 'min':0})\n master['buyIA'] = anwp.func.funcs.sortDictByChildObjValue(self.marketOrders, 'max', True, {'value':'IA', 'min':0})\n master['sellAL'] = anwp.func.funcs.sortDictByChildObjValue(self.marketOrders, 'min', False, {'value':'AL', 'max':0})\n master['sellEC'] = 
anwp.func.funcs.sortDictByChildObjValue(self.marketOrders, 'min', False, {'value':'EC', 'max':0})\n master['sellIA'] = anwp.func.funcs.sortDictByChildObjValue(self.marketOrders, 'min', False, {'value':'IA', 'max':0})\n \n for res in ['AL', 'EC', 'IA']:\n for sellOrder in master['sell%s' % res]:\n # min sell order gets first chance to sell its product\n if sellOrder.amountUsed == sellOrder.amount:\n pass # seller has sold all he wants with this order\n else:\n i = 0\n for buyOrder in master['buy%s' % res]:\n # determine price, allow for bidding on price\n try:\n nextBuyOrder = master['buy%s' % res][i+1]\n if nextBuyOrder.max < buyOrder.max and (nextBuyOrder.max+1) >= sellOrder.min:\n price = nextBuyOrder.max + 1\n else:\n price = buyOrder.max\n except IndexError:\n price = buyOrder.max\n # max buy order gets first chance to buy sellers product\n resultsList.append(self.processMarketTransaction(buyOrder, sellOrder, price))\n i += 1\n \n # set the average market prices for this round\n if getattr(myMarketStat, 'volSold%s' % res) > 0:\n setattr(myMarketStat, 'avgSold%s' % res, (getattr(myMarketStat, 'sumSold%s' % res) / \n getattr(myMarketStat, 'volSold%s' % res)))\n \n # clean up market orders for next round\n for orderID in self.marketOrders.keys():\n myMarketOrder = self.marketOrders[orderID]\n myMarketOrder.cleanUp()\n if myMarketOrder.amount == 0:\n resultsList.append('cancel market Order=%s' % orderID)\n self.cancelMarketOrder(orderID)\n \n return str(resultsList)\n except:\n return 'galaxy->processMarketOrders error'", "def minimize_risk(data, returns=None, strict=True, riskfree=None, max_alloc=1,\n short_sell=False, scale=1, ret=False, verbose=True, plotit=False):\n logger = logging.getLogger(__name__)\n if ret:\n weekly = data\n else:\n weekly = get_returns(data, 'simple')\n ret = weekly.mean().values * scale\n cov = weekly.cov().values * scale\n if short_sell:\n return pd.DataFrame()\n n = data.shape[1]\n if riskfree is None:\n aloc = pd.DataFrame(columns=np.append(data.columns, ['Volatility','Return']))\n bounds = [(0,max_alloc)]*n\n else:\n ret = np.append(ret, riskfree)\n cov = np.hstack([ np.vstack([cov,np.zeros([1,n])]), np.zeros([n+1,1]) ])\n aloc = pd.DataFrame(columns=np.append(data.columns, ['risk-free','Volatility','Return']))\n bounds = [(0,max_alloc)]*n + [(0,1)]\n n += 1\n if returns is None:\n returns = np.linspace(min(ret),max(ret), 25, endpoint=True)\n\n from scipy.optimize import minimize\n from basic.useful import progress_bar\n def func(alpha):\n def loss(x):\n return x.dot(cov).dot(x)\n def jac(x):\n return cov.dot(x) * 2\n cons1 = {'type':'eq',\n 'fun': lambda x: np.ones(n).dot(x) - 1,\n 'jac': lambda x: np.ones(n)}\n types = 'eq'\n if not strict: types = 'ineq'\n cons2 = {'type':types,\n 'fun': lambda x: ret.dot(x) - alpha,\n 'jac': lambda x: ret}\n x = minimize(loss, np.ones(n)/n, jac=jac, constraints=[cons1,cons2], bounds=bounds, method='SLSQP')\n aloc.loc[alpha, :] = np.append(np.round(x['x'],4), [np.sqrt(x['fun']), ret.dot(x['x'])] )\n return \"\"\n progress_bar(returns, func, disable=not verbose)\n if plotit:\n import matplotlib.pyplot as plt\n from invest.plot import return_vol\n vol = np.sqrt( np.diag(cov) )\n return_vol(ret, vol, data.columns)\n plt.plot(aloc.Volatility*100, aloc.Return*100, '.-')\n sharpe = aloc.Return/aloc.Volatility\n arg = sharpe.argmax()\n plt.plot(aloc.Volatility[arg]*100, aloc.Return[arg]*100, 'rX', markersize=12)\n print(\"Max Sharpe ratio is {:.2f}\".format(sharpe[arg]))\n return aloc.astype(float)", "def minimize(self):\n raise 
NotImplementedError", "def CodePagesToReachedSize(reached_symbol_names, page_to_symbols):\n reached_symbol_names = set(reached_symbol_names)\n page_to_reached = {}\n for offset in page_to_symbols:\n total_size = sum(x[1] for x in page_to_symbols[offset])\n reached_size = sum(\n size_in_page for (name, size_in_page) in page_to_symbols[offset]\n if name in reached_symbol_names)\n page_to_reached[offset] = {'total': total_size, 'reached': reached_size}\n return page_to_reached", "def analyze(allocs, stackstr, progname, depth, threshold_mallocs, threshold_score):\n if len(allocs) < int(threshold_mallocs):\n # Ignore call sites with too few mallocs\n return []\n analyzed_list = []\n # The set of sizes of allocated objects.\n sizes = set()\n # A histogram of the # of objects allocated of each size.\n size_histogram = defaultdict(int)\n # mallocs - frees (of things allocated in this context)\n actual_footprint = 0\n # max actual_footprint\n peak_footprint = 0\n # index of alloc w/max footprint\n peak_footprint_index = 0\n # sum(mallocs) = the amount of memory used if frees were ignored\n nofree_footprint = 0\n # set of all thread ids used for malloc/free\n tids = set()\n # set of all (currently) allocated objects from this site\n mallocs = set()\n # total number of allocations\n num_allocs = 0\n # was size ever invoked? true iff size was invoked\n size_taken = False\n # true iff all size requests were properly aligned\n all_aligned = True\n # amount of space that would leak if frees were ignored\n would_leak = 0\n for (index, i) in enumerate(allocs):\n # If a size was taken, record this fact and continue.\n if i[\"action\"] == \"S\":\n size_taken = True\n continue\n if len(i[\"stack\"]) < depth:\n continue\n sizes.add(i[\"size\"])\n size_histogram[i[\"size\"]] += 1\n tids.add(i[\"tid\"])\n if i[\"action\"] == \"M\":\n if i[\"reqsize\"] == 0 or i[\"reqsize\"] % 16 != 0:\n # if all_aligned:\n # print(\"FIXME first reqsize not aligned: \" + str(i[\"reqsize\"]))\n all_aligned = False\n num_allocs += 1\n # Compute actual footprint (taking into account mallocs and frees).\n actual_footprint += i[\"size\"]\n if actual_footprint > peak_footprint:\n peak_footprint = actual_footprint\n peak_footprint_index = index\n # Compute total 'no-free' memory footprint (excluding frees) This\n # is how much memory would be consumed if we didn't free anything\n # until the end (as with regions/arenas). 
We use this to compute a\n # \"region score\" later.\n nofree_footprint += i[\"size\"]\n # Record the malloc so we can check it when freed.\n mallocs.add(i[\"address\"])\n elif i[\"action\"] == \"F\":\n if i[\"address\"] in mallocs:\n # Only reclaim memory that we have already allocated\n # (others are frees to other call sites).\n actual_footprint -= i[\"size\"]\n mallocs.remove(i[\"address\"])\n else:\n would_leak += i[\"size\"]\n # print(mallocs)\n # print(str(i[\"address\"]) + \" not found\")\n # Compute region_score (0 is worst, 1 is best - for region replacement).\n region_score = 0\n if nofree_footprint != 0:\n region_score = peak_footprint / nofree_footprint\n if region_score >= float(threshold_score):\n stk = eval(stackstr)\n output = {\n \"stack\": stk,\n \"allocs\": num_allocs,\n \"region_score\": region_score,\n \"threads\": tids,\n \"sizes\": sizes,\n \"size_histogram\": size_histogram,\n \"peak_footprint\": peak_footprint,\n \"nofree_footprint\": nofree_footprint,\n \"potential_leaks\": would_leak,\n \"size_taken\": size_taken,\n \"all_aligned\": all_aligned,\n }\n analyzed_list.append(output)\n return analyzed_list", "def stocks(values, maxSales):\n return 0", "def calc_performance(self):\n for symbol in self.portfolio.assets.keys():\n\n # Total the Performance of all the trades\n start = self.portfolio.trades[symbol].index[0]\n end = self.portfolio.trades[symbol].index[-1]\n trades = len(self.record[symbol])\n profit = self.record[symbol]['profit'].sum()\n loss = self.record[symbol]['loss'].sum()\n # Total or average the trade info for all the trades\n try:\n wins = len(self.record[symbol].groupby('win/loose').groups['w'])\n except (ValueError, KeyError):\n wins = 0\n try:\n losses = len(self.record[symbol].groupby('win/loose').groups['l'])\n except (ValueError, KeyError):\n losses = 0\n try:\n washes = len(self.record[symbol].groupby('win/loose').groups['-'])\n except (ValueError, KeyError):\n washes = 0\n max_drawdown = self.record[symbol]['drawdown'].max()\n average_drawdown = self.record[symbol]['drawdown'].mean()\n max_drawdown_time = self.record[symbol]['drawdown days'].max()\n average_drawdown_time = self.record[symbol]['drawdown days'].mean()\n # Average the risk and market comparisons for all trades\n vol_risk = self.record[symbol]['volatility'].mean()\n beta = self.record[symbol]['beta'].mean()\n lpm_risk = self.record[symbol]['lpm'].mean()\n e_r = self.record[symbol]['expected_return'].mean()\n # Calculate Risk measures\n treynor_ratio = (e_r - self.risk_free_return) / beta\n sharpe_ratio = (e_r - self.risk_free_return) / vol_risk\n # Package up the data for each symbol\n self.performance[symbol] = {\n 'start': start,\n 'end': end,\n 'trades': trades,\n 'wins': wins,\n 'losses': losses,\n 'washes': washes,\n 'profit': profit,\n 'loss': loss,\n 'net_profit': profit - loss,\n 'profit_factor': profit / loss if loss != 0 else 1.0,\n 'percent_profitable': wins / trades if trades != 0 else 0.0,\n 'average_trade_net_profit' : (profit - loss) / trades if trades != 0 else 0.0,\n 'max_drawdown' : max_drawdown,\n 'average_drawdown' : average_drawdown,\n 'max_drawdown_days' : max_drawdown_time,\n 'average_drawdown_days' : average_drawdown_time,\n 'volatility_risk' : vol_risk,\n 'beta' : beta,\n 'lower_partial_moment_risk' : lpm_risk,\n 't_r' : treynor_ratio,\n 's_r' : sharpe_ratio\n }\n\n return self", "def reduceUniverse(self):\r\n self.bondList = list(set([bond for grid in self.parent.gridList for bond in grid.bondList]))#set removes duplicates\r\n self.df = 
self.df.reindex(self.bondList)\r\n self.df = self.df[pandas.notnull(self.df['ISIN'])]\r\n self.rfbonds = list(self.df.loc[self.df['TICKER'].isin(self.riskFreeIssuers)].index)\r\n self.embondsisins = self.df.loc[~self.df['TICKER'].isin(self.riskFreeIssuers), 'ISIN']\r\n self.rfbondsisins = self.df.loc[self.df['TICKER'].isin(self.riskFreeIssuers), 'ISIN']", "def find_max_profit(stock_prices,k):\n\teliminated_indices = set()\n\ttotal_profit = 0\n\n\t\n\tfor i in range(0,k):\n\t\tmax_profit = float('-inf')\n\t\tmin_price = float('inf')\n\t\t\n\t\tfor current_index,current_price in enumerate(stock_prices):\n\t\t\t# This condition takes care of note by making sure that \n\t\t\t# prices are not used in previous transaction.\n\t\t\tif current_index not in eliminated_indices:\n\t\t\t\tcurrent_profit = current_price - min_price\n\n\t\t\t\tif (current_profit > max_profit):\n\t\t\t\t\tbuying_price_index = min_price_index\n\t\t\t\t\tselling_price_index = current_index\n\t\t\t\t\tmax_profit = current_profit\n\n\t\t\t\t#min_price = min(min_price, current_price)\n\t\t\t\tif (current_price < min_price):\n\t\t\t\t\tmin_price = current_price\n\t\t\t\t\tmin_price_index = current_index\n\n\n\t\t# This for loop is to take care of Note\n\t\tfor i in range(buying_price_index,selling_price_index+1):\n\t\t\teliminated_indices.add(i)\n\n\t\ttotal_profit += max_profit\n\t\tprint('buying_price_index :',buying_price_index)\n\t\tprint(\"selling_price_index :\",selling_price_index)\n\n\treturn total_profit", "def budget_analysis(models, blocks, num_sweeps, sample_sizes, data, K, CUDA, device, batch_size=100):\n result_flags = {'loss_required' : False, 'ess_required' : True, 'mode_required' : False, 'density_required': True}\n\n ess = []\n density = []\n num_batches = int((data.shape[0] / batch_size))\n metrics = {'block' : [], 'num_sweeps' : [], 'sample_sizes' : [], 'ess' : [], 'density' : []}\n for block in blocks:\n for i in range(len(num_sweeps)):\n metrics['block'].append(block)\n time_start = time.time()\n num_sweep = int(num_sweeps[i])\n sample_size = int(sample_sizes[i])\n metrics['num_sweeps'].append(num_sweep)\n metrics['sample_sizes'].append(sample_size)\n resampler = Resampler(strategy='systematic',\n sample_size=sample_size,\n CUDA=CUDA,\n device=device)\n ess, density = 0.0, 0.0\n for b in range(num_batches):\n x = data[b*batch_size : (b+1)*batch_size].repeat(sample_size, 1, 1, 1)\n if CUDA:\n x = x.cuda().to(device)\n trace = apg_objective(models, x, result_flags, num_sweeps=num_sweep, block=block, resampler=resampler)\n ess += trace['ess'][-1].mean().item()\n density += trace['density'][-1].mean().item()\n metrics['ess'].append(ess / num_batches / sample_size)\n metrics['density'].append(density / num_batches)\n time_end = time.time()\n print('block=%s, num_sweep=%d, sample_size=%d completed in %ds' % (block, num_sweep, sample_size, time_end-time_start))\n return pd.DataFrame.from_dict(metrics)", "def create_get_average_price_request(self, symbol: str) -> Request:", "def _iter_assignments_by_transfer_sizes(self, worker_quotas, input_chunk_metas):\n total_transfers = dict((k, sum(v.chunk_size for v in chunk_to_meta.values()))\n for k, chunk_to_meta in input_chunk_metas.items())\n # operands with largest amount of data will be allocated first\n sorted_chunks = sorted(total_transfers.keys(), reverse=True,\n key=lambda k: total_transfers[k])\n for op_key in sorted_chunks:\n # compute data amounts held in workers\n worker_stores = defaultdict(lambda: 0)\n for meta in input_chunk_metas[op_key].values():\n for w 
in meta.workers:\n worker_stores[w] += meta.chunk_size\n\n max_size, max_workers = self._get_workers_with_max_size(worker_stores)\n if max_workers and max_size > 0.5 * total_transfers[op_key]:\n max_worker = random.choice(max_workers)\n if worker_quotas.get(max_worker, 0) <= 0:\n continue\n worker_quotas[max_worker] -= 1\n yield op_key, max_worker", "def _assign_sizes(self):", "def add_stock(self, symbol):\n verbose_message(\"Adding \" + symbol + \"...\")\n if symbol not in self.stocks:\n self.stocks += [symbol]\n\n data = StockData()\n\n data.name = StockDataCollection.get_stock_name(symbol)\n data.symbol = symbol\n data.market = StockDataCollection.get_market_data(symbol,\n str(self.start_date)[:USEFUL_TIMESTAMP_CHARS],\n str(self.end_date)[:USEFUL_TIMESTAMP_CHARS])\n\n # create a list of dates in the YYYY-MM-DD format\n data.str_dates = [str(i)[:USEFUL_TIMESTAMP_CHARS] for i in list(data.market.index)]\n data.dates = data.market.index\n\n for i in data.dates:\n if i not in self.dates:\n self.dates += [i]\n self.dates.sort()\n self.str_dates = [str(i)[:USEFUL_TIMESTAMP_CHARS] for i in list(self.dates)]\n\n for collection_function in self.features:\n collection_function(data)\n\n data.position = []\n for _ in data.dates:\n data.position += [0]\n if type(self.cash) is not pd.DataFrame:\n self.cash += [self.starting_capital]\n\n data.position = pd.DataFrame({\"Position\": data.position}).set_index(data.dates)\n if type(self.cash) is not pd.DataFrame:\n self.cash = pd.DataFrame({\"cash\": self.cash}).set_index(data.dates)\n debug_message(data)\n self.shuffled_data_reset()\n self.stock_data[symbol] = data", "def sort(self): # sort all entries to make room for new ones, determine best and worst\n ns = self.num_stored.value\n ys = np.asarray(self.ys[:ns])\n yi = ys.argsort()\n sortRuns = []\n for i in range(len(yi)):\n y = ys[yi[i]]\n xs = self.get_x(yi[i])\n sortRuns.append((y, xs))\n numStored = min(len(sortRuns),int(0.9*self.capacity)) # keep 90% best \n for i in range(numStored):\n self.replace(i, sortRuns[i][0], sortRuns[i][1])\n self.num_sorted.value = numStored \n self.num_stored.value = numStored \n return numStored", "def costFun(self, S, x):", "def iex_equities(symbols):\n # strict this in memory so that we can reiterate over it\n symbols = tuple(symbols)\n\n def ingest(environ,\n asset_db_writer,\n minute_bar_writer, # ignored\n daily_bar_writer,\n adjustment_writer,\n calendar,\n start_session, # ignored\n end_session, # ignored\n cache,\n show_progress,\n output_dir):\n\n metadata = pd.DataFrame(np.empty(len(symbols), dtype=[\n ('start_date', 'datetime64[ns]'),\n ('end_date', 'datetime64[ns]'),\n ('auto_close_date', 'datetime64[ns]'),\n ('symbol', 'object'),\n ]))\n\n today = datetime.today()\n start = datetime(today.year-5,today.month,today.day)\n \n def _pricing_iter():\n sid = 0\n with maybe_show_progress(\n symbols,\n show_progress,\n label='Downloading IEX pricing data: ') as it, \\\n requests.Session() as session:\n for symbol in it:\n path = _cachpath(symbol, 'ohlcv')\n try:\n df = cache[path]\n except KeyError:\n df = cache[path] = get_historical_data(symbol, start=start, end=None, output_format='pandas').sort_index()\n df.index = pd.to_datetime(df.index)\n # the start date is the date of the first trade and\n # the end date is the date of the last trade\n start_date = df.index[0]\n end_date = df.index[-1]\n # The auto_close date is the day after the last trade.\n ac_date = end_date + pd.Timedelta(days=1)\n metadata.iloc[sid] = start_date, end_date, ac_date, symbol\n\n 
df.rename(\n columns={\n 'Open': 'open',\n 'High': 'high',\n 'Low': 'low',\n 'Close': 'close',\n 'Volume': 'volume',\n },\n inplace=True,\n )\n yield sid, df\n sid += 1\n\n daily_bar_writer.write(_pricing_iter(), show_progress=True)\n\n metadata['exchange'] = \"NYSE\"\n \n symbol_map = pd.Series(metadata.symbol.index, metadata.symbol)\n asset_db_writer.write(equities=metadata)\n\n adjustment_writer.write()\n\n return ingest", "def inout_creator(df = pd.DataFrame(), features='datosrahm.csv'):\r\n df = df\r\n \r\n start=time.time()\r\n \r\n datos=pd.read_csv(features)\r\n datos=datos.fillna(-1)\r\n\r\n dicc=dict(datos[['Symbol','Z']].values)\r\n\r\n dicc['D']=1\r\n dicc['Bk']=97\r\n dicc['Cf']=98\r\n dicc['Es']=99\r\n dicc['Fm']=100\r\n dicc['Md']=101\r\n dicc['No']=102\r\n dicc['Lr']=103\r\n \r\n max_sitios = max(df['sitios'].values)\r\n \r\n X=np.zeros((len(df),max_sitios,104))\r\n\r\n mult=np.zeros((len(df),max_sitios))\r\n wyckmul=np.load('support/WyckoffSG_dict.npy').item()['wyckmul']\r\n \r\n todelete = list()\r\n \r\n for row in range(len(df)):\r\n item=df['WyckOcc'][row]\r\n sitios=list(item.values()) \r\n sitocc=np.zeros((len(sitios),104)) \r\n spacegroup = str(df['sgnum'][row]).zfill(3)\r\n \r\n try:\r\n \r\n s=[int(wyckmul[spacegroup][i]) for j in [list(item.keys()) for item in \\\r\n sitios] for i in j]\r\n \r\n except:\r\n print(row)\r\n print('There exists an error concerning with the space group of CIF ', df['cif'][row],'\\n')\r\n print('Please check in www.crystallography.net to provide the correct space group number of that CIF',\r\n '\\n','\\n')\r\n spacegroup=input('Give me the correct spacegroup:'+'\\n'+'\\n')\r\n s=[int(wyckmul[spacegroup][i]) for j in [list(item.keys()) for item in \\\r\n list(df['WyckOcc'][row].values())] for i in j]\r\n \r\n occs=[]\r\n for i in range(len(sitios)):\r\n\r\n for j in list(sitios[i].values()):\r\n \r\n ocupacion=np.array(list(j.values()))\r\n llaves=[llave.replace('+','').replace('-','').replace('1',\r\n '').replace('2','').replace('3','').replace('4',\r\n '') for llave in np.array(list(j.keys()))]\r\n llaves=[llave.replace('.','') for llave in llaves]\r\n llaves=[llave.replace('5','').replace('6','').replace('7',\r\n '').replace('8','').replace('9','').replace('0',\r\n '') for llave in llaves]\r\n vector=np.zeros((1,104))\r\n occs=[sum(ocupacion)]+occs\r\n \r\n try:\r\n \r\n idx=[dicc[k] for k in llaves]\r\n \r\n except:\r\n print('The compound with the cif ', df['cif'][row], ' will be deleted')\r\n print('The database will be updated')\r\n todelete += [row]\r\n \r\n for k in idx:\r\n vector[0][k-1] = ocupacion[idx.index(k)]\r\n \r\n sitocc[i]=vector\r\n \r\n while sitocc.shape[0] != max_sitios:\r\n sitocc=np.concatenate((np.zeros((1,104)),sitocc))\r\n s=[0]+s\r\n \r\n X[row,:,:]=sitocc\r\n mult[row]=s\r\n \r\n features=datos.iloc[:,2:].values\r\n x=X[:,:,:96]\r\n \r\n fracsum = np.expand_dims(np.sum(x,axis=2), axis=2)\r\n \r\n x=np.dot(x,features) \r\n \r\n x = np.delete(x, todelete,axis=0)\r\n df = df.drop(df.index[todelete]).reset_index(drop=True)\r\n \r\n print('inout_creator lasted ',round(time.time()-start,2),' s') \r\n return x, fracsum, df", "def _get_position_sizes_dollars(self, sizes):\n if isinstance(sizes, dict):\n sizes = pd.Series(sizes)\n return (sizes * self.booksize).to_dict()", "def compute_increase_rate(input_data):\n rates = {}\n for comp in input_data:\n stock_prices = input_data[comp][1]\n rates[comp] = []\n for i in range(len(stock_prices)-1):\n # Add a new increase rate to the dictionary\n 
rates[comp].append((stock_prices[i] - stock_prices[i+1])/stock_prices[i+1])\n return rates", "def onAllocation(self, match):\n\t\tres=None\n\t\tsymbols=self.process(match[2])\n\t\trest=self.process(match[3])\n\t\tvalue=self.access(self.process(match[4]), 1)\n\t\tif ((len(symbols) == 1) and (not rest)):\n\t\t\tslot=F._slot(symbols[0].getReferenceName())\n\t\t\tres = [F.allocate(slot, value)]\n\t\telif True:\n\t\t\tres = []\n\t\t\tlast_symbol=symbols[-1]\n\t\t\tpivot_slot=F._slot(last_symbol.getReferenceName())\n\t\t\tres.append(F.allocate(pivot_slot, value))\n\t\t\tslot_value=F.resolve(F._ref(pivot_slot.getName()))\n\t\t\ti=0\n\t\t\tfor s in symbols:\n\t\t\t\tslot=F._slot(s.getReferenceName())\n\t\t\t\tsub_value=F.access(slot_value.copy(), F._number(i))\n\t\t\t\tif (s is last_symbol):\n\t\t\t\t\tres.append(F.assign(s.getReferenceName(), sub_value))\n\t\t\t\telif True:\n\t\t\t\t\tres.append(F.allocate(slot, sub_value))\n\t\t\t\ti = (i + 1)\n\t\t\tif rest:\n\t\t\t\tslot=F._slot(rest.getReferenceName())\n\t\t\t\tsub_value=F.slice(slot_value.copy(), i)\n\t\t\t\tres.append(F.allocate(slot, sub_value))\n\t\treturn res", "def main():\n href_list = fundlist.get_fund_list()\n\n single_values = None\n asset_allocations = None\n geo_allocations = None\n sector_allocations = None\n top10_holdings = None\n\n for href in href_list:\n url = 'http://idata.fundata.com' + href\n fund_profile = FundProfileScraper(url)\n\n value_dict = fund_profile.scrape_all_single_value()\n if single_values is None:\n single_values = pd.DataFrame([value_dict.values()],\n columns=value_dict.keys())\n else:\n temp_df = pd.DataFrame(value_dict.values(),\n columns=value_dict.keys())\n single_values.append(temp_df)\n\n asset_allocation_list = fund_profile.scrape_asset_allocation()\n allocations_with_href = [[href, asset_class]\n for asset_class in asset_allocation_list]\n if asset_allocations is None:\n asset_allocations = pd.DataFrame(\n allocations_with_href,\n columns=['href', 'asset_allocation']\n )\n else:\n temp_df = pd.DataFrame(\n allocations_with_href,\n columns=['href', 'asset_allocation']\n )\n asset_allocations.append(temp_df)\n\n\n geo_allocations_list = fund_profile.scrape_geo_allocation()\n geo_allocations_href = [[href, geo_class]\n for geo_class in geo_allocations_list]\n if geo_allocations is None:\n geo_allocations = pd.DataFrame(\n geo_allocations_href,\n columns=['href', 'geo_allocation']\n )\n else:\n temp_df = pd.DataFrame(\n geo_allocations_href,\n columns=['href', 'geo_allocation']\n )\n geo_allocations.append(temp_df)\n\n sector_allocations_list = fund_profile.scrape_sector_allocation()\n sector_allocations_href = [[href, sector_class]\n for sector_class in sector_allocations_list]\n if sector_allocations is None:\n sector_allocations = pd.DataFrame(\n sector_allocations_href,\n columns=['href', 'sector_allocation']\n )\n else:\n temp_df = pd.DataFrame(\n sector_allocations_href,\n columns=['href', 'sector_allocation']\n )\n sector_allocations.append(temp_df)\n\n top10_holding_list = fund_profile.scrape_top10_holdings()\n top10_holding_href = [[href, holding]\n for holding in top10_holding_list]\n if top10_holdings is None:\n top10_holdings = pd.DataFrame(\n top10_holding_href,\n columns=['href', 'holding']\n )\n else:\n temp_df = pd.DataFrame(\n top10_holding_href,\n columns=['href', 'holding']\n )\n top10_holdings.append(temp_df)\n\n time.sleep(randint(1, 5))\n\n single_values.to_pickle('./single_values.pkl')\n asset_allocations.to_pickle('./asset_allocations.pkl')\n 
geo_allocations.to_pickle('./geo_allocations.pkl')\n sector_allocations.to_pickle('sector_allocations.pkl')\n top10_holdings.to_pickle('top10_holdings.pkl')", "def _get_allocations_for_block(cls, block_trade, allocations):\n applicable_allocations = list()\n price = block_trade.Price()\n quantity = block_trade.Quantity()\n for allocation in allocations:\n if allocation.TrxTrade():\n continue\n if round(allocation.Price(), 2) != round(price, 2):\n continue\n if abs(allocation.Quantity()) > abs(quantity):\n continue\n if allocation.Portfolio() is not block_trade.Portfolio():\n continue\n if allocation.Instrument().Name() != block_trade.Instrument().Name():\n continue\n applicable_allocations.append(allocation)\n\n return applicable_allocations", "def ComputeRegenerativeBraking(self):\r\n pass", "def runApriori(data_iter, minSupport, minConfidence, tidToGranularityTid, granularityMin, maxK=99999999, removeImposibleImply=True, topFeaturesSet=set()):\n itemSet, transactionList, timestampList, itemTimestampIndexDict, granularityTimestampList = getItemSetTransactionList(data_iter, granularityMin)\n dateStampSet = set()\n for timestamp in timestampList:\n timestamp = dt.datetime.strptime(timestamp, '%Y-%m-%d %H:%M:%S').strftime('%Y-%m-%d')\n dateStampSet.add(timestamp)\n\n freqSet = defaultdict(int)\n largeSet = dict()\n # Global dictionary which stores (key=n-itemSets,value=support)\n # which satisfy minSupport\n\n assocRules = dict()\n # Dictionary which stores Association Rules\n\n start_ts = time.time()\n oneCSet = returnItemsWithMinSupport(itemSet,\n transactionList,\n timestampList,\n itemTimestampIndexDict,\n dateStampSet,\n minSupport,\n freqSet,\n tidToGranularityTid,\n granularityTimestampList)\n end_ts = time.time()\n print \"# LEN-1: \" + str(len(oneCSet))\n print \"Time: \" + str(end_ts - start_ts)\n\n currentLSet = oneCSet\n k = 2\n while(currentLSet != set([]) and k <= maxK):\n start_ts = time.time()\n largeSet[k - 1] = currentLSet\n if k == 2:\n currentLSet = joinSet(currentLSet, k, topFeaturesSet, True)\n else:\n currentLSet = joinSet(currentLSet, k, topFeaturesSet, True)\n currentCSet = returnItemsWithMinSupport(currentLSet,\n transactionList,\n timestampList,\n itemTimestampIndexDict,\n dateStampSet,\n minSupport,\n freqSet,\n tidToGranularityTid,\n granularityTimestampList)\n end_ts = time.time()\n print \"# LEN-%d: \" % (k) + str(len(currentCSet))\n print \"Time: \" + str(end_ts - start_ts)\n\n currentLSet = currentCSet\n k = k + 1\n\n def getSupport(item):\n \"\"\"local function which Returns the support of an item\"\"\"\n # return float(freqSet[item])/len(transactionList)\n return float(freqSet[item]) / len(dateStampSet)\n\n toRetItems = []\n for key, value in largeSet.items():\n toRetItems.extend([(tuple(item), getSupport(item))\n for item in value])\n toRetRules = []\n for key, value in largeSet.items()[1:]:\n for item in value:\n _subsets = map(frozenset, [x for x in subsets(item)])\n for element in _subsets:\n hasTime = False\n for i in element:\n if 'time_daily' in i:\n hasTime = True\n break\n if hasTime:\n remain = item.difference(element)\n if len(remain) > 0 and isImplyImpossible(element, remain):\n support = getSupport(item)\n confidence = support / getSupport(element)\n # confidence = getSupport(item)/getSupport(element)\n if confidence >= minConfidence:\n toRetRules.append(((tuple(element), tuple(remain)),\n confidence, support))\n return toRetItems, toRetRules, itemTimestampIndexDict, timestampList", "def utilization(allocs, n):\n assert n <= len(allocs)\n pages 
= defaultdict(int)\n in_use = 0\n mallocs = set()\n for (index, i) in enumerate(allocs):\n if index > n:\n break\n # Ignore objects larger than 1/2 the page size.\n if i[\"size\"] > Cheaper.__pagesize / 2:\n continue\n pageno = Cheaper.__pagesize * (i[\"address\"] // Cheaper.__pagesize)\n if i[\"action\"] == \"M\":\n mallocs.add(i[\"address\"])\n pages[pageno] += 1\n in_use += i[\"size\"]\n elif i[\"action\"] == \"F\":\n in_use -= i[\"size\"]\n if i[\"address\"] in mallocs:\n mallocs.remove(i[\"address\"])\n pages[pageno] -= 1\n if pages[pageno] == 0:\n del pages[pageno]\n if len(pages) > 0:\n return in_use / (Cheaper.__pagesize * len(pages))\n else:\n return 0", "def _shrink(self):\n self.capacity = round(self.capacity / self.factor)\n temp = [None] * self.capacity\n for i in range(self.capacity):\n temp[i] = self.store[i]\n self.store = temp", "def usefulquantities(dffin):\n dffin['log_length_box'] = np.log(dffin['length_box_um'])\n dffin['time_min']=dffin['time_sec']/60\n dffin['pred_length_box_um'] = np.exp(dffin['pred_log_length'])\n dffin['unique_id'] = dffin['cell']+dffin['time_sec'].apply(lambda x:str(x))\n dffin['cv_gr']= dffin.groupby('cell')['pred_growth_rate'].transform(lambda x:\\\n np.std(x)/np.mean(x))\n dffin['std_gr']= dffin.groupby('cell')['pred_growth_rate'].transform(lambda x: np.std(x))\n dffin['mean_gr'] = dffin.groupby('cell')['pred_growth_rate'].transform(lambda x: np.mean(x))\n dffin['mean_len'] = dffin.groupby('cell')['pred_length_box_um'].transform(lambda x: np.mean(x))\n dffin['norm_pred_growth_rate'] = (dffin['pred_growth_rate']-dffin.groupby('cell')['pred_growth_rate'].transform(lambda\\\n x: np.mean(x)))/dffin.groupby('cell')['pred_growth_rate'].transform(lambda x: np.mean(x))\n dffin = rl.genalogy(dffin,'parent_cell') #Create genealogy\n dffin = rl.genalogy(dffin,'g_parent_cell')\n dffin = rl.genalogy(dffin,'g_g_parent_cell')\n dffin = dffin.set_index('unique_id')\n qq= dffin.groupby('cell').apply(lambda x: (x['pred_length_box_um']-x['pred_length_box_um'].iloc[0])/(x['pred_length_box_um'].iloc[-1]-x['pred_length_box_um'].iloc[0])).rename('add_len')\n jj= dffin.groupby('cell').apply(lambda x: (x['time_sec']-x['time_sec'].iloc[0])/(x['time_sec'].iloc[-1]-x['time_sec'].iloc[0])).rename('cell_cycle')\n return pd.concat([dffin, qq.reset_index().set_index('unique_id')['add_len'], jj.reset_index().set_index('unique_id')['cell_cycle']], axis=1, join='inner')", "def quantify(self, instances):\n ...", "def setSymbolSize(x=30):\n dislin.hsymbl(x)", "def update_data(self, stocks=None):\n if stocks is None:\n self.collect_all_stock_data()\n return\n for stock in stocks:\n self.add_stock(stock)", "async def update_trade_sizes(self):\n\n if config['trade_size_mult'] is None:\n return\n\n trade_sizes = await self._get_num_open_group_trades()\n for group, num in trade_sizes.items():\n trade_size = config['trade_min_size'] * config['trade_size_mult'] * (num + 1)\n if trade_size > config['trade_max_size']: trade_size = config['trade_max_size']\n if trade_size < config['trade_min_size']: trade_size = config['trade_min_size']\n\n old_trade_size = self.trade_sizes[group]\n self.trade_sizes[group] = trade_size\n if not math.isclose(old_trade_size, trade_size):\n self.log.info(\"Group '{}' trade size updated to {}.\", group, trade_size)", "def calc():\n global last_free, iteration, pin\n mf = gc.mem_free()\n if False and last_free < mf:\n print(\">>> ran gc, iteration=\", iteration)\n last_free = mf\n # allocate memory\n x = \"abc\" + str(iteration)\n pin(not pin())\n sleep_ms(2)", 
"def minimize(state_num, accepted_list, alphabet, list_of_dict, chart_list):\n\n indistinguishable = []\n\n for state_one in range(state_num):\n for state_two in range(state_num):\n if state_one != state_two:\n for elem in alphabet:\n LOGF.write(f\"TEST : [{state_one}, {state_two}] \")\n LOGF.write(f\"WITH {elem} \")\n next_one = list_of_dict[state_one][elem]\n next_two = list_of_dict[state_two][elem]\n LOGF.write(f\" {state_one} --> {next_one} \")\n LOGF.write(f\" {state_two} --> {next_two}\\n\")\n if str(next_one) in accepted_list and str(next_two) not in accepted_list:\n LOGF.write(f\"FLAG : [{state_one}, {state_two}] WITH {elem} \"\n f\"{state_one} --> {next_one} \"\n f\"{state_two} --> {next_two}\\n\\n\")\n chart_list[state_one][state_two] = 'X'\n chart_list[state_two][state_one] = 'X'\n chartprint(chart_list)\n if str(next_one) not in accepted_list and str(next_two) in accepted_list:\n LOGF.write(f\"FLAG : [{state_one}, {state_two}] WITH {elem} \"\n f\"{state_one} --> {next_one} \"\n f\"{state_two} --> {next_two}\\n\\n\")\n chart_list[state_one][state_two] = 'X'\n chart_list[state_two][state_one] = 'X'\n chartprint(chart_list)\n\n i = 0\n while i < state_num:\n j = 0\n while j < state_num:\n if chart_list[i][j] == '_' and chart_list[j][i] == '_' and i != j:\n if str(i) not in accepted_list and str(j) not in accepted_list:\n indistinguishable.append([i, j])\n j = j +1\n i = i + 1\n\n result = []\n for elem in indistinguishable:\n if elem not in result and elem.reverse() not in result:\n result.append(elem)\n LOGF.write(f\"INDISTINGUISHABLE: {elem}\\n\")\n print(f\"INDISTINGUISHABLE: {elem}\")\n if result:\n LOGF.write(f\"INPUT DFA IS NOT MINIMIZED\\n\\n\")\n print(\"\\nINPUT DFA IS NOT MINIMIZED\\n\")\n print(\"NEW MINIMIZED DFA:\")\n minimize_out(state_num, accepted_list, alphabet, list_of_dict, result)\n else:\n LOGF.write(f\"INPUT DFA IS ALREADY MINIMIZED\\n\\n\")\n print(f\"INPUT DFA IS MINIMIZED DFA\")", "def eg_sk():\n\n rxs = []\n a = []\n b = []\n c = []\n d = []\n e = []\n f = []\n g = []\n h = []\n i = []\n j = []\n\n for _ in range(1000):\n a.append(utils.gaussian(10, 1))\n\n for _ in range(1000):\n b.append(utils.gaussian(10.1, 1))\n\n for _ in range(1000):\n c.append(utils.gaussian(20, 1))\n\n for _ in range(1000):\n d.append(utils.gaussian(30, 1))\n\n for _ in range(1000):\n e.append(utils.gaussian(30.1, 1))\n\n for _ in range(1000):\n f.append(utils.gaussian(10, 1))\n\n for _ in range(1000):\n g.append(utils.gaussian(10, 1))\n\n for _ in range(1000):\n h.append(utils.gaussian(40, 1))\n\n for _ in range(1000):\n i.append(utils.gaussian(40, 3))\n\n for _ in range(1000):\n j.append(utils.gaussian(10, 1))\n\n for k, v in enumerate([a, b, c, d, e, f, g, h, i, j]):\n rxs.append(creation.RX(v, \"rx{}\".format(k)))\n\n for rx in stats.tiles(stats.scottKnot(rxs)):\n print(\"\", rx[\"rank\"], rx[\"name\"], rx[\"show\"], sep=\"\\t\")", "def UpdateS1(i):\n Sum = 0.0\n for j in range(q):\n Sum1 = Kernel(i, int(WorkingSet[j,0]))\n Sum = Sum + (Difference[j])*y_train[int(WorkingSet[j,0])]*Sum1\n s1[i] = s1[i] + Sum\n return s1[i]", "def _greedy_packing(items: List[Item], cap: int,\n func: Callable) -> Tuple[Set[int], int]:\n items.sort(key=func)\n included = set()\n total_val, total_weight = 0, 0\n for item in items:\n if total_weight + item.weight > cap:\n continue\n included.add(item.idx)\n total_val += item.val\n total_weight += item.weight\n return included, total_val\n # Running time complexity: O(nlog n)", "def _remove_small_probabilities(pool):\n keys_to_remove = [key for key, 
value in pool.items() if value < 1e-8]\n for key in keys_to_remove:\n pool.pop(key)", "def oswmem_free_memory(self,min=0): \n result = self.df[self.df['free mmemory'] > min].all \n return result", "def update_bars(self):\n for s in self.symbol_list:\n try:\n bar = next(self._get_new_bar(s))\n except StopIteration:\n self.continue_backtest = False\n else:\n if bar is not None:\n self.latest_symbol_data[s].append(bar)\n self.events.put(MarketEvent())", "def maximize(self, budget, optimizer):\n\n\t\tpass", "def _compute_order_priorities_stats(self, orders):\n order_prices = {}\n tab_limits = {}\n tab_demands = {}\n total_fulfilled_prices = Counter()\n valid_statuses = set([Order.STATUS_OPEN, Order.STATUS_FULFILLED])\n\n for order in orders:\n if order.status not in valid_statuses:\n bodega_value_error(\n log,\n ('Order %s status %s is not valid for computing '\n 'price-based priority') % (order, order.status))\n\n order_price = 0.0\n if not order.maintenance:\n # We currently assume that each user has a single tab,\n # but this may change in the future.\n if order.tab.sid not in tab_limits:\n tab_limits[order.tab.sid] = order.tab.limit\n\n if order.tab.sid not in tab_demands:\n tab_demands[order.tab.sid] = 0.0\n\n # Compute order price as a sum of its items' prices.\n item_prices = \\\n self.item_tools.get_prices_for_items(order.items.items())\n order_price = sum(item_prices.values())\n\n if order.status == Order.STATUS_FULFILLED:\n total_fulfilled_prices[order.tab.id] += order_price\n\n tab_demands[order.tab.sid] += order_price\n\n log.debug('Order %s has a price of %s' % (order, order_price))\n order_prices[order.sid] = order_price\n\n total_tab_limit = sum(tab_limits.values())\n\n # Generate a list of tab_demands / tab_limit to compute the median\n # demand\n tab_demand_per_limit = sorted(\n [tab_demands[key] / tab_limits[key]\n for key in tab_demands])\n\n if total_tab_limit < 0:\n bodega_value_error(\n log,\n 'Total tab limit is negative: %s' % total_tab_limit)\n elif total_tab_limit == 0:\n if orders:\n bodega_value_error(\n log,\n ('Total tab limit is 0 for non-empty list of orders. 
'\n 'This may be due to a race condition in between the time '\n 'we collect the tab ids and fetch their limits.'))\n median_demand = None\n else:\n median_demand = statistics.median(tab_demand_per_limit)\n\n order_priority_stats = {\n 'median_demand': median_demand,\n 'order_prices': order_prices,\n 'tab_limits': tab_limits,\n 'total_fulfilled_prices': dict(total_fulfilled_prices)\n }\n\n log.debug('Order priority stats: %s' % order_priority_stats)\n return order_priority_stats", "def calculate_production_bonus(self, number_of_improvements, max_slots):\n pass", "def agregarprecios(obras):\n costos = lt.newList(datastructure='ARRAY_LIST')\n costototal = 0\n z = 1\n while z <= lt.size(obras):\n costofinal = 0\n pesofinal = 0\n costo_area = 0\n costo_volumen = 0\n if lt.getElement(obras,z)['Weight (kg)'] != '':\n costofinal = 72.00 * float(lt.getElement(obras,z)['Weight (kg)'])\n pesofinal += float(lt.getElement(obras,z)['Weight (kg)'])\n if lt.getElement(obras,z)['Diameter (cm)'] != '':\n costo_area = 72.00 * ((2 * 3.1416 * (float(lt.getElement(obras,z)['Diameter (cm)'])/2) * float(lt.getElement(obras,z)['Diameter (cm)']) + 2 * 3.1416 * ((float(lt.getElement(obras,z)['Diameter (cm)'])/2) ** 2))/10000)\n elif (lt.getElement(obras,z)['Height (cm)'] != '') and (lt.getElement(obras,z)['Depth (cm)'] != '') and (lt.getElement(obras,z)['Width (cm)'] != ''):\n costo_area = 72.00 * (((2 * float(lt.getElement(obras,z)['Height (cm)']) * (float(lt.getElement(obras,z)['Depth (cm)']) + float(lt.getElement(obras,z)['Width (cm)']))) + (2 * float(lt.getElement(obras,z)['Depth (cm)']) * float(lt.getElement(obras,z)['Width (cm)'])))/10000)\n elif (lt.getElement(obras,z)['Height (cm)'] != '') and (lt.getElement(obras,z)['Width (cm)'] != ''):\n costo_area = 72.00 * ((float(lt.getElement(obras,z)['Width (cm)']) * float(lt.getElement(obras,z)['Height (cm)']))/10000)\n if (lt.getElement(obras,z)['Diameter (cm)'] != '') and (lt.getElement(obras,z)['Height (cm)'] != ''):\n costo_volumen = 72.00 * (((3.1416 * (float(lt.getElement(obras,z)['Diameter (cm)'])/2) ** 2) * (float(lt.getElement(obras,z)['Height (cm)'])))/1000000)\n elif (lt.getElement(obras,z)['Height (cm)'] != '') and (lt.getElement(obras,z)['Depth (cm)'] != '') and (lt.getElement(obras,z)['Width (cm)'] != ''):\n costo_volumen = 72.00 * ((float(lt.getElement(obras,z)['Width (cm)']) * float(lt.getElement(obras,z)['Height (cm)']) * float(lt.getElement(obras,z)['Depth (cm)']))/1000000)\n if costo_area > costofinal:\n costofinal = costo_area\n if costo_volumen > costofinal:\n costofinal = costo_volumen\n if costofinal == 0:\n costofinal = 48.00\n lt.addLast(costos,lt.newList('ARRAY_LIST'))\n lt.addLast(lt.getElement(costos,z),lt.getElement(obras,z))\n lt.addLast(lt.getElement(costos,z),costofinal)\n costototal += costofinal\n z += 1\n return (costos,costototal,pesofinal)", "def standardComposition_Min(self):\n self.rulesList = []\n\n self.rulesList.append(np.fmin(self.rule1,self.below_price))\n self.rulesList.append(np.fmin(self.rule2,self.below_price))\n self.rulesList.append(np.fmin(self.rule3,self.below_price))\n self.rulesList.append(np.fmin(self.rule4,self.standard_price))\n self.rulesList.append(np.fmin(self.rule5,self.standard_price))\n self.rulesList.append(np.fmin(self.rule6,self.standard_price))\n self.rulesList.append(np.fmin(self.rule7,self.above_price))\n self.rulesList.append(np.fmin(self.rule8,self.above_price))\n self.rulesList.append(np.fmin(self.rule9,self.above_price))\n self.rulesList.append(np.fmin(self.rule10,self.high_price))\n 
self.rulesList.append(np.fmin(self.rule11,self.high_price))", "def update_graph_compound_costs(self):\n\n # # # Check if all costs are available\n if not self.compound_costs_solved:\n unsolved_cmp = [key for key, _ in self.compound_costs.items()]\n raise RuntimeError(\"The following cmp have no cost assigned:\\n\" + str(unsolved_cmp) +\n \"\\nReconsider the starting conditions.\")\n # # # Reset unique_iterator_list as graph changes\n self._reset_iterator_memory()\n for node in self.compound_costs.keys():\n # # # Loop over all edges of compound and manipulate weight\n for target_node, attributes in self.graph_handler.graph[node].items():\n required_compound_costs = np.asarray([self.compound_costs[k] for k in attributes['required_compounds']])\n tot_required_compound_costs = np.sum(required_compound_costs)\n # # # Set required compound costs in edge\n self.graph_handler.graph.edges[node,\n target_node]['required_compound_costs'] = tot_required_compound_costs\n # # # Add required compound costs to weight\n self.graph_handler.graph.edges[node, target_node]['weight'] += tot_required_compound_costs", "def open_orders_for(self, symbol, **kwargs):\n pass", "def minimize_individual_blocks(self):\n i = len(self.blocks) - 1\n while i >= 0:\n u, v = self.blocks[i].bounds\n Lexical.shrink(\n self.shrink_target.buffer[u:v],\n lambda b: self.try_shrinking_blocks((i,), b),\n random=self.random,\n full=False,\n )\n i -= 1", "def gen_910AQ_SO3H():\r\n q_smiles_base = {}\r\n q_smiles_mid = {}\r\n\r\n q_smiles_base['9,10AQ'] = 'O=C1C2C=CC=CC2C(=O)C2=C1C=CC=C2'\r\n q_smiles_base['9,10AQ,1-OH'] = 'OS(=O)(=O)C1=CC=CC2C1C(=O)C1=C(C=CC=C1)C2=O'\r\n q_smiles_base['9,10AQ,2-OH'] = 'OS(=O)(=O)C1=CC2C(C=C1)C(=O)C1=C(C=CC=C1)C2=O'\r\n q_smiles_base['9,10AQ,Full-OH'] = 'OS(=O)(=O)C1=C(C(=C(C2C1C(=O)C1=C(C2=O)C(=C(C(=C1S(O)(=O)=O)S(O)(=O)=O)S(O)(=O)=O)S(O)(=O)=O)S(O)(=O)=O)S(O)(=O)=O)S(O)(=O)=O'\r\n\r\n q_smiles_mid['9,10AQ'] = 'O=C1C2C=CC=CC2C(=O)C2=C1C=CC=C2'\r\n q_smiles_mid['9,10AQ,1-OH'] = 'OS(=O)(=O)C1=CC=CC2C1C(=O)C1=C(C=CC=C1)C2=O'\r\n q_smiles_mid['9,10AQ,2-OH'] = 'OS(=O)(=O)C1=CC2C(C=C1)C(=O)C1=C(C=CC=C1)C2=O'\r\n q_smiles_mid['9,10AQ,1,2-OH'] = 'OS(=O)(=O)C1=C(C2C(C=C1)C(=O)C1=C(C=CC=C1)C2=O)S(O)(=O)=O'\r\n q_smiles_mid['9,10AQ,Full-OH'] = 'OS(=O)(=O)C1=C(C(=C(C2C1C(=O)C1=C(C2=O)C(=C(C(=C1S(O)(=O)=O)S(O)(=O)=O)S(O)(=O)=O)S(O)(=O)=O)S(O)(=O)=O)S(O)(=O)=O)S(O)(=O)=O'\r\n\r\n return q_smiles_base, q_smiles_mid", "def process_lim(pool_lim, area):\n\n pool_nolim = [] # No limitation\n pool_lim_n = [] # N limitation\n pool_lim_p = [] # P limitation\n # Colimitation driven by N (When the realized NPP allocation is smaller\n # thant the potential due to N but the other element is also limitant)\n pool_colim_n = []\n # Colimitation driven by P (When the realized NPP allocation is smaller\n # than the potential due to P but the other element is also limitant\n pool_colim_p = []\n # Real Colimitation = K <= 1D-6 (K is difference between P and N realized NPP allocation)\n pool_colim_np = []\n\n ndays = pool_lim.shape[1]\n npls = pool_lim.shape[0]\n\n for pls in range(npls):\n if area[pls]:\n no_lim = (pool_lim[pls, :] == 0).sum() / ndays * area[pls]\n lim_n = (np.count_nonzero(\n pool_lim[pls, :] == 1) / ndays) * area[pls]\n lim_p = (np.count_nonzero(\n pool_lim[pls, :] == 2) / ndays) * area[pls]\n colim_n = (np.count_nonzero(\n pool_lim[pls, :] == 4) / ndays) * area[pls]\n colim_p = (np.count_nonzero(\n pool_lim[pls, :] == 5) / ndays) * area[pls]\n colim_np = (np.count_nonzero(\n pool_lim[pls, :] == 6) / ndays) * 
area[pls]\n\n pool_nolim.append(no_lim)\n pool_lim_n.append(lim_n)\n pool_lim_p.append(lim_p)\n pool_colim_n.append(colim_n)\n pool_colim_p.append(colim_p)\n pool_colim_np.append(colim_np)\n\n return (np.sum(pool_nolim),\n np.sum(pool_lim_n),\n np.sum(pool_lim_p),\n np.sum(pool_colim_n),\n np.sum(pool_colim_p),\n np.sum(pool_colim_np))", "def sc_QC(X: pd.DataFrame,\n min_lib_size: float = 1000,\n remove_outlier_cells: bool = True,\n min_percent: float = 0.05,\n max_mito_ratio: float = 0.1,\n min_exp_avg: float = 0,\n min_exp_sum: float = 0) -> pd.DataFrame:\n outlier_coef = 1.5\n X[X < 0] = 0\n lib_size = X.sum(axis=0)\n before_s = X.shape[1]\n X = X.loc[:, lib_size > min_lib_size]\n print(f\"Removed {before_s - X.shape[1]} cells with lib size < {min_lib_size}\")\n if remove_outlier_cells:\n lib_size = X.sum(axis=0)\n before_s = X.shape[1]\n Q3 = lib_size.to_frame().quantile(0.75, axis=0).values[0]\n Q1 = lib_size.to_frame().quantile(0.25, axis=0).values[0]\n interquartile_range = Q3 - Q1\n X = X.loc[:, (lib_size >= Q1 - interquartile_range * outlier_coef) &\n (lib_size <= Q3 + interquartile_range * outlier_coef)]\n print(f\"Removed {before_s - X.shape[1]} outlier cells from original data\")\n mt_genes = X.index.str.upper().str.match(\"^MT-\")\n if any(mt_genes):\n print(f\"Found mitochondrial genes: {X[mt_genes].index.to_list()}\")\n before_s = X.shape[1]\n mt_rates = X[mt_genes].sum(axis=0) / X.sum(axis=0)\n X = X.loc[:, mt_rates < max_mito_ratio]\n print(f\"Removed {before_s - X.shape[1]} samples from original data (mt genes ratio > {max_mito_ratio})\")\n else:\n warn(\"Mitochondrial genes were not found. Be aware that apoptotic cells may be present in your sample.\")\n before_g = X.shape[0]\n X = X[(X != 0).mean(axis=1) > min_percent]\n print(f\"Removed {before_g - X.shape[0]} genes expressed in less than {min_percent} of data\")\n\n before_g = X.shape[0]\n if X.shape[1] > 500:\n X = X.loc[X.mean(axis=1) >= min_exp_avg, :]\n else:\n X = X.loc[X.sum(axis=1) >= min_exp_sum, :]\n print(f\"Removed {before_g - X.shape[0]} genes with expression values: average < {min_exp_avg} or sum < {min_exp_sum}\")\n return X", "def compress(self):\n known_sizes = dict()\n\n # list to dict to make them unique\n for size in self.target_sizes:\n if size.length in known_sizes:\n known_sizes[size.length] += size.quantity\n else:\n known_sizes[size.length] = size.quantity\n\n # back to list again for compatibility\n self.target_sizes = [TargetSize(length=l, quantity=q) for (l, q) in known_sizes.items()]", "def calc_resources(self):\n self.popula = self.energy = self.popula_used = self.energy_used = 0\n self.cnt_public = self.cnt_shop = self.cnt_1 = self.cnt_2 = self.cnt_3 = self.cnt_4 = self.cnt_5 = self.cnt_office = 0\n self.popula += self.extra_pop\n for i in range(20):\n b = self.b[i]\n if b == 'T':\n self.popula += self.f[i] * 2\n self.energy_used += 1\n elif b == 'O':\n self.popula_used += 1\n self.energy_used += 1\n self.cnt_office += self.f[i]\n elif b == 'U':\n self.popula_used += 1\n self.cnt_public += 1\n elif b == 'S':\n self.energy_used += 1\n self.cnt_shop += 1\n elif b == '1':\n self.popula += 1\n self.energy += 1\n self.popula_used += 1\n self.cnt_1 += 1\n elif b == '2':\n self.popula_used += 1\n self.cnt_2 += 1\n elif b == '3':\n self.popula_used += 1\n self.cnt_3 += 1\n elif b == '4':\n self.popula += 2\n self.popula_used += 1\n self.cnt_4 += 1\n elif b == '5':\n self.energy += 2\n self.popula_used += 1\n self.cnt_5 += 1\n elif b == 'A':\n self.energy += 2\n self.popula_used += 1\n elif b == 
'F':\n self.energy += 3\n self.popula_used += 1\n elif b == 'G':\n self.popula += 1\n if 'tvst' in args.exp:\n self.popula += self.cnt_shop\n if 'ward' in args.exp:\n self.popula += 3\n if 'elec' in args.exp:\n self.energy += 3\n if 'capi' in args.exp:\n self.popula_used += 2\n if 'fire' in args.exp:\n self.popula_used += 1\n if 'park' in args.exp:\n self.popula_used += 1", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE IF YOU WANT TO PRACTICE ***\"\n aStarSearch(problem)", "def ram_condition(min_gb=3):\n return get_free_gb() < min_gb", "def get_cost(self, symbol) -> float:\n if len(symbol) <= 6:\n search = self.trader.stock_positions + self.trader.crypto_positions\n for p in search:\n if p['symbol'] == symbol:\n return p['avg_price']\n return None\n else:\n for p in self.trader.option_positions:\n if p['occ_symbol'] == symbol:\n return p['avg_price']", "async def _garbage_collect_live(self, base: str, trade_size: float, reserved: float):\n\n if not config['trade_garbage_collect']:\n return\n\n balance = await self.api.get_balance(base)\n if balance is None:\n self.log.error(\"Could not get available balance for {}!\", base)\n return\n\n base_mult = await self.market.get_base_mult(config['trade_base'], base)\n adjusted_balance = balance * base_mult - reserved\n\n if adjusted_balance >= trade_size:\n return\n\n open_trades_by_time = []\n for pair in self.trades:\n if pair.split('-')[0] == base:\n for trade in self.trades[pair]['open']:\n open_trades_by_time.append((trade['open_time'], trade))\n\n open_trades_sorted = [trade_tuple[1] for trade_tuple in sorted(open_trades_by_time, key=lambda x: x[0])]\n if open_trades_sorted:\n collect_trade = open_trades_sorted[0]\n utils.async_task(self._sell_live(collect_trade, 'COLLECT SELL', 'collect', remit=False), loop=common.loop)\n self.trades[collect_trade['pair']]['open'].remove(collect_trade)", "def calculateAllocations(rf, resource, ledgers, data):\n weights = {\n p: rf(l.debtRatio()) for p, l in ledgers.items() if not np.isclose(data[p], 0)\n }\n total_weight = sum(weight for weight in weights.values())\n return {\n p: np.round(resource * weight / total_weight, 1)\n for p, weight in weights.items()\n }", "def prep_alloction_fraction(gen_assoc):\n # flag whether the generator exists in the\n # generation table (this will be used later on)\n # for calculating ratios to use to allocate net generation\n gen_assoc = gen_assoc.assign(\n in_g_tbl=lambda x: np.where(\n x.net_generation_mwh_g_tbl.notnull(),\n True, False)\n )\n\n gens_gb = gen_assoc.groupby(by=IDX_PM_FUEL, dropna=False)\n # get the total values for the merge group\n # we would use on groupby here with agg but it is much slower\n # so we're gb-ing twice w/ a merge\n # gens_gb.agg({'net_generation_mwh_g_tbl': lambda x: x.sum(min_count=1),\n # 'capacity_mw': lambda x: x.sum(min_count=1),\n # 'in_g_tbl': 'all'},)\n gen_pm_fuel = (\n gen_assoc\n .merge( # flag if all generators exist in the generators_eia860 tbl\n gens_gb[['in_g_tbl']].all().reset_index(),\n on=IDX_PM_FUEL,\n suffixes=('', '_all')\n )\n .merge( # flag if some generators exist in the generators_eia860 tbl\n gens_gb[['in_g_tbl']].any().reset_index(),\n on=IDX_PM_FUEL,\n suffixes=('', '_any')\n )\n # Net generation and capacity are both proxies that can be used\n # to allocate the generation which only shows up in generation_fuel\n # Sum them up across the whole plant-prime-fuel group so we can tell\n # what fraction of the total capacity each generator is.\n .merge(\n (gens_gb\n [['net_generation_mwh_g_tbl', 
'capacity_mw']]\n .sum(min_count=1)\n .add_suffix('_pm_fuel')\n .reset_index()),\n on=IDX_PM_FUEL,\n )\n .assign(\n # fill in the missing generation with zeros (this will help ensure\n # the calculations to run the fractions in `calc_allocation_ratios`\n # can be consistent)\n net_generation_mwh_g_tbl=lambda x: x.net_generation_mwh_g_tbl.fillna(\n 0)\n )\n )\n # Add a column that indicates how much capacity comes from generators that\n # report in the generation table, and how much comes only from generators\n # that show up in the generation_fuel table.\n gen_pm_fuel = (\n pd.merge(\n gen_pm_fuel,\n gen_pm_fuel.groupby(by=IDX_PM_FUEL + ['in_g_tbl'], dropna=False)\n [['capacity_mw']].sum(min_count=1)\n .add_suffix('_in_g_tbl_group').reset_index(),\n on=IDX_PM_FUEL + ['in_g_tbl'],\n )\n )\n return gen_pm_fuel", "def _update_reserved_quantity(self, product_id, location_id, quantity, lot_id=None, package_id=None, owner_id=None,\n strict=False):\n self = self.sudo()\n rounding = product_id.uom_id.rounding\n quants = self._gather(product_id, location_id, lot_id=lot_id, package_id=package_id, owner_id=owner_id,\n strict=strict)\n reserved_quants = []\n\n if float_compare(quantity, 0, precision_rounding=rounding) > 0:\n # if we want to reserve\n available_quantity = self._get_available_quantity(product_id, location_id, lot_id=lot_id,\n package_id=package_id, owner_id=owner_id, strict=strict)\n if float_compare(quantity, available_quantity, precision_rounding=rounding) > 0:\n raise UserError(_('It is not possible to reserve more products of %s than you have in stock.',\n product_id.display_name))\n elif float_compare(quantity, 0, precision_rounding=rounding) < 0:\n # if we want to unreserve\n available_quantity = sum(quants.mapped('reserved_quantity'))\n # if float_compare(abs(quantity), available_quantity, precision_rounding=rounding) > 0:\n # raise UserError(_('It is not possible to unreserve more products of %s than you have in stock.',\n # product_id.display_name))\n else:\n return reserved_quants\n\n for quant in quants:\n if float_compare(quantity, 0, precision_rounding=rounding) > 0:\n max_quantity_on_quant = quant.quantity - quant.reserved_quantity\n if float_compare(max_quantity_on_quant, 0, precision_rounding=rounding) <= 0:\n continue\n max_quantity_on_quant = min(max_quantity_on_quant, quantity)\n quant.reserved_quantity += max_quantity_on_quant\n reserved_quants.append((quant, max_quantity_on_quant))\n quantity -= max_quantity_on_quant\n available_quantity -= max_quantity_on_quant\n else:\n max_quantity_on_quant = min(quant.reserved_quantity, abs(quantity))\n quant.reserved_quantity -= max_quantity_on_quant\n reserved_quants.append((quant, -max_quantity_on_quant))\n quantity += max_quantity_on_quant\n available_quantity += max_quantity_on_quant\n\n if float_is_zero(quantity, precision_rounding=rounding) or float_is_zero(available_quantity,\n precision_rounding=rounding):\n break\n return reserved_quants", "def cost(self):\n cost = {}\n if len(self.nodes) == 0:\n return cost\n resources = self.nodes[0].capacity.keys()\n for r in resources:\n values = [n.cost[r] for n in self.nodes]\n estimator = AvgAggregatorEstimator(values)\n cost[r] = estimator\n return cost", "def main():\n # Retrieve csv_file name an max_investment from argument passed in console:\n arg_csv_file, arg_max_investment = set_arg()\n if arg_csv_file:\n csv_file = arg_csv_file\n else:\n csv_file = 'dataset1_Python+P7.csv'\n if arg_max_investment:\n max_investment = float(arg_max_investment)\n else:\n max_investment = 
500.00\n\n # Retrieve dataset:\n base_dataset = open_convert_and_clean_csv(csv_file)\n\n # Retrieve solution:\n start = time.perf_counter()\n print()\n print(f\"Processing with file '{csv_file}' containing {len(base_dataset)} shares...\")\n print(f\"Maximum investment: {max_investment}€\")\n print(\"Please wait...\")\n computable_dataset = add_roi_to_dataset(convert_dataset_to_cents(base_dataset))\n best_roi, combination = best_combination_dynamic(computable_dataset, max_investment)\n\n # Formatting results:\n combination.sort(key=lambda x: x[2], reverse=True)\n combination_in_euros = convert_dataset_to_euros(combination)\n best_roi /= 100\n # Following calculus is done on cent prices (combination) to avoid approximations with floats\n shares_cost = calculate_shares_cost_sum(combination) / 100\n\n # Printing results:\n print()\n print(f\"Length of dataset: {len(computable_dataset)}\")\n print(f\"Duration of Analysis: {elapsed_time_formatted(start)}\")\n print()\n print(f\"Best Return on investment after 2 years: {round(best_roi, 2)}€\")\n print(f\"Number of shares to buy : {len(combination_in_euros)}\")\n print(f\"Total cost: {round(shares_cost, 2)}€\")\n print()\n print(f\"Best combination of shares ordered by performance: \")\n for share in combination_in_euros:\n print(f\"{share[0]} | Price: {share[1]}€ | profit: {share[2]}%\")\n print()", "def support(stock):\n output= stock_min(stock)+(stock_min(stock)*.05)\n return output", "def alphabet_minimize(self):\n\n # We perform our normalization in a random order. This helps give\n # us a good mix of likely to succeed (e.g. rare bytes) vs likely\n # to have a large impact (e.g. common bytes) without having to\n # have any idea which bytes are which.\n all_bytes = list(hrange(256))\n self.random.shuffle(all_bytes)\n\n for c in all_bytes:\n buf = self.buffer\n\n if c not in buf:\n continue\n\n def can_replace_with(d):\n if d < 0:\n return False\n\n if self.consider_new_buffer(hbytes([d if b == c else b for b in buf])):\n if d <= 1:\n # For small values of d if this succeeds we take this\n # as evidence that it is worth doing a a bulk replacement\n # where we replace all values which are close\n # to c but smaller with d as well. This helps us substantially\n # in cases where we have a lot of \"dead\" bytes that don't really do\n # much, as it allows us to replace many of them in one go rather\n # than one at a time. An example of where this matters is\n # test_minimize_multiple_elements_in_silly_large_int_range_min_is_not_dupe\n # in test_shrink_quality.py\n def replace_range(k):\n if k > c:\n return False\n\n def should_replace_byte(b):\n return c - k <= b <= c and d < b\n\n return self.consider_new_buffer(\n hbytes(\n [d if should_replace_byte(b) else b for b in buf]\n )\n )\n\n find_integer(replace_range)\n return True\n\n if (\n # If we cannot replace the current byte with its predecessor,\n # assume it is already minimal and continue on. This ensures\n # we make no more than one call per distinct byte value in the\n # event that no shrinks are possible here.\n not can_replace_with(c - 1)\n # We next try replacing with 0 or 1. 
If this works then\n # there is nothing else to do here.\n or can_replace_with(0)\n or can_replace_with(1)\n # Finally we try to replace with c - 2 before going on to the\n # binary search so that in cases which were already nearly\n # minimal we don't do log(n) extra work.\n or not can_replace_with(c - 2)\n ):\n continue\n\n # Now binary search to find a small replacement.\n\n # Invariant: We cannot replace with lo, we can replace with hi.\n lo = 1\n hi = c - 2\n while lo + 1 < hi:\n mid = (lo + hi) // 2\n if can_replace_with(mid):\n hi = mid\n else:\n lo = mid", "def short_training_symbol() -> np.ndarray:\n carriers = [0 + 0j] * 64\n carriers[-32] = 0\n carriers[-31] = 0\n carriers[-30] = 0\n carriers[-29] = 0\n carriers[-28] = 0\n carriers[-27] = 0\n carriers[-26] = 0\n carriers[-25] = 0\n carriers[-24] = 1 + 1j\n carriers[-23] = 0\n carriers[-22] = 0\n carriers[-21] = 0\n carriers[-20] = -1 - 1j\n carriers[-19] = 0\n carriers[-18] = 0\n carriers[-17] = 0\n carriers[-16] = 1 + 1j\n carriers[-15] = 0\n carriers[-14] = 0\n carriers[-13] = 0\n carriers[-12] = -1 - 1j\n carriers[-11] = 0\n carriers[-10] = 0\n carriers[-9] = 0\n carriers[-8] = -1 - 1j\n carriers[-7] = 0\n carriers[-6] = 0\n carriers[-5] = 0\n carriers[-4] = 1 + 1j\n carriers[-3] = 0\n carriers[-2] = 0\n carriers[-1] = 0\n carriers[0] = 0\n carriers[1] = 0\n carriers[2] = 0\n carriers[3] = 0\n carriers[4] = -1 - 1j\n carriers[5] = 0\n carriers[6] = 0\n carriers[7] = 0\n carriers[8] = -1 - 1j\n carriers[9] = 0\n carriers[10] = 0\n carriers[11] = 0\n carriers[12] = 1 + 1j\n carriers[13] = 0\n carriers[14] = 0\n carriers[15] = 0\n carriers[16] = 1 + 1j\n carriers[17] = 0\n carriers[18] = 0\n carriers[19] = 0\n carriers[20] = 1 + 1j\n carriers[21] = 0\n carriers[22] = 0\n carriers[23] = 0\n carriers[24] = 1 + 1j\n carriers[25] = 0\n carriers[26] = 0\n carriers[27] = 0\n carriers[28] = 0\n carriers[29] = 0\n carriers[30] = 0\n carriers[31] = 0\n return np.array(carriers) * np.sqrt(13 / 6)", "def _process_buy(self, base, close_arr, sample, pred_buy):\n for bt in range(len(self.btl)):\n if pred_buy >= self.btl[bt]:\n for st in range(len(self.stl)):\n if self.perf[bt, st, self.BUY_PRICE] == 0:\n # first buy of a possible sequence of multiple buys before sell\n self.perf[bt, st, self.BUY_PRICE] = close_arr[sample]\n self.perf[bt, st, self.BUY_IX] = sample", "def _load_costs(self):\n F_BM = self.F_BM\n F_D = self.F_D\n F_P = self.F_P\n F_M = self.F_M\n baseline_purchase_costs = self.baseline_purchase_costs\n purchase_costs = self.purchase_costs\n installed_costs = self.installed_costs\n \n # Load main costs\n for i in purchase_costs:\n if i not in baseline_purchase_costs:\n baseline_purchase_costs[i] = purchase_costs[i]\n for name, Cpb in baseline_purchase_costs.items(): \n if name in installed_costs and name in purchase_costs:\n continue # Assume costs already added elsewhere using another method\n F = F_D.get(name, 1.) * F_P.get(name, 1.) 
* F_M.get(name, 1.)\n try:\n installed_costs[name] = Cpb * (F_BM[name] + F - 1.)\n except KeyError:\n F_BM[name] = 1.\n installed_costs[name] = purchase_costs[name] = Cpb * F\n else:\n purchase_costs[name] = Cpb * F", "def normalize_quantities(self):\n return (\n pynini.cdrewrite(self.units_map, \"\", \"\", self.sigma_star, direction=\"ltr\") *\n pynini.cdrewrite(self.singularize_map, \"1 \", \"\", self.sigma_star, direction=\"ltr\") *\n pynini.cdrewrite(self.thousands_map, \"\", self.triple_digits, self.sigma_star, direction=\"ltr\") *\n pynini.cdrewrite(self.hundreds_map, \"\", self.double_digits, self.sigma_star, direction=\"ltr\") *\n pynini.cdrewrite(self.tens_map, \"\", self.digits, self.sigma_star, direction=\"ltr\") *\n pynini.cdrewrite(self.teens_map, \"\", \"\", self.sigma_star, direction=\"ltr\") *\n pynini.cdrewrite(self.ones_map, \"\", \"\", self.sigma_star, direction=\"ltr\") *\n pynini.cdrewrite(self.zero_del, \"\", \"\", self.sigma_star, direction=\"ltr\")\n )", "def measure_allocation_diversity_bounds_errors(self, slots_assegnation_probabilities, LP_news_pool, iter=5000):\n for tech in [\"rand_1\", \"rand_2\", \"rand_3\"]:\n max_errors_per_iter = []\n for k in range(iter):\n tmp_slots_assegnation_probabilities = []\n for elem in slots_assegnation_probabilities:\n tmp_slots_assegnation_probabilities.append(elem.copy())\n constraints_error = [0] * len(self.categories)\n promenance_per_category = [0] * len(self.categories)\n result = self.__de_randomize_LP(LP_news_pool, tmp_slots_assegnation_probabilities, tech)\n for i in range(len(result)):\n category_index = self.categories.index(result[i].news_category)\n promenance_per_category[category_index] += self.real_slot_promenances[i]\n\n for i in range(len(promenance_per_category)):\n if promenance_per_category[i] < self.B[i] * -1:\n constraints_error[i] += (self.B[i] * -1 - promenance_per_category[i]) / (self.B[i] * -1)\n\n max_errors_per_iter.append(np.mean(constraints_error))\n if tech == \"rand_1\":\n self.rand_1_errors += max_errors_per_iter\n elif tech == \"rand_2\":\n self.rand_2_errors += max_errors_per_iter\n else:\n self.rand_3_errors += max_errors_per_iter", "def dp_all(foods, cal_goal, pro_goal, carb_goal, fat_goal):\n costs = init_four_d_array((cal_goal, pro_goal, carb_goal, fat_goal),\n 999999999)\n foods_used = init_four_d_array((cal_goal, pro_goal, carb_goal, fat_goal),\n {})\n\n for i in range(cal_goal):\n for j in range(pro_goal):\n for k in range(carb_goal):\n for l in range(fat_goal):\n for n in range(len(foods)):\n food = foods[n]\n if (int(food['calories']) > i\n or int(food['protein']) > j\n or int(food['carbs']) > k\n or int(food['fat']) > l):\n continue\n if (costs[i - int(food['calories'])]\n [j - int(food['protein'])]\n [k - int(food['carbs'])]\n [l - int(food['fat'])]\n == 999999999):\n prev_cost = 0\n prev_foods_used = {}\n else:\n prev_cost = (macros[i - int(food['calories'])]\n [j - int(food['protein'])]\n [j - int(food['carbs'])]\n [j - int(food['fat'])])\n prev_foods_used = \\\n (foods_used[i - int(food['calories'])]\n [j - int(food['protein'])]\n [k - int(food['carbs'])]\n [l - int(food['fat'])]).copy()\n new_cal = calories(\n foods, prev_foods_used) + food['calories']\n new_pro = protein(\n foods, prev_foods_used) + food['protein']\n new_car = carbs(\n foods, prev_foods_used) + food['protein']\n new_fat = fat(\n foods, prev_foods_used) + food['protein']\n if (costs[i][j] > prev_cost + food['serving_cost']\n and new_cal > i - 20 and new_cal < i + 10\n and new_pro < j + 5 and new_pro < j + 5\n 
and new_car < j + 5 and new_car < j + 5\n and new_fat < j + 5 and new_fat < j + 5):\n costs[i][j][k][l] = prev_cost + \\\n food['serving_cost']\n try:\n prev_foods_used[n] += 1\n except KeyError:\n prev_foods_used[n] = 1\n foods_used[i][j][k][l] = prev_foods_used\n return foods_used[cal_goal - 1][pro_goal - 1][carb_goal - 1][fat_goal - 1]", "def _allocation_weights(self, write_gates, num_writes, usage):\n write_gates = write_gates.unsqueeze(-1)\n allocation_weights = []\n for i in range(num_writes):\n allocation = self._allocate(usage)\n allocation_weights.append(allocation)\n usage = usage + ((1-usage)*write_gates[:, i, :]*allocation_weights[i])\n allocation_weights = torch.stack(allocation_weights, 1)\n return allocation_weights", "def pro_rata(buy_orders, sell_orders):\n\n \n \"\"\"trade_matrix (numpy.array): matrix for traded shares set buy and sell shares to new amount\"\"\"\n\n if len(buy_orders) == 0 or len(sell_orders) == 0: return np.array([])\n current_buy_orders_length = len(buy_orders)\n current_sell_orders_length = len(sell_orders)\n\n # get total volume of buy\n volume_buy = 0\n for i in range(current_buy_orders_length):\n volume_buy += buy_orders[i].left_quantity\n\n # get total volume of sell\n volume_sell = 0\n for i in range(current_sell_orders_length):\n volume_sell += sell_orders[i].left_quantity\n\n # compare volumes\n if volume_sell > volume_buy:\n sell_buy_diff = volume_sell - volume_buy - 1\n while sell_buy_diff > 0:\n sell_buy_diff -= sell_orders[current_sell_orders_length - 1].left_quantity\n current_sell_orders_length -= 1\n\n sum_of_weighted_orders = 0\n\n for i in range(current_sell_orders_length):\n sum_of_weighted_orders += buy_orders[i].left_quantity * (i + 1)\n\n # list of transactions, line is seller(i), row is buyer(j)\n trade_matrix = np.zeros(shape=(len(sell_orders), len(buy_orders)))\n\n # time pro rata algorithm\n p = []\n for i in range(current_buy_orders_length):\n p.append((buy_orders[i].left_quantity * buy_orders[i].price * (i + 1)) / sum_of_weighted_orders)\n\n P = []\n for i in range(current_buy_orders_length):\n comp = [buy_orders[i].left_quantity * buy_orders[i].price, np.floor(p[i] * current_sell_orders_length)]\n P.append(np.min(comp))\n\n for i in range(current_sell_orders_length):\n while sell_orders[i].left_quantity > 0:\n for j in range(current_buy_orders_length):\n if P[j] > 0:\n P[j] -= 1\n buy_orders[j].left_quantity -= 1\n sell_orders[i].left_quantity -= 1\n trade_matrix[[i], [j]] += 1\n if sell_orders[i].left_quantity == 0:\n break\n\n return trade_matrix", "def getBuySellGains(series, trades):\n marketAlphas = others.daily_return(series)\n tradeGains = [\n np.product(np.add(np.divide(marketAlphas[t[0]:t[1]], 100), 1.0))\n for t in trades]\n return np.product(tradeGains)", "def lower_bound(stock):\n counter=0\n for i in stock_price(stock):\n if i <= support(stock):\n counter+=1\n return counter", "def ramp_up(self) -> None:\n self.cash_balance: float = self.initial_cash_balance()\n for stock in self.stocks:\n initial_date_idx = 0\n self.cash_balance = stock.buy(initial_date_idx, self.cash_balance, self.buy_budget)", "def cost_fun(self, specs_dict: Dict[str, float]) -> float:\n cost = 0\n for spec in self.spec_range.keys():\n penalty = self.compute_penalty(specs_dict[spec], spec)[0]\n cost += penalty\n\n return cost", "def calc_free_g(energies, temperatures):\n pass", "def _pick_sizes_assuming_oci_and_odi_at_most_one(\n osizes: Dict[str, int], imbalance_repair_level: float, bias_repair_level: float\n) -> Dict[str, int]:\n # inputs\n 
o00, o01, o10, o11 = osizes[\"00\"], osizes[\"01\"], osizes[\"10\"], osizes[\"11\"]\n # outputs: new intersection sizes n00, n01, n10, n11\n # constants\n oci = _class_imbalance(o00, o01, o10, o11)\n nci = oci + imbalance_repair_level * (1 - oci)\n odi = _disparate_impact(o00, o01, o10, o11)\n ndi = odi + bias_repair_level * (1 - odi)\n # we have two equations, one each for nci and ndi\n # nci == (n00 + n10) / (n01 + n11)\n # ndi == (n01 / (n00 + n01)) / (n11 / (n10 + n11))\n # without loss of generality, assume oci <= 1 and odi <= 1\n assert oci <= 1 and odi <= 1, _sizes_to_string(osizes, \"o\")\n # that means we do not need to upsample o11\n # we will set n11 == o11, leaving three unknowns: n00, n01, n10\n # two equations admit multiple solutions for the three unknowns\n # algorithm to pick the solution that minimizes the amount of oversampling:\n # - loop over candidate values for n00 in ascending order\n # - given n00, solve the equations to also pick values for n01 and n10\n # - terminate when all group sizes >= their original size\n\n invalid = {\"00\": -1, \"01\": -1, \"10\": -1, \"11\": -1}\n\n def solve_for_n01_n10_given_00_11(n00, n11):\n # rewriting the nci equation:\n # n01 == n00 / nci + n10 / nci - n11\n # rewriting the ndi equation:\n # ndi * n11 * n00 + ndi * n11 * n01 == n01 * n10 + n01 * n11\n # substituting n01 into the ndi equation:\n # ndi*n11*n00 + ndi*n11*(n00/nci + n10/nci - n11)\n # == n10*(n00/nci + n10/nci - n11) + n11*(n00/nci + n10/nci - n11)\n # rewriting this to the standard form of a quadratic equation for n10:\n # n10*n10\n # + n10*(n00 + n11 - n11*nci - ndi*n11)\n # + (n11*n00+ndi*n11*n11*nci-n11*n11*nci-ndi*n11*n00*nci-ndi*n11*n00)\n # == 0\n # assigning variables so the above is n10*n10 + n10 * b + c == 0:\n b = n00 + n11 - n11 * nci - ndi * n11\n c = (\n n11 * n00\n + ndi * n11 * n11 * nci\n - n11 * n11 * nci\n - ndi * n11 * n00 * nci\n - ndi * n11 * n00\n )\n # the square root of a negative number is imaginary\n if b * b - 4 * c < 0:\n return invalid\n # quadratic equations have two solutions\n n10_plus = (-b + (b * b - 4 * c) ** 0.5) / 2\n n01_plus = n00 / nci + n10_plus / nci - n11\n valid_plus = round(n01_plus) >= o01 and round(n10_plus) >= o10\n n10_minus = (-b - (b * b - 4 * c) ** 0.5) / 2\n n01_minus = n00 / nci + n10_minus / nci - n11\n valid_minus = round(n01_minus) >= o01 and round(n10_minus) >= o10\n if valid_plus and valid_minus: # pick solution minimizing n01 + n10\n if n01_plus + n10_plus < n01_minus + n01_minus:\n n01, n10 = n01_plus, n10_plus\n else:\n n01, n10 = n01_minus, n10_minus\n elif valid_plus:\n n01, n10 = n01_plus, n10_plus\n elif valid_minus:\n n01, n10 = n01_minus, n10_minus\n else:\n return invalid\n _assert_almost_equal(nci, _class_imbalance(n00, n01, n10, n11))\n _assert_almost_equal(ndi, _disparate_impact(n00, n01, n10, n11))\n return {\"00\": n00, \"01\": round(n01), \"10\": round(n10), \"11\": n11}\n\n nsizes = invalid\n # to minimize n00, search candidate values in ascending order\n for n00 in range(o00, sum(osizes.values()) + 1):\n nsizes = solve_for_n01_n10_given_00_11(n00, o11)\n okay = all(nsizes[k] >= osizes[k] for k in osizes)\n if okay:\n break\n if not all(nsizes[k] >= osizes[k] for k in osizes):\n logger.warning(f\"insufficient upsampling for {osizes}\")\n nsizes = {\n \"00\": max(osizes[\"00\"], osizes[\"01\"]),\n \"01\": max(osizes[\"00\"], osizes[\"01\"]),\n \"10\": max(osizes[\"10\"], osizes[\"11\"]),\n \"11\": max(osizes[\"10\"], osizes[\"11\"]),\n }\n return nsizes", "async def 
on_symbol_price_updated(self, price: MetatraderSymbolPrice):\n self._pricesBySymbol[price['symbol']] = price\n positions = list(filter(lambda p: p['symbol'] == price['symbol'], self._positions))\n orders = list(filter(lambda o: o['symbol'] == price['symbol'], self._orders))\n specification = self.specification(price['symbol'])\n if specification:\n for position in positions:\n if 'unrealizedProfit' not in position or 'realizedProfit' not in position:\n position['unrealizedProfit'] = (1 if (position['type'] == 'POSITION_TYPE_BUY') else -1) * \\\n (position['currentPrice'] - position['openPrice']) * \\\n position['currentTickValue'] * position['volume'] / specification['tickSize']\n position['realizedProfit'] = position['profit'] - position['unrealizedProfit']\n new_position_price = price['bid'] if (position['type'] == 'POSITION_TYPE_BUY') else price['ask']\n is_profitable = (1 if (position['type'] == 'POSITION_TYPE_BUY') else -1) * (new_position_price -\n position['openPrice'])\n current_tick_value = price['profitTickValue'] if (is_profitable > 0) else price['lossTickValue']\n unrealized_profit = (1 if (position['type'] == 'POSITION_TYPE_BUY') else -1) * \\\n (new_position_price - position['openPrice']) * current_tick_value * position['volume'] / \\\n specification['tickSize']\n position['unrealizedProfit'] = unrealized_profit\n position['profit'] = position['unrealizedProfit'] + position['realizedProfit']\n position['currentPrice'] = new_position_price\n position['currentTickValue'] = current_tick_value\n for order in orders:\n order['currentPrice'] = price['ask'] if (order['type'] == 'ORDER_TYPE_BUY_LIMIT' or\n order['type'] == 'ORDER_TYPE_BUY_STOP' or\n order['type'] == 'ORDER_TYPE_BUY_STOP_LIMIT') else price['bid']\n if self._accountInformation:\n self._accountInformation['equity'] = self._accountInformation['balance'] + \\\n functools.reduce(lambda a, b: a + b['profit'], self._positions, 0)" ]
[ "0.6063045", "0.53763187", "0.5290051", "0.52126926", "0.5207097", "0.5145416", "0.50932026", "0.50518227", "0.50404334", "0.49763635", "0.49679303", "0.49524197", "0.48846778", "0.48805937", "0.48631468", "0.4856346", "0.4832151", "0.48185173", "0.48169646", "0.48079696", "0.4805696", "0.47952592", "0.47914487", "0.47780216", "0.47716072", "0.47709826", "0.4770973", "0.47607952", "0.47441167", "0.4699819", "0.46973622", "0.4675014", "0.4672219", "0.4670411", "0.46688995", "0.46625882", "0.464189", "0.46381935", "0.46361518", "0.46357456", "0.46255845", "0.46240696", "0.46150497", "0.46067846", "0.45986652", "0.45926225", "0.45893216", "0.45891878", "0.458661", "0.4580492", "0.45797807", "0.45781294", "0.45730352", "0.4570539", "0.45653525", "0.45634875", "0.4557695", "0.45554432", "0.4547246", "0.45456907", "0.4541219", "0.45400167", "0.453861", "0.45330617", "0.4529123", "0.45248687", "0.4520879", "0.45189962", "0.4518299", "0.4511724", "0.45110127", "0.45055905", "0.45044628", "0.4503939", "0.45013416", "0.44981283", "0.44974184", "0.44965628", "0.44953635", "0.4491732", "0.4489509", "0.4482863", "0.4481091", "0.44810343", "0.44797754", "0.44769904", "0.44702214", "0.44691688", "0.44686723", "0.44679904", "0.44665268", "0.44635576", "0.44625375", "0.44614717", "0.44598502", "0.44588473", "0.4455082", "0.44534934", "0.445157", "0.44506973" ]
0.55457914
1
Given a starting value and prices of stocks in a portfolio with allocations, return the portfolio value over time.
def get_portfolio_value(prices, allocs, start_val):
    normed = prices/prices.iloc[0]
    alloced = np.multiply(allocs, normed)
    pos_vals = alloced * start_val
    port_val = pos_vals.sum(axis=1)
    return port_val
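A minimal usage sketch for the get_portfolio_value document above, assuming only what the code itself implies: numpy imported as np, pandas as pd, a DataFrame of prices whose columns line up with the allocation weights, and a scalar starting value. The tickers, prices, dates, weights, and starting value below are made-up assumptions for illustration.

import numpy as np
import pandas as pd

# Hypothetical three-asset daily price history (rows are trading days).
prices = pd.DataFrame(
    {"AAA": [10.0, 10.5, 10.2], "BBB": [20.0, 19.8, 20.4], "CCC": [5.0, 5.1, 5.3]},
    index=pd.date_range("2020-01-02", periods=3),
)
allocs = [0.4, 0.4, 0.2]   # assumed weights, one per price column, summing to 1
start_val = 1_000_000      # assumed starting portfolio value

# Prices are normalized to the first row, scaled by the weights and the
# starting value, then summed across assets into one value per day, so the
# first entry of the result equals start_val when the weights sum to 1.
port_val = get_portfolio_value(prices, allocs, start_val)
print(port_val)  # pandas Series of daily total portfolio value, indexed by date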
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_portvals(start_date, end_date, orders_file, start_val):\n \n #Read order file\n orders = pd.read_csv( orders_file, parse_dates = [0])\n \n #Get symbols making up the portfolio\n stock_symbols = list( set( orders[\"Symbol\"] ) )\n dates = pd.date_range(start_date, end_date)\n \n #Read stock prices\n stock_prices = get_data(stock_symbols, dates)\n \n #Create a portfolio keeping track of positions, \n #_CASH column indicates cash position, _VALUE total portfolio value\n #_LEVERAGE the leverage of portfolio when we allow for short selling\n symbols = stock_symbols[:] #Shallow copy of the list\n symbols.append(\"_CASH\")\n symbols.append(\"_VALUE\")\n symbols.append(\"_LEVERAGE\")\n \n #Index contains only business days, same dates as stock prices\n portfolio = pd.DataFrame(index=stock_prices.index, columns = symbols )\n portfolio.fillna(0) \n portfolio[\"_CASH\"][0] = start_val\n portfolio[\"_VALUE\"][0] = start_val\n \n #Snapshot of a portfolio at any time. To avoid using numerical indexes\n portfolio_snapshot = dict.fromkeys ( symbols, 0 )\n portfolio_snapshot[\"_CASH\"] = start_val\n portfolio[\"_VALUE\"] = start_val\n \n #Now calcualte portfolio day by day\n for date in portfolio.index:\n #Check transactions for the day\n day_orders = orders[ orders[\"Date\"] == date ] \n \n for ord in day_orders.iterrows():\n symbol = ord[1][ \"Symbol\"] \n stock_price = stock_prices[ symbol ][ date ]\n shares = ord[1][\"Shares\" ]\n side = ord[1][\"Order\"]\n \n if side == \"BUY\":\n portfolio_snapshot[ \"_CASH\" ] -= stock_price * shares\n portfolio_snapshot[ symbol ] += shares \n elif side == \"SELL\":\n portfolio_snapshot[ \"_CASH\" ] += stock_price * shares\n portfolio_snapshot[ symbol ] -= shares\n else:\n raise \"Order not recognized.\"\n \n #Compute portfolio value\n portfolio_snapshot[ \"_VALUE\" ] = portfolio_snapshot[ \"_CASH\" ]\n shorts = longs = 0\n for symbol in stock_symbols: \n stock_price = stock_prices[ symbol ][ date ]\n shares = portfolio_snapshot[ symbol ]\n notional = stock_price*shares\n if shares > 0:\n longs += notional\n else:\n shorts += notional\n \n portfolio_snapshot[ \"_VALUE\" ] += notional\n \n #Compute leverage\n leverage = (longs+shorts)/(longs-shorts + portfolio_snapshot[ \"_CASH\" ] )\n portfolio_snapshot[ \"_LEVERAGE\" ] = leverage\n \n #Assert we never achieve a leverage > 2.0\n if leverage > 2:\n raise \"Leverage > 2.0 achieved\"\n \n #Update portfolio from the daily snapshot\n #TODO: Is this causing performance issues?\n for symbol in portfolio.keys():\n portfolio[ symbol ][ date ] = portfolio_snapshot[ symbol ]\n \n return portfolio", "def compute_port_val(allocs, prices):\n # normalized price\n # normalized prices\n normed = prices/prices.iloc[0]\n prices.head()\n alloced = normed * allocs\n\n # position values\n start_val = 1 # included to simplify adding ability to calc as $\n pos_vals = alloced * start_val\n\n # portfolio value\n port_val = pos_vals.sum(axis=1)\n\n return port_val", "def getPortfolioValue(self, start_t, t):\n sum_tmp=0\n for item in self.portfolio.keys():\n if \"DJI_\" in item:\n t_tmp=datetime.strftime(pd.date_range(end=t,periods=1,freq='B')[0],'%Y-%m-%d')\n price=universe.get_price_in_currency(item,t_tmp,'CAD')\n elif 'rf_rate' in item:\n price=universe.get_security(item).get_cc_return(start_t,t) \n else:\n price=universe.get_price_in_currency(item,t,'CAD')\n #price=universe.get_security(item).price[t]\n amount=self.portfolio[item]\n sum_tmp=sum_tmp+price*amount\n \n return sum_tmp", "def compute_portvals(start_date, 
end_date, trades_df, start_val):\n # SETTING UP ORDERS DATAFRAME\n # Read orders file into a dataframe http://pandas.pydata.org/pandas-docs/stable/io.html#io-read-csv-table \n orders = trades_df\n symbols = np.unique(orders['Symbol']).tolist() # List of all the symbols used in orders\n\n # SETTING UP PRICES DATAFRAME\n # Read in adjusted closing prices for given symbols, date range... drop non-trading days... add cash column\n dates = pd.date_range(start_date, end_date)\n prices = get_data(symbols, dates, addSPY=False).dropna()\n prices['cash'] = 1.00\n\n # SETTING UP TRADES DATAFRAME\n # Daily snapshot of portfolio changes (+ = Buy Order, - = Sell Order) with cash adjustments\n trades = pd.DataFrame(0.00, index=prices.index, columns=symbols)\n trades['cash'] = 0.00\n\n for row_index, row in orders.iterrows():\n try:\n if row.Order == 'SELL':\n trades.ix[row.Date,row.Symbol] += (-1 * row.Shares) # Subtract ShareAmount for Sell \n trades.ix[row.Date,'cash'] += (row.Shares * prices.ix[row.Date, row.Symbol]) #adjust cash value for Sell\n elif row.Order == 'BUY':\n trades.ix[row.Date,row.Symbol] += (row.Shares) # Add ShareAmount for Buy\n trades.ix[row.Date,'cash'] += (-1 * row.Shares * prices.ix[row.Date, row.Symbol]) #adjust cash value for Buy\n else:\n print 'ERROR: order type not recognized, looking for BUY or SELL'\n except:\n print 'Unknown Error:'\n\n\n # SETTING UP HOLDINGS DATAFRAME \n # accumulating trades into holdings dataframe, snapshot of shares and cash for given day\n holdings = pd.DataFrame(0.00, index=prices.index, columns=symbols)\n holdings['cash'] = 0.00\n holdings.ix[start_date,'cash'] = start_val # add starting cash value\n previous_row = holdings.iloc[0]\n for row_index, row in holdings.iterrows():\n holdings.ix[row_index] = previous_row + trades.ix[row_index] #previous day's value + trades\n previous_row = row\n\n #SETTING UP VALUES DATAFRAME\n # convert shares into their respective dollar amounts\n values = pd.np.multiply(holdings, prices)\n #DAILY VALUE OF THE PORTFOLIO\n portvals = values.sum(axis=1)\n return portvals", "def test_best_allocation():\n\n # symbols = ['BRCM', 'TXN', 'IBM', 'HNZ'] \n symbols = ['AAPL', 'GOOG', 'IBM', 'MSFT']\n # ['GOOG','AAPL','GLD','XOM']\n basic_portfolio = BasicPortfolio(symbols, dt.datetime(2014, 1, 1), dt.datetime(2014, 12, 31))\n\n alloc = range(4)\n\n sharpe_max = 0\n alloc_max = alloc[:]\n\n for i in range(11):\n alloc[0] = i * 0.1\n for j in range(11 - i):\n alloc[1] = j * 0.1\n for k in range(11 - i - j):\n alloc[2] = k * 0.1\n alloc[3] = (10 - i - j - k) * 0.1\n\n vol, daily_ret, sharpe, cum_ret = \\\n basic_portfolio.analyze(alloc)\n\n if sharpe > sharpe_max:\n sharpe_max = sharpe\n alloc_max = alloc[:]\n\n print 'Best sharpe ratio is ', sharpe_max\n print 'Best allocation is', alloc_max\n\n ref_symbol = '$SPX'\n\n basic_portfolio.plot_with_reference(alloc_max, ref_symbol, source='local')", "def run(self, max_risk=0, min_return=0, num=0, init_holdings=None):\n if not self.dates:\n self.dates = ['2010-01-01', '2012-12-31']\n self.load_data()\n\n num_months = len(self.df_all)\n first_purchase = True\n result = {}\n baseline_result = {}\n self.baseline_values = [0]\n self.update_values = [0]\n months = []\n\n # Define dataframe to save output data \n headers = ['Date', 'Value'] + self.stocks + ['Variance', 'Returns']\n self.opt_results_df = pd.DataFrame(columns=headers)\n row = []\n\n self.price_df = pd.DataFrame(columns=self.stocks)\n\n # Initialize the plot\n plt.ylim(ymax = 1.5*self.budget, ymin = -1.5*self.budget)\n 
plt.xticks(list(range(0, num_months, 2)), \n self.df_baseline.index.strftime('%b')[::2], rotation='vertical')\n plt.locator_params(axis='x', nbins=num_months/2)\n plt.plot(list(range(0, num_months)), [0]*(num_months), \n color='red', label=\"Break-even\", linewidth=0.5)\n\n for i in range(3, num_months):\n\n # Look at just the data up to the current month\n df = self.df_all.iloc[0:i+1,:].copy()\n baseline_df_current = self.df_baseline.iloc[0:i+1,:]\n print(\"\\nDate:\", df.last_valid_index())\n months.append(df.last_valid_index().date()) \n\n if first_purchase:\n budget = self.budget\n initial_budget = self.budget\n baseline_shares = (budget / baseline_df_current.iloc[-1])\n baseline_result = {self.baseline[0]: baseline_shares} \n else:\n # Compute profit of current portfolio\n budget = sum([df.iloc[-1][s]*result['stocks'][s] for s in self.stocks]) \n self.update_values.append(budget - initial_budget)\n\n # Compute profit of fund portfolio\n fund_value = sum([baseline_df_current.iloc[-1][s]*baseline_result[s] \n for s in self.baseline]) \n self.baseline_values.append(fund_value - initial_budget)\n\n self.budget = budget \n\n self.load_data(df=df)\n\n self.price_df.loc[i-2] = list(self.price.values)\n\n # Output for user on command-line and plot\n update_values = np.array(self.update_values, dtype=object)\n baseline_values = np.array(self.baseline_values, dtype=object)\n plt.plot(range(3, i+1), update_values, \n color='blue', label=\"Optimized portfolio\")\n plt.plot(range(3, i+1), baseline_values, \n color='gray', label=\"Fund portfolio\", linewidth=0.5)\n \n if first_purchase:\n plt.legend(loc=\"lower left\")\n plt.title(\"Start: {start}, End: {end}\".format\\\n (start=self.df_all.first_valid_index().date(), \n end=self.df_all.last_valid_index().date()))\n\n plt.savefig(\"portfolio.png\")\n plt.pause(0.05)\n \n # Making solve run\n if self.model_type == 'DQM':\n print(f\"\\nMulti-Period DQM Run...\")\n \n self.build_dqm()\n self.solution['DQM'] = self.solve_dqm()\n result = self.solution['DQM']\n else:\n print(f\"\\nMulti-Period CQM Run...\")\n\n # Set budget to 0 to enforce that portfolio is self-financing \n if self.t_cost and not first_purchase:\n self.budget = 0 \n\n self.solution['CQM'] = self.solve_cqm(max_risk=max_risk, \n min_return=min_return,\n init_holdings=init_holdings)\n result = self.solution['CQM']\n init_holdings = result['stocks']\n\n # Print results to command-line\n value = sum([self.price[s]*result['stocks'][s] for s in self.stocks])\n returns = result['return']\n variance = result['risk'] \n\n row = [months[-1].strftime('%Y-%m-%d'), value] + \\\n [result['stocks'][s] for s in self.stocks] + \\\n [variance, returns] \n self.opt_results_df.loc[i-2] = row \n \n first_purchase = False\n\n print(self.opt_results_df)\n print(f'\\nRun completed.\\n')\n\n plt.savefig(\"portfolio.png\")\n plt.show(block=False)", "def market_value(self, ref_prices, suspensions=None):\n # TODO some securities could not be able to be traded\n if suspensions is None:\n suspensions = []\n \n market_value_float = 0.0\n market_value_frozen = 0.0 # suspended or high/low limit\n for sec in self.holding_securities:\n size = self.get_position(sec).current_size\n # TODO PortfolioManager object should not access price\n price = ref_prices[sec]\n mv_sec = price * size\n if sec in suspensions:\n market_value_frozen += mv_sec\n else:\n market_value_float += mv_sec\n \n return market_value_float, market_value_frozen", "def portfolio_allocation(self, data, total_risk):\n total_rating = data[\"rating\"].sum()\n 
shares = {}\n risk_amt = total_risk\n for _, row in data.iterrows():\n numshares = int(float(row[\"rating\"]) / float(total_rating) * float(risk_amt) / float(row[\"price\"]))\n if numshares > 10:\n multiplier = int(numshares / 10)\n numshares = multiplier * 10\n shares[row[\"symbol\"]] = numshares\n\n risk_amt -= numshares * row[\"price\"]\n # debug\n # for k, v in shares.items():\n # print(\"[*] Ticker: {}, Shares: {}\".format(k, v))\n return shares", "def test_interest_vs_stockprice(self):\n stock_prices = np.array([[5, 10, 20, 40]], dtype=float)\n interest_rate = 2.0 # 200%\n test_case = StockMarket(5, stock_prices, interest_rate)\n test_case.dynamic_programming_bottom_up()\n for portfolio in set(test_case.backtracing_portfolio()):\n self.assertEqual(0, portfolio)", "def momentum(portfolio_item, transaction_volume, cash_allocation):\n from yahooquery import Ticker\n from math import floor\n import talib\n from .TradeHistoryItem import log_trade\n from API.Help import is_increasing, initialize_alpaca\n\n alpaca = initialize_alpaca()\n\n yahoo_ticker = Ticker(str(portfolio_item))\n info = yahoo_ticker.history()\n ma_5 = talib.SMA(info['close'], timeperiod=5)\n ma_20 = talib.SMA(info['close'], timeperiod=20)\n volume = info['volume']\n\n if portfolio_item.shares == 0:\n # if the price goes from below the sma to above, buy\n if ma_5[-1] > (ma_20[-1] * 1.1) and is_increasing(volume, 3):\n print('buying {} shares of {}'.format(transaction_volume, str(portfolio_item)))\n alpaca.submit_order(str(portfolio_item), transaction_volume, 'buy', 'market', 'day')\n portfolio_item.buy(transaction_volume=transaction_volume, cash_allocated=cash_allocation)\n log_trade(portfolio_item=portfolio_item, transaction_volume=transaction_volume, transaction_type=0)\n # if the price goes from above the sma to below, short\n elif ma_5[-1] < (ma_20[-1] * .9) and not is_increasing(volume, 3) and portfolio_item.shares == 0:\n transaction_volume = floor(cash_allocation / (portfolio_item.ticker.price_now * 1.1))\n print('shorting {} shares of {}'.format(transaction_volume, str(portfolio_item)))\n alpaca.submit_order(str(portfolio_item), transaction_volume, 'sell', 'market', 'day')\n portfolio_item.short(transaction_volume=transaction_volume, cash_allocated=cash_allocation)\n log_trade(portfolio_item=portfolio_item, transaction_volume=transaction_volume, transaction_type=3)", "def cumulative_returns(shares_allocation, capital, test_data):\n\n # list of DataFrames of cumulative returns for each stock\n daily_returns = []\n\n # iterates over every stock in the portfolio\n for stock in shares_allocation.index:\n\n # multiples shares by share prices in the validation dataset\n daily_returns.append(shares_allocation.loc[stock].values * test_data[stock])\n\n # concatenates every DataFrame in the above list to a single DataFrame\n daily_returns_df = pd.concat(daily_returns, axis=1).reset_index()\n\n # sets the index as the date\n daily_returns_df.set_index(\"Day\", inplace=True)\n\n # adds the cumulative returns for every stock\n cumulative_daily_returns = daily_returns_df.sum(axis=1)\n\n # returns the cumulative daily returns of the portfolio\n return cumulative_daily_returns", "def get_portfolio_prices(stocks: list, funds: list, etfs: list, start_date: str, end_date=today) -> pd.DataFrame:\r\n data_frames_stocks = get_assets_data_frames(\r\n stocks, inv.get_stock_historical_data, 'brazil', start_date=start_date, end_date=end_date)\r\n data_frames_funds = get_assets_data_frames(\r\n funds, inv.get_fund_historical_data, 
'brazil', start_date=start_date, end_date=end_date)\r\n data_frames_etfs = get_assets_data_frames(\r\n etfs, inv.get_etf_historical_data, 'brazil', start_date=start_date, end_date=end_date)\r\n\r\n data_frames = [*data_frames_stocks, *data_frames_funds, *data_frames_etfs]\r\n\r\n assets = [*stocks, *funds, *etfs]\r\n\r\n portfolio_prices = build_multi_index_data_frame(\r\n data_frames, assets, ['Close', 'Open', 'High', 'Low'])\r\n\r\n return portfolio_prices", "def optimize_portfolio(sd=dt.datetime(2008,1,1), ed=dt.datetime(2009,1,1), \\\n syms=['GOOG','AAPL','GLD','XOM'], gen_plot=False):\n\n # Read in adjusted closing prices for given symbols, date range\n dates = pd.date_range(sd, ed)\n prices_all = get_data(syms, dates) # automatically adds SPY\n prices = prices_all[syms] # only portfolio symbols\n prices_SPY = prices_all['SPY'] # only SPY, for comparison later\n\n\t# find the allocations for the optimal portfolio\n #1 provide an initial guess for x\n allocs = np.ones(len(syms))/len(syms)\n #2 Provide constraints to the optimizer\n bounds = [(0,1) for i in syms]\n constraints = ({ 'type': 'eq', 'fun': lambda inputs: 1.0 - np.sum(inputs) })\n #3 call the optimizer\n res = spo.minimize(get_sharpe_ratio, allocs, \n \t\t\t\t\targs=prices, \n \t\t\t\t\tbounds = bounds,\n \t\t\t\t\tconstraints=constraints)\n allocs = res.x\n \n # Get daily portfolio value\n port_val = get_portfolio_value(prices, allocs, 1.0)\n \n # Get portfolio statistics\n cr, adr, sddr, sr = get_portfolio_stats(port_val, \n \t\t\t\t\t\t\t\t\t\tdaily_rf=0.0, \n \t\t\t\t\t\t\t\t\t\tsamples_per_year=252)\n \n # Compare daily portfolio value with SPY using a normalized plot\n if gen_plot:\n # add code to plot here\n df_temp = pd.concat([port_val, prices_SPY], keys=['Portfolio', 'SPY'], axis=1)\n plot_normalized_data(df_temp)\n\n return allocs, cr, adr, sddr, sr", "def portfolio_performance(returns,weights):\r\n print('Calculating Portfolio Performance')\r\n # returns=target_asset_port_data_attributes['component_returns']\r\n # weights =target_asset_port_data_attributes['effective_weights']\r\n\r\n component_returns= returns\r\n compnent_weights = pd.DataFrame(data=np.nan,index= component_returns.index,columns=component_returns.columns)\r\n compnent_weights.loc[weights.index,:] = weights\r\n\r\n portfolio_dates = component_returns.index\r\n components = component_returns.columns\r\n\r\n # pre-allocate\r\n BoP_df = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=components)\r\n EoP_df = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=components)\r\n PnL_df = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=components)\r\n portfolio_BoP = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=['Portfolio BoP'])\r\n portfolio_EoP = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=['Portfolio EoP'])\r\n portfolio_PnL = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=['Portfolio PnL'])\r\n \r\n portfolio_index = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=['Index'])\r\n previous_index_value = np.int64(1)\r\n\r\n pre_date = portfolio_dates[0]\r\n # set BoP to start weights\r\n for date,row in component_returns.iterrows():\r\n # print(date)\r\n # 1st date\r\n if date == portfolio_dates[0]:\r\n BoP_df.loc[date] = compnent_weights.iloc[0,:]\r\n EoP_df.loc[date] = BoP_df.loc[date] * (1+component_returns.loc[date])\r\n PnL_df.loc[date] = EoP_df.loc[date].subtract(BoP_df.loc[date])\r\n\r\n portfolio_BoP.loc[date] = BoP_df.loc[date].sum()\r\n portfolio_EoP.loc[date] = EoP_df.loc[date].sum()\r\n 
portfolio_PnL.loc[date] = PnL_df.loc[date].sum()\r\n\r\n portfolio_index.loc[date] = np.nansum([previous_index_value,portfolio_PnL.loc[date].values])\r\n previous_index_value = portfolio_index.loc[date]\r\n pre_date = date\r\n\r\n # after first date\r\n else:\r\n BoP_df.loc[date] = EoP_df.loc[pre_date]\r\n # weights override\r\n if date in compnent_weights.index:\r\n none_NaN_index = ~compnent_weights.loc[date].isnull()\r\n if not compnent_weights.loc[date][none_NaN_index].empty:\r\n tmp_sum = BoP_df.loc[date].sum()\r\n BoP_df.loc[date][none_NaN_index.values] = (compnent_weights.loc[date][none_NaN_index.values].values)*tmp_sum\r\n\r\n \r\n EoP_df.loc[date] = BoP_df.loc[date] * (1+component_returns.loc[date])\r\n PnL_df.loc[date] = EoP_df.loc[date].subtract(BoP_df.loc[date])\r\n\r\n portfolio_BoP.loc[date] = BoP_df.loc[date].sum()\r\n portfolio_EoP.loc[date] = EoP_df.loc[date].sum()\r\n portfolio_PnL.loc[date] = PnL_df.loc[date].sum()\r\n \r\n portfolio_index.loc[date] = np.nansum([previous_index_value,portfolio_PnL.loc[date].values])\r\n previous_index_value = portfolio_index.loc[date]\r\n pre_date = date\r\n\r\n\r\n portfolio_returns = portfolio_index.pct_change(1) \r\n portfolio_returns.columns = ['Returns']\r\n\r\n portfolio_index\r\n perf = portfolio_index.calc_stats()\r\n \r\n output = pd.Series(data = [perf,PnL_df,portfolio_index,portfolio_BoP,portfolio_EoP,BoP_df], index=['Portfolio Perf','Component PnL','portfolio_index','portfolio_BoP','portfolio_EoP','BoP_df'])\r\n return output", "def returns_to_prices(returns: pd.Series, start_price: float) -> pd.Series:\n return returns.add(1).cumprod().mul(start_price)", "def getStock(symbol, start, end):\n df = data.get_data_yahoo(symbol, start, end)\n\n df.columns.values[-1] = 'AdjClose'\n df.columns = df.columns + '_' + symbol\n df['Return_%s' % symbol] = df['AdjClose_%s' % symbol].pct_change()\n\n return df", "def get_index_portfolio_value_data(game_id: int, symbol: str, start_time: float = None,\n end_time: float = None) -> pd.DataFrame:\n start_time, end_time = get_time_defaults(game_id, start_time, end_time)\n base_value = get_index_reference(game_id, symbol)\n\n with engine.connect() as conn:\n df = pd.read_sql(\"\"\"\n SELECT timestamp, `value` FROM indexes\n WHERE symbol = %s AND timestamp >= %s AND timestamp <= %s;\"\"\", conn, params=[symbol, start_time, end_time])\n index_info = query_to_dict(\"SELECT * FROM index_metadata WHERE symbol = %s\", symbol)[0]\n\n # normalizes index to the same starting scale as the user\n df[\"value\"] = STARTING_VIRTUAL_CASH * df[\"value\"] / base_value\n df[\"username\"] = index_info[\"name\"]\n\n # When a game kicks off, it will generally be that case that there won't be an index data point at exactly that\n # time. 
We solve this here, create a synthetic \"anchor\" data point that starts at the same time at the game\n trade_start = make_index_start_time(start_time)\n return pd.concat([pd.DataFrame(dict(username=index_info[\"name\"], timestamp=[trade_start],\n value=[STARTING_VIRTUAL_CASH])), df])", "def portfolio():\n #Query transactions by user id\n trans = Transactions.query.filter_by(owner=session['user_id']).all()\n \n #Create list of comanies user owns stock in\n companies = []\n for t in trans:\n if t.symbol not in companies:\n companies.append(t.symbol)\n\n #Create list of current stock dictionaries and total their values\n total = 0\n stocks = []\n for company in companies:\n trans = Transactions.query.filter_by(owner=session['user_id'], symbol=company).all()\n stock = {}\n stock['shares'] = 0\n for t in trans:\n stock['shares'] += t.shares\n if stock['shares'] > 0:\n stock['symbol'] = company\n stock['name'] = lookup(company)['name']\n stock['price'] = lookup(company)['price']\n stock['total'] = stock['shares'] * stock['price']\n stock['price'] = usd(stock['price'])\n stock['total'] = usd(stock['total'])\n total += float(stock['total'][1:].replace(',', ''))\n stocks.append(stock)\n\n #Set user cash and total values\n value = {}\n value['cash'] = usd(Users.query.filter_by(id=session['user_id']).first().cash)\n value['total'] = usd(total + float(value['cash'][1:].replace(',', '')))\n\n #Add values to list\n stocks.append(value)\n\n #Return list of dictionaries\n return stocks", "def getStock(symbol, start, end):\n df = pd.io.data.get_data_yahoo(symbol, start, end)\n\n df.columns.values[-1] = 'AdjClose'\n df.columns = df.columns + '_' + symbol\n df['Return_%s' % symbol] = df['AdjClose_%s' % symbol].pct_change()\n\n return df", "def get_new_allocation(self, day, init=False):\n \"\"\n if init and self.data_train is None:\n # Use uniform allocation\n cur_day_op = self.data.get_op(relative=False)[day, :] # opening prices on |cur_day|\n return util.get_uniform_allocation(self.num_stocks, cur_day_op)\n\n predicted_price_rel = self.predict_price_relatives(day)\n\n # Compute mean price relative of available stocks (x bar at t+1)\n today_op = self.data.get_op(relative=False)[day, :]\n avail_stocks = util.get_avail_stocks(today_op)\n avail_idxs = util.get_available_inds(avail_stocks)\n ppr_avail = predicted_price_rel[avail_idxs] # predicted price relatives of available stocks\n mean_price_rel = np.mean(ppr_avail)\n\n lam = self.compute_lambda(ppr_avail, mean_price_rel, avail_idxs) # lambda at t+1\n\n # limit lambda to avoid numerical problems from acting too aggressively.\n # (referenced from marigold's implementation: https://github.com/Marigold/universal-portfolios)\n lam = min(100000, lam)\n\n # Note: we don't perform simplex project b/c negative values (shorting) is allowed.\n new_b = np.zeros(self.num_stocks)\n for i, _ in enumerate(new_b):\n ppr = predicted_price_rel[i]\n if ppr > 0:\n new_b[i] = self.b[i] + lam * (ppr - mean_price_rel)\n\n # Normalize b so that it sums to 1\n sum_b = np.linalg.norm(new_b, ord=1)\n return (1.0 / sum_b) * new_b", "def __init__(self, start_date=\"2017-01-01\", end_date=datetime.datetime.now().strftime(\"%Y-%m-%d\"), asset_list=[]):\n\n self.start_date = start_date\n self.end_date = end_date\n self.asset_list = asset_list\n self.portfolio = pd.DataFrame()\n self.benchmark = san.get(\"ohlcv/bitcoin\", from_date=start_date,\n to_date=end_date).closePriceUsd.pct_change()\n\n for portfolio_asset in asset_list:\n self.portfolio[portfolio_asset] = san.get(\"ohlcv/\" + 
portfolio_asset,\n from_date=start_date,\n to_date=end_date).closePriceUsd.pct_change()\n self.portfolio = self.portfolio.replace([np.inf, -np.inf], 0)\n self.metrics = dict()", "def test_low_stockprice_high_interest(self):\n stock_prices = np.array([[5, 4, 4, 2],\n [5, 3, 3, 3],\n [5, 4, 2, 2],\n [5, 3, 3, 1]], dtype=float)\n interest_rate = 2.0 # 200%\n test_case = StockMarket(5, stock_prices, interest_rate)\n test_case.dynamic_programming_bottom_up()\n for portfolio in set(test_case.backtracing_portfolio()):\n self.assertEqual(0, portfolio)", "def evaluate_cur_stocks(self):\n today = datetime.today()\n close_val = PRICE_DF.iloc[PRICE_DF.index.get_loc(today, method=\"ffill\")]\n close_val = close_val[self.cur_stocks.index]\n close_val = pd.DataFrame({\"PRICE_CURRENT\" : close_val.values}, index=self.cur_stocks.index)\n evaluated_stocks = pd.merge(self.cur_stocks, close_val, left_index=True, right_index=True)\n evaluated_stocks[\"VOLUME_CURRENT\"] = evaluated_stocks[\"AMOUNT\"] * evaluated_stocks[\"PRICE_CURRENT\"]\n evaluated_stocks[\"RETURN\"] = (evaluated_stocks[\"VOLUME_CURRENT\"] / evaluated_stocks[\"VOLUME_PURCHASE\"]) - 1\n return evaluated_stocks", "def __calculate_portfolio_returns(self):\n\n p_bar = tqdm(range(1), desc=\" Calculating returns\", leave=False)\n\n trade_data = self.historical_trade_data\n\n # Helper functions to calculate cash inflow and outflow\n def f_min(x):\n return x.apply(lambda x: min(x, 0))\n\n def f_max(x):\n return x.apply(lambda x: max(x, 0))\n\n # Calculate cash inflow and outflow\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Period cash inflow\"], self.tickers_list + [\"Total\"]]\n )\n ] = -1 * trade_data[\"Investment delta\"][:].apply(lambda x: f_min(x), axis=0)\n\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Period cash outflow\"], self.tickers_list + [\"Total\"]]\n )\n ] = trade_data[\"Investment delta\"][:].apply(lambda x: f_max(x), axis=1)\n\n # Calculate period return\n\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Period absolute return\"], self.tickers_list + [\"Total\"]]\n )\n ] = (trade_data[\"End Value\"] + trade_data[\"Period cash inflow\"]) - (\n trade_data[\"End Value\"].shift(1).fillna(0)\n + trade_data[\"Period cash outflow\"]\n )\n\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Period percentage return\"], self.tickers_list + [\"Total\"]]\n )\n ] = (trade_data[\"End Value\"] + trade_data[\"Period cash inflow\"]) / (\n trade_data[\"End Value\"].shift(1).fillna(0)\n + trade_data[\"Period cash outflow\"]\n ) - 1\n\n trade_data[\"Period percentage return\"].fillna(0, inplace=True)\n\n self.historical_trade_data = trade_data\n\n self.portfolio_returns = self.historical_trade_data[\"Period percentage return\"][\n \"Total\"\n ]\n\n p_bar.n += 1\n p_bar.refresh()", "def hedge_portfolio(context, data):\r\n factors = get_alphas_and_betas(context, data)\r\n beta_exposure = 0.0\r\n count = 0\r\n for asset in context.portfolio.positions:\r\n if asset in factors and asset != context.index:\r\n if not np.isnan(factors[asset].beta):\r\n beta_exposure += factors[asset].beta\r\n count += 1\r\n beta_hedge = -1.0 * beta_exposure / count\r\n dollar_amount = context.portfolio.portfolio_value * beta_hedge\r\n record(beta_hedge=beta_hedge)\r\n if not np.isnan(dollar_amount):\r\n order_target_value(context.index, dollar_amount)", "def __init__(\n self,\n portfolio,\n market=None,\n commission_min=5.00,\n commission_pct=0.0,\n buy_percent=1.0,\n sell_percent=1.0,\n pm_threshold=0.0,\n pm_order=1.0,\n risk_free_return=1.0,\n name=None\n ):\n\n # 
Assumptions\n self.name = name if name else portfolio.name\n self.commission_min = commission_min\n self.commission_pct = commission_pct\n self.buy_percent = buy_percent\n self.sell_percent = sell_percent\n self.pm_threshold = pm_threshold\n self.pm_order = pm_order\n self.risk_free_return = risk_free_return\n self.performance = {}\n\n # Inputs\n self.portfolio = portfolio\n self.market = copy.deepcopy(market) if market else Asset(np.ones(len(self.portfolio.dates)))\n\n # Trading states\n self.long_open = {symbol:False for symbol in portfolio.assets.keys()}\n self.short_open = {symbol:False for symbol in portfolio.assets.keys()}\n\n # Keep track of intermidiate results for performance\n self.trade_data = []\n recordings = [\n 'buy price', 'buy shares', 'buy fees', 'buy date',\n 'sell price', 'sell shares', 'sell fees', 'sell date',\n 'gain', 'profit', 'loss', 'return', 'win/loose',\n 'min balance', 'min date', 'max balance', 'max date',\n 'drawdown', 'drawdown days',\n 'volatility', 'expected_return', 'beta', 'lpm', 'hpm',\n 'max', 'mean', 'min'\n ]\n self.record = {symbol:pd.DataFrame(columns=recordings) for symbol in portfolio.assets.keys()}\n self.max = {symbol:[portfolio.assets[symbol].c.iloc[0], None] for symbol in portfolio.assets.keys()}\n self.min = {symbol:[999999999999999, None] for symbol in portfolio.assets.keys()}\n self.drawdown = {symbol:[999999999999999, None] for symbol in portfolio.assets.keys()}", "def rebalance(self, date):\n eod_values = self.df.shift(1).loc[date, 'values'].mul(1 + self.tc.instrument_returns.loc[date, 'daily'])\n eod_portfolio_value = sum(eod_values.values)\n\n previous_values = self.df.loc[date, 'values'].copy()\n position_value = self.target_weights.mul(eod_portfolio_value)\n trading_cost = abs(eod_values.div(eod_portfolio_value) - self.target_weights) * eod_portfolio_value * \\\n self.tc.commission\n current_values = position_value - trading_cost\n self.df.loc[date, 'values'] = current_values.values\n future_values = self.tc.instrument_returns.loc[date:, 'cumulative'].div(\n self.tc.instrument_returns.loc[date, 'cumulative']).mul(current_values, axis=1)\n self.df.loc[date:, 'values'] = future_values.values\n trade = pd.Series(current_values - previous_values)\n # Once we have calculated the end-of-day value of the portfolio, we set the allocation by looking at the\n # dollars invested in each ETF\n self.df.loc[date:, 'allocations'] = future_values.div(future_values.sum(axis=1), axis=0).values\n\n return trade", "def before_trading_start(context, data):\n factors = pipeline_output('ff_example')\n\n # get the data we're going to use\n returns = factors['returns']\n mkt_cap = factors.sort_values(['market_cap'], ascending=True)\n be_me = factors.sort_values(['be_me'], ascending=True)\n\n # to compose the six portfolios, split our universe into portions\n half = int(len(mkt_cap)*0.5)\n small_caps = mkt_cap[:half]\n big_caps = mkt_cap[half:]\n \n thirty = int(len(be_me)*0.3)\n seventy = int(len(be_me)*0.7)\n growth = be_me[:thirty]\n neutral = be_me[thirty:seventy]\n value = be_me[seventy:]\n\n # now use the portions to construct the portfolios.\n # note: these portfolios are just lists (indices) of equities\n small_value = small_caps.index.intersection(value.index)\n small_neutral = small_caps.index.intersection(neutral.index)\n small_growth = small_caps.index.intersection(growth.index)\n \n big_value = big_caps.index.intersection(value.index)\n big_neutral = big_caps.index.intersection(neutral.index)\n big_growth = 
big_caps.index.intersection(growth.index)\n\n # take the mean to get the portfolio return, assuming uniform\n # allocation to its constituent equities.\n sv = returns[small_value].mean()\n sn = returns[small_neutral].mean()\n sg = returns[small_growth].mean()\n \n bv = returns[big_value].mean()\n bn = returns[big_neutral].mean()\n bg = returns[big_growth].mean()\n\n # computing SMB\n context.smb = (sv + sn + sg)/3 - (bv + bn + bg)/3\n\n # computing HML\n context.hml = (sv + bv)/2 - (sg + bg)/2", "def declare_new_budget(date, exp_data):\n\n exp_list = exp_data[env.EXPENSE_DATA_KEY]\n local_budget = {}\n month_total = util.get_float_input(\n f\"Please input your total for the month ending {date}: \", force_pos=True)\n budg_remaining = month_total\n\n for i, exp in enumerate(exp_list):\n if i == len(exp_list) - 1:\n print(\"I got the last one for you :) MATH!\")\n budg_amnt = budg_remaining\n budg_remaining = 0\n\n elif budg_remaining == 0: # elif skips this condition if budget remaining is set above\n budg_amnt = 0\n local_budget[env.BUDGET_TOTAL_KEY] = month_total\n else:\n prompt = f\"Enter your budget for: [{exp}] - Total Budget Re. ${budg_remaining} - Exp's Re. [{len(exp_list) - i - 1}]: \"\n budg_amnt = prompt_for_budget_amnt(\n prompt, budg_remaining, exp_data)\n local_budget.update({exp: budg_amnt})\n budg_remaining = round(month_total - sum_budget(local_budget), 2)\n print(local_budget)\n return local_budget", "def size_portfolio_replic(df1, df2, df3, step3=None):\r\n # merge crsp & delisted stock returns\r\n _crsp = pd.merge(df1, df2, how='outer', on=['date', 'permno'])\r\n\r\n # use the two returns together\r\n _crsp = _crsp[~(_crsp['ret'].isna() & _crsp['dlret'].isna())]\r\n _crsp.loc[_crsp['ret'].isna(), 'ret'] = 0\r\n _crsp.loc[_crsp['dlret'].isna(), 'dlret'] = 0\r\n _crsp['ret'] = (_crsp['ret'] + 1) * (_crsp['dlret'] + 1) - 1\r\n\r\n _crsp['prc'] = abs(_crsp['prc']) # use positive price\r\n _crsp['me'] = _crsp['prc'] * _crsp['shrout'] / 1000 # shares in thousands, market value in millions\r\n _crsp['year'] = _crsp['date'].dt.year\r\n _crsp['month'] = _crsp['date'].dt.month\r\n\r\n # sort the df first by stock code then by date\r\n _crsp = _crsp.sort_values(by=['permno', 'date'], ascending=True)\r\n\r\n # get the market value for the previous month, for the value-weighted rets calculations later\r\n _crsp['lag_me'] = _crsp['me'].shift(1)\r\n _crsp.loc[_crsp['permno'].shift(1) != _crsp['permno'], 'lag_me'] = np.nan\r\n\r\n # get the fiscal year: if month<=6 then fyear = year - 1\r\n _crsp['fyear'] = _crsp['year']\r\n _crsp.loc[_crsp['month'] <= 6, 'fyear'] = _crsp['fyear'] - 1\r\n\r\n # at the end of each June, use me as indicator to construct new portfolio\r\n _construct = _crsp[_crsp['month'] == 6].copy()\r\n _construct['fyear'] = _construct['fyear'] + 1 # me at June is used for the following fyear\r\n _construct = _construct[['fyear', 'me', 'permno']]\r\n\r\n # merge the me indicator with the original dataset\r\n # now there are 2 me related values:\r\n # lag_me for vw ret calculation, and me_ind for decile classification\r\n _crsp = pd.merge(_crsp, _construct, how='left', on=['fyear', 'permno'])\r\n _crsp = _crsp[_crsp['me_y'].notna()]\r\n _crsp = _crsp.drop(columns='me_x')\r\n _crsp.rename(columns={'me_y': 'me_ind'}, inplace=True)\r\n\r\n # obtain the breakpoints, use nyse stocks and me at end of June (start of July)\r\n _nyse = _crsp.loc[(_crsp['exchcd'] == 1) & (_crsp['month'] == 7)]\r\n\r\n # use quantile function to get breakpoints for each time period\r\n _indicator = 
_nyse.groupby(['fyear'])['me_ind'].quantile(0.1).to_frame()\r\n _indicator.reset_index(drop=False, inplace=True)\r\n _indicator.rename(columns={'me_ind': 'd'}, inplace=True)\r\n for i in range(2, 10):\r\n _dec_insert = _nyse.groupby(['fyear'])['me_ind'].quantile(0.1 * i)\r\n _dec_insert.reset_index(drop=True, inplace=True)\r\n _indicator.insert(_indicator.shape[1], 'd' * i, _dec_insert)\r\n\r\n # merge the breakpoints to the original dataset\r\n _crsp = pd.merge(_crsp, _indicator, how='left', on=['fyear'])\r\n\r\n # obtain the decile for each observation\r\n _crsp.loc[(_crsp['me_ind'] <= _crsp['d']), 'decile'] = 1 # dec1\r\n _crsp.loc[(_crsp['me_ind'] > _crsp['d' * 9]), 'decile'] = 10 # dec10\r\n for i in range(1, 9):\r\n _crsp.loc[(_crsp['me_ind'] > _crsp['d' * i])\r\n & (_crsp['me_ind'] <= _crsp['d' * (i + 1)]), 'decile'] = i + 1 # dec2-9\r\n\r\n # if step3 is true, return crsp for HML & SMB calculation\r\n if step3:\r\n return _crsp\r\n\r\n # obtain the value-weighted rets for each month\r\n _crsp['ret*lag_me'] = _crsp['ret'] * _crsp['lag_me']\r\n _crsp_vw = (_crsp.groupby(['year', 'month', 'decile'])['ret*lag_me'].sum() /\r\n _crsp.groupby(['year', 'month', 'decile'])['lag_me'].sum()).to_frame()\r\n _crsp_vw.reset_index(drop=False, inplace=True)\r\n _crsp_vw.rename(columns={'decile': 'port', 0: 'Size_Ret'}, inplace=True)\r\n\r\n # restrict time from Jan1973 to Dec2020\r\n _crsp_vw = _crsp_vw.loc[(_crsp_vw['year'] <= 2020) & (_crsp_vw['year'] >= 1973)]\r\n _crsp_vw.reset_index(drop=True, inplace=True)\r\n\r\n _ff = df3.copy()\r\n _rf = _ff[['year', 'month', 'RF']] # get risk-free from ff\r\n _ff['wml_size'] = _ff['ME01'] - _ff['ME10'] # get long-short portfolio by dec1 minus dec10 in ff\r\n _crsp_vw = pd.merge(_crsp_vw, _rf, on=['year', 'month'], how='inner')\r\n _crsp_vw['exret'] = _crsp_vw['Size_Ret'] - _crsp_vw['RF'] # get excess returns\r\n\r\n # get long-short portfolio by dec1 minus dec10 in replication\r\n _ls = pd.merge(_crsp_vw[_crsp_vw['port'] == 1], _crsp_vw[_crsp_vw['port'] == 10], on=['year', 'month'], how='inner')\r\n _ls['wml'] = _ls['Size_Ret_x'] - _ls['Size_Ret_y'] # dec1 - dec10\r\n _ls = _ls[['year', 'month', 'wml']]\r\n\r\n # annualized, in percentage\r\n _ls_mean = np.mean(_ls['wml']) * 12 * 100\r\n _ls_std = np.std(_ls['wml']) * np.sqrt(12) * 100\r\n\r\n # get output values,\r\n # rows are exrets, standard deviations, Sharpe Ratios, skewnesses, and correlations with ff\r\n # columns are dec1 to dec10, and long-short\r\n _output = pd.DataFrame(index=np.arange(5), columns=np.arange(11))\r\n _output.iloc[[0], :-1] = _crsp_vw.groupby('port')['exret'].mean() * 12 * 100\r\n _output.iloc[[0], [10]] = _ls_mean\r\n _output.iloc[[1], :-1] = _crsp_vw.groupby('port')['exret'].std() * np.sqrt(12) * 100\r\n _output.iloc[[1], [10]] = _ls_std\r\n _output.iloc[[2], :-1] = np.array(_output.iloc[[0], :-1]) / np.array(_output.iloc[[1], :-1])\r\n _output.iloc[[2], [10]] = _ls_mean / _ls_std\r\n _output.iloc[[3], :-1] = _crsp_vw.groupby('port')['exret'].skew()\r\n _output.iloc[[3], [10]] = skew(_ls['wml'])\r\n\r\n # get the correlations for each decile between replication and ff\r\n for i in range(11):\r\n if i <= 8:\r\n _replic = _crsp_vw[_crsp_vw['port'] == (i + 1)]\r\n _replic.reset_index(drop=True, inplace=True)\r\n _ff_group = _ff[['year', 'month', ('ME0' + str(i + 1)), 'RF']]\r\n _ff_group['exret_ff'] = _ff_group['ME0' + str(i + 1)] - _ff_group['RF']\r\n elif i == 9:\r\n _replic = _crsp_vw[_crsp_vw['port'] == (i + 1)]\r\n _replic.reset_index(drop=True, inplace=True)\r\n _ff_group 
= _ff[['year', 'month', 'ME10', 'RF']]\r\n _ff_group['exret_ff'] = _ff_group['ME10'] - _ff_group['RF']\r\n else:\r\n _replic = _ls\r\n _replic['exret'] = _replic['wml']\r\n _ff_group = _ff[['year', 'month', 'wml_size', 'RF']]\r\n _ff_group['exret_ff'] = _ff_group['wml_size']\r\n _compare = pd.merge(_replic, _ff_group, on=['year', 'month'], how='left')\r\n _output.iloc[[4], [i]] = _compare.corr().loc['exret', 'exret_ff']\r\n\r\n # rename the output stats\r\n _output.rename(\r\n columns={0: 'D1', 1: 'D2', 2: 'D3', 3: 'D4', 4: 'D5',\r\n 5: 'D6', 6: 'D7', 7: 'D8', 8: 'D9', 9: 'D10', 10: 'LS'},\r\n index={0: 'exret', 1: 'sd', 2: 'SR', 3: 'skew', 4: 'corr'},\r\n inplace=True)\r\n return _output", "def stock_market(no_profiles: int) -> tuple:\n all_companies = []\n Stocks = namedtuple(\"Stocks\", 'name symbol open high close company_weight')\n MkValue_ = random.uniform(1000, 50000, 100)\n wts_ = random.uniform(0, 1, 100)\n wts_ = wts_/sum(wts_)\n\n for _ in range(100):\n name = fake.company()\n open_ = round(MkValue_[_]*wts_[_],2)\n close = round(open_ * random.uniform(0.7, 1.15), 2)\n high = round(open_ * random.uniform(0.85, 1.15), 2)\n if high < open_:\n high = open_\n if high < close:\n high = close\n\n all_companies.append(\n Stocks(name=name, symbol=symbol(name), open=open_, high=round(high, 2), close=round(close, 2), company_weight=round(wts_[_], 4)))\n\n stock_index = round(\n sum(x.open * x.company_weight for x in all_companies), 4)\n highest_for_day = round(\n sum(x.high * x.company_weight for x in all_companies), 2)\n lowest_close_for_day = round(\n sum(x.close * x.company_weight for x in all_companies), 2)\n\n # print(f\"\\n------------------------------------Top 100 listed companies on Fake Stock Exchange------------------------------------\")\n # [print(x) for x in sorted(all_companies, key=lambda x:x.symbol)]\n # print(f\"\\n--------------Main details on {date.today()}--------------\")\n # print(f\"\\nStart of the day: {stock_index}\")\n # print(f\"Highest for the day: {highest_for_day}\")\n # print(f\"Lowest close for the day: {lowest_close_for_day}\")\n return sorted(all_companies, key=lambda x: x.symbol), stock_index, highest_for_day, lowest_close_for_day", "def backtest_portfolio(self):\n self.rank=dict()\n self.accuracy=dict()\n portfolio = dict()\n \n for algo in self.algos:\n portfolio[algo]=pd.DataFrame(index=self.positions.index)\n self.pos_diff=dict()\n self.pos_diff[algo] = self.positions[algo].diff()\n \n portfolio[algo]['price_diff'] = self.bars['Close']-self.bars['Open']\n #portfolio['price_diff'][0:5] = 0.0\n portfolio[algo]['profit'] = self.positions[algo] * portfolio[algo]['price_diff']\n portfolio[algo]['total'] = self.initial_capital + portfolio[algo]['profit'].cumsum()\n portfolio[algo]['returns'] = portfolio[algo]['total'].pct_change()\n d=np.array(portfolio[algo]['profit']).copy()\n d[d>0]=1\n d[d<0]=0\n d[np.array(self.positions[algo])==0]=1\n for i in np.arange(1,len(d)+1):\n c=float(sum(d[0:i]))/(i)\n d[i-1]=c\n portfolio[algo]['accuracy']=d\n self.rank[algo]=float(portfolio[algo]['total'][-1] - portfolio[algo]['total'][0])\n self.returns=portfolio\n c=np.array(self.returns[algo]['profit'])\n c[c>0]=1\n c[c<0]=0\n c[np.array(self.positions[algo])==0]=1\n accuracy=round(float(c.sum())/len(c),2)*self.rank[algo]\n self.accuracy[algo]=accuracy\n #self.ranking= sorted(self.rank.items(), key=operator.itemgetter(1), reverse=True)\n self.ranking= sorted(self.accuracy.items(), key=operator.itemgetter(1))\n self.ready=True\n return (portfolio, self.rank, 
self.ranking)", "def update_portfolio(self):\n if not self._dirty_portfolio:\n return\n\n portfolio = self._portfolio\n pt = self.position_tracker\n\n portfolio.positions = pt.get_positions()\n position_stats = pt.stats\n\n portfolio.positions_value = position_value = (\n position_stats.net_value\n )\n portfolio.positions_exposure = position_stats.net_exposure\n self._cash_flow(self._get_payout_total(pt.positions))\n\n start_value = portfolio.portfolio_value\n\n # update the new starting value\n portfolio.portfolio_value = end_value = portfolio.cash + position_value\n\n pnl = end_value - start_value\n if start_value != 0:\n returns = pnl / start_value\n else:\n returns = 0.0\n\n portfolio.pnl += pnl\n portfolio.returns = (\n (1 + portfolio.returns) *\n (1 + returns) -\n 1\n )\n\n # the portfolio has been fully synced\n self._dirty_portfolio = False", "def cum_returns(returns, starting_value=None):\n\n # df_price.pct_change() adds a nan in first position, we can use\n # that to have cum_returns start at the origin so that\n # df_cum.iloc[0] == starting_value\n # Note that we can't add that ourselves as we don't know which dt\n # to use.\n if pd.isnull(returns.iloc[0]):\n returns.iloc[0] = 0.\n\n df_cum = np.exp(np.log(1 + returns).cumsum())\n\n if starting_value is None:\n return df_cum - 1\n else:\n return df_cum * starting_value", "def calculate_futures(current_balance, today_shares_owned, history, range_days, redistribution):\n today_fund_value = today_shares_owned * history.iloc[0]\n current_distribution = today_fund_value / current_balance\n\n range_max_price = []\n overall_max_price = []\n for account in history:\n range_max_price.append(max(history[account][:range_days]))\n overall_max_price.append(max(history[account][:]))\n new_fund_distribution = redistribution * current_balance # move dollar balance to new redistribution\n new_shares_after_distribution = new_fund_distribution/history.iloc[0]\n\n new_share_range_max_price = new_shares_after_distribution * range_max_price\n potential_range_gain_loss = new_share_range_max_price - new_fund_distribution\n potential_total = sum(new_share_range_max_price)\n total_gain_loss = potential_total - current_balance\n\n current_shares_at_range_max_price = today_shares_owned * range_max_price\n tot = sum(current_shares_at_range_max_price)\n est_gain_loss = tot - current_balance\n current_distrib_v_scenario = total_gain_loss - est_gain_loss\n\n return current_distrib_v_scenario", "def get_benchmark_returns(symbol, start_date, end_date):\n df = web.DataReader(symbol, 'google', start_date, end_date)\n df.index = df.index.tz_localize('UTC')\n\n calendar = get_calendar(\"NYSE\")\n start_index = calendar.all_sessions.searchsorted(start_date)\n end_index = calendar.all_sessions.searchsorted(end_date)\n\n # fill price data for missing dates\n df = df[\"Close\"].reindex(calendar.all_sessions[start_index:end_index],\n method='ffill')\n\n return df.pct_change(1).iloc[1:]", "def compute_portfolio_stats(allocs,prices,rfr=0, sf=252):\n\n # portfolio value\n port_val = compute_port_val(allocs, prices)\n\n daily_rets = port_val/port_val.shift(1) - 1\n daily_rets = daily_rets[1:]\n\n # cumulative return\n cr = port_val.iloc[-1]/port_val.iloc[0] -1\n\n # avg daily return\n adr = daily_rets.mean()\n\n # std dev of daily return\n sddr = daily_rets.std()\n \n #sharpe_ratio\n k = math.sqrt(252)\n \n sr = k * ((daily_rets - 0).mean() / daily_rets.std())\n \n return cr, adr, sddr, sr", "def buy(stage, budget, items):\n\n temptab = []\n\n if stage == 0:\n values = 0\n 
else:\n i = stage - 1 # align the stage with index of items\n if items[i].lim == 0:\n limit = int(math.floor(budget/items[i].cost)) + 1\n else:\n limit = items[i].lim + 1\n\n for n in xrange(\n min(limit, int(math.floor(budget/items[i].cost))+1)):\n temptab.append(treat(items[i], n) +\n buy(stage-1, budget - n*items[i].cost, items))\n\n values = max(temptab)\n # the index in temptab is the amount of item to buy\n AMOUNT[(stage, budget)] = temptab.index(values)\n\n return values", "def optimizeForReturn(required_return, stock_db, use_genetic):\n print('Optimizing portfolio for %f' % required_return)\n pf = PortfolioFactory(stock_db, required_return, use_genetic=use_genetic)\n desired_portfolio = pf.desired_portfolio\n print('Required Return: %f' % required_return)\n print('Expected Return: %f' % math.pow(\n desired_portfolio.average_return, Config.DAYS_IN_YEAR))\n print('Downside Risk: %f' % desired_portfolio.downside_risk)\n print('Downside Correl: %f' % desired_portfolio.downside_correl)\n print('Score: %f' % desired_portfolio.score)\n\n # Write desired portfolio.\n DataIO.writeDesiredPortfolio(\n desired_portfolio, stock_db,\n 'output/DesiredPortfolio_%.0f_%.4f_%s.csv' % (\n Config.MINIMUM_AMOUNT_DATA, required_return, Config.TODAY.date()))\n\n print('Finished for %f' % required_return)\n\n return desired_portfolio", "def current_portfolio_weights(self) -> 'pd.Series[float]':\n position_values = pd.Series({\n asset: (\n position.last_sale_price *\n position.amount *\n asset.price_multiplier\n )\n for asset, position in self.positions.items()\n }, dtype=\"float64\")\n return position_values / self.portfolio_value", "def get_benchmark_returns(symbol, first_date, last_date):\n if symbol == '^GSPC':\n symbol = 'spy'\n\n data = pd_reader.DataReader(\n symbol,\n 'google',\n first_date,\n last_date\n )\n\n data = data['Close']\n\n data[pd.Timestamp('2008-12-15')] = np.nan\n data[pd.Timestamp('2009-08-11')] = np.nan\n data[pd.Timestamp('2012-02-02')] = np.nan\n\n data = data.fillna(method='ffill')\n\n return data.sort_index().tz_localize('UTC').pct_change(1).iloc[1:]", "def generate_portfolio(S_0, params):\n\tpublic_client = gdax.PublicClient()\n\tallvar = []\n\tsumvar = 0\n\tfor coin in params:\n\t\ttheta = coin[0]\n\t\tv = theta[0]\n\t\tT = coin[1]\n\t\tprod_id = coin[2]\n\t\t# Get the current value of the coin, i.e. how much you bought\n\t\tname = prod_id + '-USD'\n\t\tstats = public_client.get_product_24hr_stats(name)\n\t\tvalue = (float(stats['high']) + float(stats['low']))/2\n\t\tallvar.append([prod_id, value, v])\n\t\tsumvar += v\n\tpriority = sorted(allvar, key=lambda i: i[2])\n\tportfolio = []\n\tfor i in priority:\n\t\tinvestment = S_0*i[2]/sumvar\n\t\tcurrency = investment/i[1]\n\t\tportfolio.append((i[0], currency, investment)) # id, investment, currency\n\tprint(\"\\nYour suggested investments are: \\n\")\n\tfor coin in portfolio:\n\t\tprint(\"%s: %s for %s USD\" % (coin[0], coin[1], coin[2]))\n\t# Prompt to save the portfolio\n\tdone = False\n\twhile done != True:\n\t\tinp = input(\"\\nWould you like to save this portfolio? (y/n)\t\")\n\t\ttry:\n\t\t\tif inp.lower() == 'y':\n\t\t\t\tpublic_client = gdax.PublicClient()\n\t\t\t\tcurrent_date = np.datetime64(public_client.get_time().get(\"iso\").split('T')[0])\n\t\t\t\t# Save the file\n\t\t\t\twith open(\"portfolios/%s.txt\" % (current_date), \"w\") as f:\n\t\t\t\t\tfor coin in portfolio:\n\t\t\t\t\t\tf.write(str(coin[0]) + ', ' + str(coin[1]) + ', ' + str(coin[2]) + '\\n')\n\t\t\t\tprint(\"Portfolio saved. 
Exiting.\\n\")\n\t\t\t\tdone = True\n\t\t\tif inp.lower() == 'n':\n\t\t\t\tprint(\"Program complete. Exiting.\\n\")\n\t\t\t\tdone = True\n\t\texcept ValueError:\n\t\t\tprint(\"Your input could not be interpreted.\")", "def __populate_historical_trade_data(self):\n\n trade_data = self.__transactions.pivot_table(\n index=\"Date\",\n columns=[\"Ticker\"],\n values=[\n \"Quantity\",\n \"Investment\",\n ],\n aggfunc={\"Quantity\": np.sum, \"Investment\": np.sum},\n )\n\n # Make historical prices columns a multi-index. This helps the merging.\n self.portfolio_historical_prices.columns = pd.MultiIndex.from_product(\n [[\"Close\"], self.portfolio_historical_prices.columns]\n )\n\n trade_data = pd.merge(\n trade_data,\n self.portfolio_historical_prices,\n how=\"outer\",\n left_index=True,\n right_index=True,\n )\n\n trade_data[\"Close\"] = trade_data[\"Close\"].fillna(method=\"ffill\")\n trade_data.fillna(0, inplace=True)\n\n trade_data[\"Quantity\"] = trade_data[\"Quantity\"].cumsum()\n trade_data[\"Investment\"] = trade_data[\"Investment\"].cumsum()\n trade_data[\"Investment\", \"Total\"] = trade_data[\"Investment\"].sum(axis=1)\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Investment delta\"], self.tickers_list + [\"Total\"]]\n )\n ] = (trade_data[\"Investment\"].diff(periods=1).fillna(trade_data[\"Investment\"]))\n\n # End Value = Quantity * Close\n trade_data[pd.MultiIndex.from_product([[\"End Value\"], self.tickers_list])] = (\n trade_data[\"Quantity\"][self.tickers_list]\n * trade_data[\"Close\"][self.tickers_list]\n )\n\n trade_data.loc[:, (\"End Value\", \"Total\")] = trade_data[\"End Value\"][\n self.tickers_list\n ].sum(axis=1)\n\n # Initial Value = Previous End Value + Investment changes\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Initial Value\"], self.tickers_list + [\"Total\"]]\n )\n ] = 0\n\n trade_data[\"Initial Value\"] = trade_data[\"End Value\"].shift(1) + trade_data[\n \"Investment\"\n ].diff(periods=1)\n\n # Set first day Initial Value as the Investment (NaNs break first period)\n for t in self.tickers_list + [\"Total\"]:\n trade_data.at[trade_data.index[0], (\"Initial Value\", t)] = trade_data.iloc[\n 0\n ][\"Investment\"][t]\n\n trade_data = trade_data.reindex(\n columns=[\n \"Quantity\",\n \"Investment\",\n \"Investment delta\",\n \"Close\",\n \"Initial Value\",\n \"End Value\",\n ],\n level=0,\n )\n self.historical_trade_data = trade_data", "def calc_price_for_period(prev_price):\n result = []\n for i in range(1, N+1):\n price = prev_price + calc_price_delta(prev_price, i)\n prev_price = price\n result.append(price)\n return result", "def income_model_constant_portfolio_return(num_of_years=30, trials=100, method='normal'):\n\n sim_fia_cv = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_base_total = pd.DataFrame(index=range(num_of_years + 1))\n sim_base_income = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_port_total = pd.DataFrame(index=range(num_of_years + 1))\n sim_port_income = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_base_total_pre_income = pd.DataFrame(index=range(num_of_years + 1))\n sim_port_total_pre_income = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_base_total_preincome = pd.DataFrame(index=range(num_of_years + 1))\n sim_port_total_preincome = pd.DataFrame(index=range(num_of_years + 1))\n\n # read_income_inputs = pd.read_csv(src + \"income_model_inputs.csv\", index_col='Items')\n read_income_inputs = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='income_model_inputs',\n index_col=[0])\n\n # 
read_returns_est = pd.read_csv(src + \"income_assets_returns_estimates.csv\", index_col='Symbol')\n read_returns_est = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='income_assets_returns_estimates',\n index_col=[0])\n\n # read_returns_est.drop(['BM', read_returns_est.index[-1]], axis=0, inplace=True)\n # read_portfolio_inputs = pd.read_csv(src + \"income_portfolio_inputs.csv\", index_col='Items')\n\n # read_asset_weights = pd.read_csv(src + \"asset_weights.csv\", index_col='Asset')\n read_asset_weights = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='asset_weights',\n index_col=[0])\n\n read_asset_weights.drop(read_asset_weights.index[-1], axis=0, inplace=True)\n\n # read random returns for simulation\n read_normal = pd.read_csv(src + 'sort_normal.csv', index_col=[0], parse_dates=True)\n read_small = pd.read_csv(src + 'sort_small_to_large.csv', index_col=[0], parse_dates=True)\n read_large = pd.read_csv(src + 'sort_large_to_small.csv', index_col=[0], parse_dates=True)\n assets_col_names = list(read_normal.columns)\n\n tickers = list(read_asset_weights.index)\n wts = np.array(read_asset_weights.loc[:, 'base'])\n\n def asset_median_returns(data, ticker):\n return data.filter(regex=ticker).median(axis=1)\n\n # # dataframe for unsorted returns (normal)\n # median_returns_normal = pd.DataFrame({t: asset_median_returns(read_normal, t) for t in tickers})\n # median_returns_normal.loc[:, 'portfolio_return'] = median_returns_normal.dot(wts)\n # median_normal_fia = pd.DataFrame({'FIA': asset_median_returns(read_normal, 'r_FIA')})\n #\n # # dataframe for smallest to largest returns\n # median_returns_smallest = pd.DataFrame({t: asset_median_returns(read_small, t) for t in tickers})\n # median_returns_smallest.loc[:, 'portfolio_return'] = median_returns_smallest.dot(wts)\n # median_smallest_fia = pd.DataFrame({'FIA': asset_median_returns(read_small, 'r_FIA')})\n #\n # # dataframe for unsorted returns (normal)\n # median_returns_largest = pd.DataFrame({t: asset_median_returns(read_large, t) for t in tickers})\n # median_returns_largest.loc[:, 'portfolio_return'] = median_returns_largest.dot(wts)\n # median_largest_fia = pd.DataFrame({'FIA': asset_median_returns(read_large, 'r_FIA')})\n\n years = list(range(0, num_of_years + 1))\n income_cols = ['year', 'strategy_term', 'index_returns', 'term_ret', 'term_ret_with_par', 'term_annualize',\n 'ann_net_spread', 'term_ret_netspr', 'high_inc_benefit_base', 'rider_fee', 'eoy_income',\n 'contract_value']\n\n term = int(read_income_inputs.loc['term', 'inputs'])\n fia_ret = read_returns_est.loc[read_returns_est.index[-1], 'Annualized Returns']\n fia_risk = read_returns_est.loc[read_returns_est.index[-1], 'Annualized Risk']\n par_rate = float(read_income_inputs.loc['par_rate', 'inputs'])\n spread = float(read_income_inputs.loc['spread', 'inputs'])\n bonus_term = int(read_income_inputs.loc['bonus_term', 'inputs'])\n premium = float(read_income_inputs.loc['premium', 'inputs'])\n income_bonus = float(read_income_inputs.loc['income_bonus', 'inputs'])\n\n income_starts = int(read_income_inputs.loc['start_income_years', 'inputs'])\n income_growth = float(read_income_inputs.loc['income_growth', 'inputs'])\n rider_fee = float(read_income_inputs.loc['rider_fee', 'inputs'])\n inc_payout_factor = float(read_income_inputs.loc['income_payout_factor', 'inputs'])\n contract_bonus = float(read_income_inputs.loc['contract_bonus', 'inputs'])\n social = float(read_income_inputs.loc['social', 'inputs'])\n inflation = 
float(read_income_inputs.loc['inflation', 'inputs'])\n wtd_cpn_yield = float(read_income_inputs.loc['wtd_coupon_yld', 'inputs'])\n life_expectancy = int(read_income_inputs.loc['life_expectancy_age', 'inputs'])\n clients_age = int(read_income_inputs.loc['clients_age', 'inputs'])\n \n # -------------For Constant Growth Rates------------------------\n const_fia_index_ret = float(read_income_inputs.loc['const_fia_index_ret', 'inputs'])\n const_risky_port_ret = float(read_income_inputs.loc['const_risky_port_ret', 'inputs'])\n\n # ---------------INCOME MODEL--------------------------------------------\n runs = 0\n returns_dict = {}\n asset_dict = {}\n fia_dict = {}\n\n income_df = pd.DataFrame(index=years, columns=income_cols)\n income_df.loc[:, 'year'] = years\n income_df.loc[:, 'strategy_term'] = income_df.loc[:, 'year'] % term\n income_df.loc[:, 'strategy_term'] = income_df['strategy_term'].apply(lambda x: 1 if x == 0 else 0)\n\n if method == 'normal':\n # income_df.loc[:, 'index_returns'] = read_normal.loc[:, '{}_{}'.format('r_FIA', str(runs))]\n # ----------CONSTANT FIA INDEX GROWTH RATE-------------------\n income_df.loc[:, 'index_returns'] = const_fia_index_ret\n\n elif method == 'smallest':\n income_df.loc[:, 'index_returns'] = read_small.loc[:, '{}_{}'.format('r_FIA', str(runs))]\n\n else:\n income_df.loc[:, 'index_returns'] = read_large.loc[:, '{}_{}'.format('r_FIA', str(runs))]\n\n # income_df.loc[:, 'index_returns'] = np.random.normal(fia_ret, fia_risk, size=(len(years), 1))\n\n cumprod = (1. + income_df['index_returns']).rolling(window=term).agg(lambda x: x.prod()) - 1\n income_df.loc[:, 'term_ret'] = np.where(income_df.loc[:, 'strategy_term'] == 1, cumprod, 0)\n income_df.loc[:, 'term_ret_with_par'] = income_df.loc[:, 'term_ret'] * par_rate\n income_df.loc[:, 'term_annualize'] = income_df.loc[:, 'term_ret_with_par'].apply(\n lambda x: (1 + x) ** (1 / term) - 1)\n income_df.loc[:, 'ann_net_spread'] = income_df.loc[:, 'term_annualize'] - spread\n income_df.loc[:, 'ann_net_spread'] = np.where(income_df.loc[:, 'strategy_term'] == 1,\n income_df.loc[:, 'ann_net_spread'], 0)\n income_df.loc[:, 'term_ret_netspr'] = income_df.loc[:, 'ann_net_spread'].apply(lambda x: (1 + x) ** term - 1)\n\n for counter in years:\n if counter == 0:\n income_df.loc[counter, 'high_inc_benefit_base'] = premium * (1 + income_bonus)\n\n elif counter <= min(bonus_term, income_starts):\n income_df.loc[counter, 'high_inc_benefit_base'] = income_df.loc[counter - 1, 'high_inc_benefit_base'] * \\\n (1 + income_growth)\n else:\n income_df.loc[counter, 'high_inc_benefit_base'] = income_df.loc[counter - 1, 'high_inc_benefit_base']\n\n income_df.loc[:, 'rider_fee'] = income_df.loc[:, 'high_inc_benefit_base'] * rider_fee\n income_df.loc[:, 'eoy_income'] = np.where(income_df.loc[:, 'year'] > income_starts,\n income_df.loc[:, 'high_inc_benefit_base'] * inc_payout_factor, 0)\n\n for counter in years:\n if counter == 0:\n income_df.loc[counter, 'contract_value'] = premium * (1 + contract_bonus)\n\n elif income_df.loc[counter, 'strategy_term'] == 1:\n x1 = income_df.loc[counter - 1, 'contract_value'] - income_df.loc[counter, 'rider_fee']\n x2 = (x1 * (1 + income_df.loc[counter, 'term_ret_netspr'])) - income_df.loc[counter, 'eoy_income']\n income_df.loc[counter, 'contract_value'] = x2\n\n else:\n x1 = income_df.loc[counter - 1, 'contract_value'] - income_df.loc[counter, 'rider_fee'] - \\\n income_df.loc[counter, 'eoy_income']\n\n income_df.loc[counter, 'contract_value'] = x1\n\n # variable stores the income number that is used 
in the base and fia portfolio calcs.\n\n income_from_fia = income_df.loc[income_df.index[-1], 'eoy_income']\n\n income_df.loc[:, 'contract_value'] = income_df.loc[:, 'contract_value'].apply(lambda x: 0 if x <= 0 else x)\n\n sim_fia_cv.loc[:, str(runs)] = income_df.loc[:, 'contract_value']\n\n # -------------------------------------BASE MODEL---------------------------------------------\n\n base_wts = read_asset_weights.loc[:, 'base']\n base_assets = list(base_wts.index)\n base_weights = list(base_wts.values)\n base_returns = list(read_returns_est.loc[:, 'Annualized Returns'].values)\n base_std = list(read_returns_est.loc[:, 'Annualized Risk'].values)\n\n base_investment = float(read_income_inputs.loc['risky_assets', 'Base'])\n adv_fees = float(read_income_inputs.loc['advisor_fees', 'Base'])\n\n # -------------------required income----------------------------------\n req_annual_income = float(read_income_inputs.loc['annual_income', 'inputs'])\n income_needed = req_annual_income - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n cpn_income_base = base_investment * wtd_cpn_yield\n\n # ----------------------RANDOM RETURNS--------------------------\n r_cols = ['r_{}'.format(name) for name in base_assets]\n boy_value = ['bv_{}'.format(name) for name in base_assets]\n eoy_value = ['ev_{}'.format(name) for name in base_assets]\n\n random_returns = pd.DataFrame(index=income_df.index, columns=r_cols)\n\n # for c in range(len(r_cols)):\n # ret = np.random.normal(base_returns[c], base_std[c], size=(len(random_returns.index), 1))\n\n if method == 'smallest':\n this_run_cols = ['{}_{}'.format(cname, str(runs)) for cname in r_cols]\n random_returns = read_small.loc[:, this_run_cols]\n\n # random_returns.loc[:, r_cols[c]] = np.sort(ret.flatten())\n # asset_dict.update({'{}_{}'.format(r_cols[c], str(runs)): np.sort(ret.flatten())})\n\n elif method == 'largest':\n this_run_cols = ['{}_{}'.format(cname, str(runs)) for cname in r_cols]\n random_returns = read_large.loc[:, this_run_cols]\n\n # random_returns.loc[:, r_cols[c]] = np.flip(np.sort(ret.flatten()))\n # asset_dict.update({'{}_{}'.format(r_cols[c], str(runs)): np.flip(np.sort(ret.flatten()))})\n\n else:\n this_run_cols = ['{}_{}'.format(cname, str(runs)) for cname in r_cols]\n random_returns = read_normal.loc[:, this_run_cols]\n\n # random_returns.loc[:, r_cols[c]] = ret.flatten()\n # asset_dict.update({'{}_{}'.format(r_cols[c], str(runs)): ret.flatten()})\n\n # store the simulated assets returns in one dictionary\n # returns_dict.update({str(runs): random_returns})\n\n # collect the asset based returns from all simulation and calculate the median returns.\n # def get_median_returns(sym):\n # cols = [sym + '_' + str(c) for c in np.arange(trials)]\n # asset_df = pd.DataFrame({c: asset_dict.get(c) for c in cols})\n # return asset_df.median(axis=1)\n #\n # asset_median_returns = pd.DataFrame({symbol: get_median_returns(symbol) for symbol in r_cols})\n #\n # asset_median_returns.loc[:, 'simulated_portfolio_median_returns'] = asset_median_returns.dot(base_weights)\n\n base_df = random_returns.copy()\n pre_income_base_df = random_returns.copy()\n\n # base_investment = float(read_portfolio_inputs.loc['risky_assets', 'Base'])\n\n fia_portfolio_df = random_returns.copy()\n pre_income_port_df = random_returns.copy()\n port_investment = float(read_income_inputs.loc['risky_assets', 'FIA'])\n cpn_income_port = port_investment * wtd_cpn_yield\n\n # ---------Initial Investments for pre-income account values---------------------\n 
pre_income_base_inv = base_investment\n pre_income_port_inv = port_investment\n # ----------------------------------------BASE PORTFOLIO----------------------------\n for name in boy_value:\n base_df.loc[:, name] = 0.0\n pre_income_base_df.loc[:, name] = 0.0\n\n for counter in years:\n period_returns = list(random_returns.loc[counter, :])\n if counter == 0:\n # ---------------For year 0, the year of investment------------\n\n # ------------Calculate the annual portfolio returns - Gross Returns--------------------\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment for c in range(len(boy_value))]\n\n # -------------Record the Pre Income Base Portfolio-----------------------------\n\n pre_income_base_df.loc[counter, boy_value] = [base_weights[c] *\n pre_income_base_inv for c in range(len(boy_value))]\n pre_income_base_df.loc[counter, 'total'] = pre_income_base_df.loc[counter, boy_value].sum()\n pre_income_base_inv = pre_income_base_df.loc[counter, boy_value].sum()\n\n # ------------------Pre Income Block Ends------------------------\n\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n\n base_df.loc[counter, 'total_net_fees'] = 0.0\n base_df.loc[counter, 'income'] = 0.0\n # base_investment = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'total_pre_income'] = base_investment\n\n elif (counter > 0) and (counter < income_starts):\n\n # ----For years between the start of the investment and start if the income---------------\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n\n # -------------Record the Pre Income Base Portfolio-----------------------------\n pre_income_base_df.loc[counter, boy_value] = [\n base_weights[c] * pre_income_base_inv * (1 + period_returns[c])\n for c in range(len(boy_value))]\n\n # pre_income_base_df.loc[counter, 'total'] = pre_income_base_df.loc[counter, boy_value].sum()\n pre_income_base_df.loc[counter, 'total'] = base_investment * (1 + const_risky_port_ret)\n pre_income_base_df.loc[counter, 'adv_fees'] = pre_income_base_df.loc[counter, 'total'] * adv_fees\n pre_income_base_df.loc[counter, 'total_net_fees'] = pre_income_base_df.loc[counter, 'total'] - \\\n pre_income_base_df.loc[counter, 'adv_fees']\n pre_income_base_inv = pre_income_base_df.loc[counter, 'total_net_fees'] + cpn_income_base\n\n # ------------------Pre Income Block Ends------------------------\n\n # base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'total'] = base_investment * (1 + 0.06)\n base_df.loc[counter, 'adv_fees'] = base_df.loc[counter, 'total'] * adv_fees\n base_df.loc[counter, 'total_net_fees'] = base_df.loc[counter, 'total'] - base_df.loc[\n counter, 'adv_fees']\n\n # --coupon payment is invested back into the risky portfolio until the income is withdrawn----\n base_investment = base_df.loc[counter, 'total_net_fees'] + cpn_income_base\n base_df.loc[counter, 'total_pre_income'] = base_df.loc[counter, 'total_net_fees']\n\n else:\n\n # -------------For Years after the income started----------------------\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n\n # -------------Record the Pre Income Base Portfolio-----------------------------\n pre_income_base_df.loc[counter, boy_value] = [\n base_weights[c] * pre_income_base_inv * (1 + period_returns[c])\n for c in range(len(boy_value))]\n\n # pre_income_base_df.loc[counter, 'total'] = 
pre_income_base_df.loc[counter, boy_value].sum()\n pre_income_base_df.loc[counter, 'total'] = base_investment * (1 + const_risky_port_ret)\n pre_income_base_df.loc[counter, 'adv_fees'] = pre_income_base_df.loc[counter, 'total'] * adv_fees\n pre_income_base_df.loc[counter, 'total_net_fees'] = pre_income_base_df.loc[counter, 'total'] - \\\n pre_income_base_df.loc[counter, 'adv_fees']\n pre_income_base_inv = pre_income_base_df.loc[counter, 'total_net_fees'] + cpn_income_base\n\n # ------------------Pre Income Block Ends------------------------\n\n # base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'total'] = base_investment * (1 + const_risky_port_ret)\n base_df.loc[counter, 'adv_fees'] = base_df.loc[counter, 'total'] * adv_fees\n\n # ---req. income is adjusted for inflation from the second year of withdrawal. Reinvestment of coupon\n # stops from the year income starts. Req. income is reduced by the coupon payments\n\n if counter == income_starts:\n\n income_needed = req_annual_income - social\n base_df.loc[counter, 'income'] = income_needed - cpn_income_base\n income_needed = req_annual_income\n\n else:\n income_needed = income_needed * (1 + inflation) - social\n base_df.loc[counter, 'income'] = income_needed - cpn_income_base\n income_needed = income_needed + social\n\n base_df.loc[counter, 'total_net_fees'] = base_df.loc[counter, 'total'] - \\\n base_df.loc[counter, 'adv_fees'] - \\\n base_df.loc[counter, 'income']\n\n base_df.loc[counter, 'total_pre_income'] = base_df.loc[counter, 'total'] - \\\n base_df.loc[counter, 'adv_fees']\n\n base_investment = base_df.loc[counter, 'total_net_fees']\n\n # -------------------Portfolio with PreIncome Values----------------------------\n sim_base_total_preincome.loc[:, 's_{}'.format(str(runs))] = pre_income_base_df.loc[:, 'total_net_fees']\n sim_base_total_preincome.fillna(float(read_income_inputs.loc['risky_assets', 'Base']), inplace=True)\n # --------------------------------PreIncome Block Ends----------------------------\n\n base_df.loc[:, 'adj_total'] = base_df.loc[:, 'total_net_fees'].apply(lambda x: x if x > 0 else 0)\n sim_base_total.loc[:, 's_{}'.format(str(runs))] = base_df.loc[:, 'total_net_fees']\n sim_base_income.loc[:, 's_{}'.format(str(runs))] = base_df.loc[:, 'income']\n sim_base_total_pre_income.loc[:, 's_{}'.format(str(runs))] = base_df.loc[:, 'total_pre_income']\n\n # ----------------------------FIA PORTFOLIO----------------------------------------------\n for name in boy_value:\n fia_portfolio_df.loc[:, name] = 0.0\n pre_income_port_df.loc[:, name] = 0.0\n\n for counter in years:\n period_returns = list(random_returns.loc[counter, :])\n if counter == 0:\n\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment\n for c in range(len(boy_value))]\n\n # -------------Record the Pre Income Base Portfolio-----------------------------\n\n pre_income_port_df.loc[counter, boy_value] = [base_weights[c] *\n pre_income_port_inv for c in range(len(boy_value))]\n pre_income_port_df.loc[counter, 'total'] = pre_income_port_df.loc[counter, boy_value].sum()\n pre_income_port_inv = pre_income_port_df.loc[counter, boy_value].sum()\n\n # ------------------Pre Income Block Ends------------------------\n\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'total_net_fees'] = 0.0\n fia_portfolio_df.loc[counter, 'income'] = 0.0\n # port_investment = fia_portfolio_df.loc[counter, boy_value].sum()\n 
fia_portfolio_df.loc[counter, 'total_pre_income'] = port_investment\n\n elif (counter > 0) and (counter < income_starts):\n\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n\n # ------------------Record the Pre Income Base Portfolio-----------------------------\n pre_income_port_df.loc[counter, boy_value] = [\n base_weights[c] * pre_income_port_inv * (1 + period_returns[c])\n for c in range(len(boy_value))]\n\n # pre_income_port_df.loc[counter, 'total'] = pre_income_port_df.loc[counter, boy_value].sum()\n \n # -----------------------CONSTANT GROWTH RATE-----------------\n pre_income_port_df.loc[counter, 'total'] = port_investment * (1 + const_risky_port_ret)\n\n pre_income_port_df.loc[counter, 'adv_fees'] = pre_income_port_df.loc[counter, 'total'] * adv_fees\n pre_income_port_df.loc[counter, 'total_net_fees'] = pre_income_port_df.loc[counter, 'total'] - \\\n pre_income_port_df.loc[counter, 'adv_fees']\n pre_income_port_inv = pre_income_port_df.loc[counter, 'total_net_fees'] + cpn_income_base\n\n # ------------------Pre Income Block Ends------------------------\n\n # fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n \n # -------CONSTANT GROWTH RATE-----------------\n fia_portfolio_df.loc[counter, 'total'] = port_investment * (1 + const_risky_port_ret)\n\n fia_portfolio_df.loc[counter, 'adv_fees'] = fia_portfolio_df.loc[counter, 'total'] * adv_fees\n fia_portfolio_df.loc[counter, 'total_net_fees'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees']\n fia_portfolio_df.loc[counter, 'total_pre_income'] = fia_portfolio_df.loc[counter, 'total_net_fees']\n\n port_investment = fia_portfolio_df.loc[counter, 'total_net_fees'] + cpn_income_port\n\n else:\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n\n # -------------Record the Pre Income Base Portfolio-----------------------------\n pre_income_port_df.loc[counter, boy_value] = [\n base_weights[c] * pre_income_port_inv * (1 + period_returns[c])\n for c in range(len(boy_value))]\n\n # pre_income_port_df.loc[counter, 'total'] = pre_income_port_df.loc[counter, boy_value].sum()\n pre_income_port_df.loc[counter, 'total'] = port_investment * (1 + const_risky_port_ret)\n pre_income_port_df.loc[counter, 'adv_fees'] = pre_income_port_df.loc[counter, 'total'] * adv_fees\n pre_income_port_df.loc[counter, 'total_net_fees'] = pre_income_port_df.loc[counter, 'total'] - \\\n pre_income_port_df.loc[counter, 'adv_fees']\n pre_income_port_inv = pre_income_port_df.loc[counter, 'total_net_fees'] + cpn_income_base\n\n # ------------------Pre Income Block Ends------------------------\n\n # fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'total'] = port_investment * (1 + const_risky_port_ret)\n fia_portfolio_df.loc[counter, 'adv_fees'] = fia_portfolio_df.loc[counter, 'total'] * adv_fees\n\n # ---req. income is adjusted for inflation from the second year of withdrawal. Reinvestment of coupon\n # stops from the year income starts. Req. 
income is reduced by the coupon payments\n\n if counter == income_starts:\n\n income_needed = req_annual_income - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n fia_portfolio_df.loc[counter, 'income'] = max(0, income_net_fia_income - cpn_income_port)\n income_needed = req_annual_income\n\n else:\n income_needed = income_needed * (1 + inflation) - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n fia_portfolio_df.loc[counter, 'income'] = max(0, income_net_fia_income - cpn_income_port)\n income_needed = income_needed + social\n\n fia_portfolio_df.loc[counter, 'total_net_fees'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees'] - \\\n fia_portfolio_df.loc[counter, 'income']\n\n fia_portfolio_df.loc[counter, 'total_pre_income'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees']\n\n port_investment = fia_portfolio_df.loc[counter, 'total_net_fees']\n\n sim_port_total.loc[:, 's_{}'.format(str(runs))] = fia_portfolio_df.loc[:, 'total_net_fees'] + \\\n income_df.loc[:, 'contract_value']\n\n sim_port_income.loc[:, 's_{}'.format(str(runs))] = fia_portfolio_df.loc[:, 'income']\n\n fia_portfolio_df.loc[:, 'adj_total'] = fia_portfolio_df.loc[:, 'total_net_fees'].apply(\n lambda x: x if x > 0 else 0)\n\n sim_port_total_pre_income.loc[:, 's_{}'.format(str(runs))] = fia_portfolio_df.loc[:, 'total_pre_income']\n\n # -------------------Portfolio with PreIncome Values----------------------------\n sim_port_total_preincome.loc[:, 's_{}'.format(str(runs))] = pre_income_port_df.loc[:, 'total_net_fees'] + \\\n income_df.loc[:, 'contract_value']\n\n sim_port_total_preincome.fillna(float(read_income_inputs.loc['risky_assets', 'FIA']), inplace=True)\n \n # --------------------------------PreIncome Block Ends----------------------------\n\n # ------------------Calculate % of portfolios ending value greater than required LIFETIME cumm. 
income---------\n total_income_by_age = sim_base_income.loc[:, sim_base_income.columns[0]].cumsum()\n total_income_by_acturial_age = total_income_by_age.loc[life_expectancy - clients_age]\n total_income_by_age.fillna(0, inplace=True)\n income_dataframe = pd.DataFrame(total_income_by_age)\n income_dataframe.loc[:, 'remaining_income_by_acturial_age'] = total_income_by_age.apply(\n lambda x: total_income_by_acturial_age - x)\n\n s = income_dataframe.loc[:, 'remaining_income_by_acturial_age']\n base_prob_of_success = sim_base_total.gt(s, axis=0).sum(axis=1)\n port_prob_of_success = sim_port_total.gt(s, axis=0).sum(axis=1)\n\n # ----------------------------Portfolio sufficient for NEXT YEARS income needs-------------------\n next_year_income = sim_base_income.loc[:, sim_base_income.columns[0]].shift(-1).fillna(0) # Yearly Income Reqd.\n base_success_next_year = sim_base_total.gt(next_year_income, axis=0).sum(axis=1)\n\n base_for_next_year_need = sim_base_total[sim_base_total.gt(next_year_income, axis=0)]\n\n port_success_next_year = sim_port_total.gt(next_year_income, axis=0).sum(axis=1)\n\n port_for_next_year_need = sim_port_total[sim_port_total.gt(next_year_income, axis=0)]\n\n # ---------------Portfolio for 45 years of simulation---------------------------------------\n base_success_portfolio = sim_base_total[sim_base_total.gt(next_year_income, axis=0)]\n port_success_portfolio = sim_port_total[sim_port_total.gt(next_year_income, axis=0)]\n\n # ----------------Portfolio Simulation until the acturial age------------------------------\n acturial_years = life_expectancy - clients_age\n base_success_portfolio_act_age = base_success_portfolio.loc[acturial_years, :]\n port_success_portfolio_act_age = port_success_portfolio.loc[acturial_years, :]\n\n # -------------------------Base Portfolio TS with max Terminal Value ----------------------------\n if base_success_portfolio_act_age.isnull().sum() == trials:\n base_max_portfolio = 0.0\n else:\n base_max_portfolio = base_success_portfolio.loc[:, base_success_portfolio_act_age.idxmax()]\n\n # -------------------------FIA Portfolio TS with max Terminal Value ----------------------------\n if port_success_portfolio_act_age.isnull().sum() == trials:\n port_max_portfolio = 0.0\n else:\n port_max_portfolio = port_success_portfolio.loc[:, port_success_portfolio_act_age.idxmax()]\n\n # ------------------------------Average age with full income------------------------------\n base_mean_age = ((life_expectancy - clients_age) - base_success_portfolio.loc[1:life_expectancy - clients_age, :]\n .isnull().sum()).mean()\n\n port_mean_age = ((life_expectancy - clients_age) - port_success_portfolio.loc[1:life_expectancy - clients_age, :]\n .isnull().sum()).mean()\n\n # ----------------------------Median Age with full Income------------------------------------------\n base_median_age = ((life_expectancy - clients_age) - base_success_portfolio.loc[1:life_expectancy - clients_age, :]\n .isnull().sum()).median()\n\n port_median_age = ((life_expectancy - clients_age) - port_success_portfolio.loc[1:life_expectancy - clients_age, :]\n .isnull().sum()).median()\n\n # --------------Mean Value for all the portfolios at end of the acturial age--------------------\n base_act_avg_porfolio = base_success_portfolio.loc[life_expectancy - clients_age, :].fillna(0).mean()\n port_act_avg_porfolio = port_success_portfolio.loc[life_expectancy - clients_age, :].fillna(0).mean()\n\n # --------------Median Value for all the portfolios at end of the acturial age--------------------\n 
base_act_median_porfolio = base_success_portfolio.loc[life_expectancy - clients_age, :].fillna(0).median()\n port_act_median_porfolio = port_success_portfolio.loc[life_expectancy - clients_age, :].fillna(0).median()\n\n # # --------------Mean Value for all the portfolios in the simulation--------------------\n # base_sim_mean = base_success_portfolio.loc[1:life_expectancy - clients_age, :].mean().mean()\n # port_sim_mean = port_success_portfolio.loc[1:life_expectancy - clients_age, :].mean().mean()\n #\n # # --------------Median Value for all the portfolios in the simulation--------------------\n # base_sim_mean = base_success_portfolio.loc[1:life_expectancy - clients_age, :].mean().median()\n # port_sim_mean = port_success_portfolio.loc[1:life_expectancy - clients_age, :].mean().median()\n\n # -------Max Portfolio value at the end of acturial age----------------------------------------\n base_act_max = base_success_portfolio.loc[life_expectancy - clients_age, :].max()\n port_act_max = port_success_portfolio.loc[life_expectancy - clients_age, :].max()\n\n # -------Min Portfolio value at the end of acturial age----------------------------------------\n base_act_min = base_success_portfolio.loc[life_expectancy - clients_age, :].min()\n port_act_min = port_success_portfolio.loc[life_expectancy - clients_age, :].min()\n\n # ---------------------Lifetime Average Income----------------------------------\n base_total_income = sim_base_income.cumsum().loc[acturial_years, :].mean()\n port_total_income = income_from_fia + sim_port_income\n port_total_income = port_total_income.cumsum().loc[acturial_years, :].mean()\n\n simulation_stats = pd.DataFrame(index=['Average Years', 'Median Years', 'Average Age', 'Median Age',\n 'Average Portfolio (act.age)', 'Median Portfolio (act.age)',\n 'Max Portfolio Value', 'Min Portfolio Value',\n 'Average Lifetime Income'], columns=['Base', 'FIA'])\n\n simulation_stats.loc['Average Years', :] = [base_mean_age, base_mean_age]\n simulation_stats.loc['Median Years', :] = [base_median_age, base_median_age]\n simulation_stats.loc['Average Age', :] = [base_mean_age + clients_age, base_mean_age + clients_age]\n simulation_stats.loc['Median Age', :] = [base_median_age + clients_age, base_median_age + clients_age]\n simulation_stats.loc['Average Portfolio (act.age)', :] = [base_act_avg_porfolio, port_act_avg_porfolio]\n simulation_stats.loc['Median Portfolio (act.age)', :] = [base_act_median_porfolio, port_act_median_porfolio]\n simulation_stats.loc['Max Portfolio Value', :] = [base_act_max, port_act_max]\n simulation_stats.loc['Min Portfolio Value', :] = [base_act_min, port_act_min]\n simulation_stats.loc['Average Lifetime Income', :] = [base_total_income, port_total_income]\n comments = ['Average years of portfolios that meet the next years income needs for the lifetime',\n 'Median years of portfolios that meet the next years income needs for the lifetime',\n 'Average Clients Age',\n 'Median Clients Age',\n 'Average of terminal values for the portfolios at the end of the acturial life',\n 'Median of terminal values for the portfolios at the end of the acturial life',\n 'Maximum of terminal values for the portfolios at the end of the acturial life',\n 'Minimum of terminal values for the portfolios at the end of the acturial life',\n 'Average of total income generated by all portfolios at the end of the acturial life']\n\n simulation_stats.loc[:, 'Notes'] = comments\n\n # --------------------------------------------------------------------------------\n\n # # 
-----------------------------------income breakdown for Base portfolio----------------------------------\n # base_df.to_csv(src + 'base_port_detail.csv')\n # sim_base_total.to_csv(src + 'base_ending_values.csv')\n # income_breakdown_base = pd.DataFrame(sim_base_total.quantile(0.5, axis=1))\n # income_breakdown_base.loc[:, 'income_from_portfolio'] = sim_base_income.quantile(0.5, axis=1)\n # income_breakdown_base.loc[:, 'fia_income'] = 0.0\n # income_breakdown_base.loc[:, 'social_security_income'] = social\n # income_breakdown_base.loc[:, 'coupon_income'] = cpn_income_base\n #\n # income_breakdown_base.rename(columns={income_breakdown_base.columns[0]: 'portfolio_ending_value'}, inplace=True)\n # income_breakdown_base.loc[:, 'income_from_portfolio'][\n # income_breakdown_base.loc[:, 'portfolio_ending_value'] <= 0] = 0\n # income_breakdown_base.loc[:, 'total_income'] = income_breakdown_base.loc[:, income_breakdown_base.columns[1:]].sum(\n # axis=1)\n #\n # # --------------------------------------Block Ends-----------------------------------------------------------\n #\n # # ---------------------------------------income breakdown for FIA portfolio----------------------------------\n # fia_portfolio_df.to_csv(src + 'fia_port_detail.csv')\n # sim_port_total.to_csv(src + 'fiaport_ending_values.csv')\n #\n # income_breakdown_port = pd.DataFrame(sim_port_total.quantile(0.5, axis=1))\n # income_breakdown_port.loc[:, 'income_from_portfolio'] = sim_port_income.quantile(0.5, axis=1)\n # income_breakdown_port.loc[:, 'fia_income'] = income_from_fia\n # income_breakdown_port.loc[:, 'social_security_income'] = social\n # income_breakdown_port.loc[:, 'coupon_income'] = cpn_income_port\n #\n # income_breakdown_port.rename(columns={income_breakdown_port.columns[0]: 'portfolio_ending_value'}, inplace=True)\n # income_breakdown_port.loc[:, 'income_from_portfolio'][\n # income_breakdown_port.loc[:, 'portfolio_ending_value'] <= 0] = 0\n # income_breakdown_port.loc[:, 'total_income'] = income_breakdown_port.loc[:, income_breakdown_port.columns[1:]].sum(\n # axis=1)\n #\n # # ----------------------------------Block Ends-------------------------------------------------------------\n q_cut = [0.0, 0.1, 0.25, 0.5, 0.75, 0.95, 1.0]\n sim_base_income[sim_base_total < income_needed] = 0.0\n\n sim_port_income[sim_port_total < income_net_fia_income] = 0\n\n sim_port_income = sim_port_income + income_from_fia\n\n # base_quantile = sim_base_total.loc[sim_base_total.index[-1]].quantile([0.05, 0.25, 0.50, 0.75, 0.90])\n #\n # port_quantile = sim_port_total.loc[sim_port_total.index[-1]].quantile([0.05, 0.25, 0.50, 0.75, 0.90])\n\n base_quantile = sim_base_total.loc[sim_base_total.index[-1]].quantile(q_cut)\n\n port_quantile = sim_port_total.loc[sim_port_total.index[-1]].quantile(q_cut)\n\n # q_cut = [0.0, .05, 0.25, 0.5, 0.75, 0.95, 1.0]\n cols = ['Min', '10th', '25th', '50th', '75th', '90th', 'Max']\n\n # ------------------------------------------drop year 0-----------------------------------------\n sim_base_total = sim_base_total[1:]\n sim_port_total = sim_port_total[1:]\n\n # ---------------------------------plot for histogram for porfolios--------------------------------------\n # base_term_value = sim_base_total.loc[sim_base_total.index[:life_expectancy - clients_age], :]\n # fact = 1 / len(base_term_value)\n # base_ann_ret = (base_term_value.iloc[-1] / base_term_value.iloc[0]) ** fact - 1\n # counts, bins, bars = plt.hist(base_ann_ret)\n\n # ------------------------quantile analysis for base terminal 
value-----------------------------\n base_qcut = pd.DataFrame(index=sim_base_total.index, columns=cols)\n for c in range(len(cols)):\n base_qcut.loc[:, cols[c]] = sim_base_total.quantile(q_cut[c], axis=1)\n\n base_qcut.clip(lower=0, inplace=True)\n\n sim_base_total.clip(lower=0, inplace=True)\n\n # -------------------------------------quantile analysis for base income----------------------------\n base_income_qcut = pd.DataFrame(index=sim_base_income.index, columns=cols)\n for c in range(len(cols)):\n base_income_qcut.loc[:, cols[c]] = sim_base_income.quantile(q_cut[c], axis=1)\n\n # ----Remove NaN's prior to the income start years------------\n # base_income_qcut = base_income_qcut.loc[income_starts:]\n\n # ---------------------------------quantile analysis for portfolio terminal value ---------------\n\n port_qcut = pd.DataFrame(index=sim_port_total.index, columns=cols)\n for c in range(len(cols)):\n port_qcut.loc[:, cols[c]] = sim_port_total.quantile(q_cut[c], axis=1)\n\n port_qcut.clip(lower=0, inplace=True)\n\n # ----------------------------------quantile analysis for portfolio income----------------------------\n port_income_qcut = pd.DataFrame(index=sim_port_income.index, columns=cols)\n for c in range(len(cols)):\n port_income_qcut.loc[:, cols[c]] = sim_port_income.quantile(q_cut[c], axis=1)\n\n # ----Remove NaN's prior to the income start years------------\n # port_income_qcut = port_income_qcut.loc[income_starts:]\n\n # ----------probability ending value will be less than 0 at the end of the horizon -----------------------\n # base_legacy_risk = (sim_base_total.loc[sim_base_total.index[-1]] < 0).sum() / (trials)\n\n base_legacy_risk = (sim_base_total.loc[sim_base_total.index[life_expectancy - clients_age]] < 0).sum() / trials\n port_legacy_risk = (sim_port_total.loc[sim_port_total.index[life_expectancy - clients_age]] < 0).sum() / trials\n\n # port_legacy_risk = (sim_port_total.loc[sim_port_total.index[-1]] <= 0).sum() / (trials)\n\n legacy_risk = pd.DataFrame([base_legacy_risk, port_legacy_risk,\n 'Prob. 
of portfolio value less than 0 at the end of the expected life'],\n index=['base', 'fia_portfolio', 'Notes'],\n columns=['Ruin Probability'])\n\n # -----------Year-wise probability of ending value greater than 0 -----------------\n base_psuccess = sim_base_total.apply(lambda x: x > 0).sum(axis=1) / trials\n port_psuccess = sim_port_total.apply(lambda x: x > 0).sum(axis=1) / trials\n\n # -----------------------WRITING FILES TO EXCEL ---------------------------\n\n writer = pd.ExcelWriter(dest_simulation + method + '_leveled_growth_simulation.xlsx', engine='xlsxwriter')\n read_income_inputs.to_excel(writer, sheet_name='inputs_for_income')\n\n read_returns_est.to_excel(writer, sheet_name='asset_returns_estimates')\n # read_portfolio_inputs.to_excel(writer, sheet_name='portfolio_inputs')\n\n age_index = list(range(clients_age + 1, clients_age + len(base_qcut) + 1))\n # base_qcut.loc[:, 'clients_age'] = age_index\n # base_qcut.loc[:, 'comment'] = ''\n # base_qcut.loc[:, 'comment'] = np.where(base_qcut.clients_age == life_expectancy, 'expected_life', \"\")\n base_inv = float(read_income_inputs.loc['risky_assets', 'Base'])\n base_qcut.loc[:, 'age'] = age_index\n base_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n\n # -----------------------To start with year 0---------------------------------\n insert_col = [base_inv, base_inv, base_inv, base_inv, base_inv, base_inv,\n base_inv, clients_age, np.nan]\n base_qcut.loc[len(base_qcut) + 1, :] = 0.0\n base_qcut = base_qcut.shift(1)\n base_qcut.iloc[0] = insert_col\n base_qcut.reset_index(drop=True, inplace=True)\n base_qcut.to_excel(writer, sheet_name='base_ending_value_quantiles')\n # base_qcut.loc[income_starts:, :].to_excel(writer, sheet_name='base_ending_value_quantiles')\n\n # base_income_qcut = base_income_qcut[1:] base_income_qcut.loc[:, 'clients_age'] = age_index\n # base_income_qcut.loc[:, 'comment'] = '' base_income_qcut.loc[:, 'comment'] = np.where(\n # base_income_qcut.clients_age == life_expectancy, 'expected_life', \"\")\n\n base_income_qcut = base_income_qcut.loc[1:, :]\n base_income_qcut.loc[:, 'age'] = age_index\n base_income_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n base_income_qcut.loc[income_starts:, :].to_excel(writer, sheet_name='base_income_quantiles')\n\n # age_index = list(range(clients_age+1, clients_age + len(port_qcut)+1))\n # port_qcut.loc[:, 'clients_age'] = age_index\n # port_qcut.loc[:, 'comment'] = ''\n # port_qcut.loc[:, 'comment'] = np.where(port_qcut.clients_age == life_expectancy, 'expected_life', \"\")\n\n port_qcut.loc[:, 'age'] = age_index\n port_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n port_qcut.loc[len(port_qcut) + 1, :] = 0.0\n port_qcut = port_qcut.shift(1)\n port_qcut.iloc[0] = insert_col\n port_qcut.reset_index(drop=True, inplace=True)\n port_qcut.to_excel(writer, sheet_name='fia_port_ending_value_quantiles')\n # port_qcut.loc[income_starts:, :].to_excel(writer, sheet_name='fia_port_ending_value_quantiles')\n\n # port_income_qcut = port_income_qcut[1:] port_income_qcut.loc[:, 'clients_age'] = age_index\n # port_income_qcut.loc[:, 'comment'] = '' port_income_qcut.loc[:, 'comment'] = np.where(\n # port_income_qcut.clients_age == life_expectancy, 'expected_life', \"\")\n\n port_income_qcut = port_income_qcut.loc[1:, :]\n port_income_qcut.loc[:, 'age'] = age_index\n port_income_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n port_income_qcut.loc[income_starts:, :].to_excel(writer, 
sheet_name='fia_port_income_quantiles')\n\n prob_success_df = pd.concat([base_psuccess, port_psuccess], axis=1)\n prob_success_df.rename(columns={prob_success_df.columns[0]: 'prob(ending_value>0)_base',\n prob_success_df.columns[1]: 'prob(ending_value>0)_port'}, inplace=True)\n\n # prob_success_df.loc[:, 'clients_age'] = age_index\n # prob_success_df.loc[:, 'comment'] = ''\n # prob_success_df.loc[:, 'comment'] = np.where(prob_success_df.clients_age == life_expectancy, 'expected_life', \"\")\n\n prob_success_df.loc[:, 'age'] = age_index\n prob_success_df.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n prob_success_df.loc[:, 'prob(ending_value>lifetime_req income)_base'] = base_prob_of_success / trials\n prob_success_df.loc[:, 'prob(ending_value>lifetime_req income)_port'] = port_prob_of_success / trials\n prob_success_df.loc[:, 'prob(ending_value>next_year_req_income)_base'] = base_success_next_year / trials\n prob_success_df.loc[:, 'prob(ending_value>next_year_req_income)_port'] = port_success_next_year / trials\n prob_success_df.loc[:, 'base_max_portfolio_at_acturial_age'] = base_max_portfolio\n prob_success_df.loc[:, 'port_max_portfolio_at_acturial_age'] = port_max_portfolio\n\n # --------------------Percentile Portfolio's based on Acturial Life------------------------\n base_success = prob_success_df.loc[life_expectancy - clients_age, 'prob(ending_value>next_year_req_income)_base']\n port_success = prob_success_df.loc[life_expectancy - clients_age, 'prob(ending_value>next_year_req_income)_port']\n\n # acturial_age_base_tv = sim_base_total.loc[:life_expectancy - clients_age, ]\n # percentile_base_tv = sim_base_total.apply(lambda x: np.nanpercentile(x, base_success), axis=1)\n\n # ----------------Year wise percentile portfolio to meet next year income. Based on the success at acturial age.\n # Yearly portfolio values that can provide the next year income below the success rate at end of life (Percentile)-\n\n # acturial_age_base = base_for_next_year_need.loc[:life_expectancy - clients_age, ]\n # acturial_age_base = base_for_next_year_need.copy().fillna(0)\n percentile_base = base_for_next_year_need.apply(lambda x: np.nanpercentile(x, base_success), axis=1)\n\n # ----Pre Income Portfolio based on the Probab. of Success to meet next year's income at the end on the Act. Age\n base_pre_income_success = sim_base_total_preincome.apply(lambda x: np.nanpercentile(x, base_success), axis=1)\n base_ann_ret_pre_income = base_pre_income_success.pct_change().fillna(0)\n\n # acturial_age_port_tv = sim_port_total.loc[:life_expectancy - clients_age, ]\n # percentile_port_tv = sim_port_total.apply(lambda x: np.nanpercentile(x, port_success), axis=1)\n\n # ----------------Year wise percentile portfolio to meet next year income. Based on the success at acturial age.\n # Yearly portfolio values that can provide the next year income below the success rate at end of life (Percentile)-\n\n # acturial_age_port = port_for_next_year_need.loc[:life_expectancy - clients_age, ]\n # acturial_age_base = port_for_next_year_need.copy().fillna(0)\n percentile_port = port_for_next_year_need.apply(lambda x: np.nanpercentile(x, port_success), axis=1)\n\n # ----Pre Income Portfolio based on the Probab. of Success to meet next year's income at the end on the Act. 
Age\n port_pre_income_success = sim_port_total_preincome.apply(lambda x: np.nanpercentile(x, port_success), axis=1)\n port_ann_ret_pre_income = port_pre_income_success.pct_change().fillna(0)\n\n prob_success_df.loc[:, 'acturial_success_percentile_base_portfolio'] = percentile_base\n prob_success_df.loc[:, 'acturial_success_percentile_port_portfolio'] = percentile_port\n\n prob_success_df.loc[:, 'base_pre_income_ann_ret'] = base_ann_ret_pre_income\n prob_success_df.loc[:, 'port_pre_income_ann_ret'] = port_ann_ret_pre_income\n\n # prob_success_df.loc[:, 'terminalVal_success_percentile_base_portfolio'] = percentile_base_tv\n # prob_success_df.loc[:, 'terminalVal_success_percentile_port_portfolio'] = percentile_port_tv\n\n sim_base_total_preincome.to_excel(writer, sheet_name='base_preincome_portfolios')\n # -------Add premium to year 0 value to get total portfolio value---------\n sim_port_total_preincome.iloc[0] = sim_port_total_preincome.iloc[0] + premium\n sim_port_total_preincome.to_excel(writer, sheet_name='port_preincome_portfolios')\n\n # -------------For Simulation slide - BASE Portfolio - Can Delete --------------------\n # base_qcut_preinc = pd.DataFrame(index=sim_base_total_preincome.index, columns=cols)\n # for c in range(len(cols)):\n # base_qcut_preinc.loc[:, cols[c]] = sim_base_total_preincome.quantile(q_cut[c], axis=1)\n #\n # # -------------For Simulation slide - Proposed Portfolio --------------------\n # port_qcut_preinc = pd.DataFrame(index=sim_port_total_preincome.index, columns=cols)\n # for c in range(len(cols)):\n # port_qcut_preinc.loc[:, cols[c]] = sim_port_total_preincome.quantile(q_cut[c], axis=1)\n #\n # base_qcut_preinc.to_excel(writer, sheet_name='base_preincome_quantiles')\n # port_qcut_preinc.to_excel(writer, sheet_name='port_preincome_quantiles')\n\n prob_success_df.to_excel(writer, sheet_name='success_probability')\n\n # --------------BASE - Accumulation and Income Breakdown based on the success percentile portfolio---------------\n base_df.to_csv(src + 'base_port_detail.csv')\n sim_base_total.to_csv(src + 'base_ending_values.csv')\n income_breakdown_base = pd.DataFrame(sim_base_total.quantile(base_success, axis=1))\n income_breakdown_base.loc[:, 'income_from_risky_assets'] = sim_base_income.quantile(base_success, axis=1) \\\n - social - cpn_income_port\n income_breakdown_base.loc[:, 'guaranteed_income'] = 0.0\n income_breakdown_base.loc[:, 'social_security_income'] = social\n income_breakdown_base.loc[:, 'coupon_income'] = cpn_income_base\n\n income_breakdown_base.rename(columns={income_breakdown_base.columns[0]: 'portfolio_ending_value'}, inplace=True)\n income_breakdown_base.loc[:, 'income_from_risky_assets'][\n income_breakdown_base.loc[:, 'portfolio_ending_value'] <= 0] = 0\n income_breakdown_base.loc[:, 'total_income'] = income_breakdown_base.loc[:, income_breakdown_base.columns[1:]].sum(\n axis=1)\n\n # ----------FIA PORTFOLIO - Accumulation and Income Breakdown based on the success percentile portfolio-----------\n fia_portfolio_df.to_csv(src + 'fia_port_detail.csv')\n sim_port_total.to_csv(src + 'fiaport_ending_values.csv')\n\n income_breakdown_port = pd.DataFrame(sim_port_total.quantile(port_success, axis=1))\n income_breakdown_port.loc[:, 'income_from_risky_assets'] = sim_port_income.quantile(port_success, axis=1) \\\n - income_from_fia - social - cpn_income_port\n income_breakdown_port.loc[:, 'guaranteed_income'] = income_from_fia\n income_breakdown_port.loc[:, 'social_security_income'] = social\n income_breakdown_port.loc[:, 'coupon_income'] = 
cpn_income_port\n\n income_breakdown_port.rename(columns={income_breakdown_port.columns[0]: 'portfolio_ending_value'}, inplace=True)\n income_breakdown_port.loc[:, 'income_from_risky_assets'][\n income_breakdown_port.loc[:, 'portfolio_ending_value'] <= 0] = 0\n income_breakdown_port.loc[:, 'total_income'] = income_breakdown_port.loc[:, income_breakdown_port.columns[1:]].sum(\n axis=1)\n\n # -------------------Write simulation Statistics-------------------------------------\n simulation_stats.to_excel(writer, sheet_name='simulation_statistics')\n\n # port_psuccess.to_excel(writer, sheet_name='fia_port_success_probability')\n\n income_breakdown_base = income_breakdown_base.loc[1:, :]\n income_breakdown_base.loc[:, 'age'] = age_index\n income_breakdown_base.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n income_breakdown_base.loc[income_starts:, :].to_excel(writer, sheet_name='base_income_breakdown_median')\n\n income_breakdown_port = income_breakdown_port.loc[1:, :]\n income_breakdown_port.loc[:, 'age'] = age_index\n income_breakdown_port.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n income_breakdown_port.loc[income_starts:, :].to_excel(writer, sheet_name='fia_income_breakdown_median')\n\n legacy_risk.to_excel(writer, sheet_name='ruin_probability')\n\n # if method == 'normal':\n # median_returns_normal.loc[:, 'fia_median_returns'] = median_normal_fia\n # median_returns_normal.to_excel(writer, sheet_name='gr_port_median_normal')\n #\n # elif method == 'smallest':\n # median_returns_smallest.loc[:, 'fia_median_returns'] = median_smallest_fia\n # median_returns_smallest.to_excel(writer, sheet_name='gr_port_median_asc')\n #\n # else:\n # median_returns_largest.loc[:, 'fia_median_returns'] = median_largest_fia\n # median_returns_largest.to_excel(writer, sheet_name='gr_port_median_desc')\n\n # ---------------------Histogram for S&P Forecast---------------------------------------\n sp_returns = read_returns_est.loc['SPXT Index', 'Annualized Returns']\n sp_risk = read_returns_est.loc['SPXT Index', 'Annualized Risk']\n sp_random_ret = np.random.normal(loc=sp_returns, scale=sp_risk, size=10000)\n bins, data = np.histogram(sp_random_ret, bins=20)\n df_ret = pd.DataFrame(data, columns=['Return_range'])\n df_bins = pd.DataFrame(bins, columns=['Count'])\n df_hist = df_ret.join(df_bins)\n\n df_hist.to_excel(writer, sheet_name='sp500_histogram')\n writer.save()\n\n print(\"simulation completed....\")", "def returns_calculator(prices, lag):\r\n return (prices / prices.shift(lag) - 1)[::lag]", "def graph_balance(interval='5minute', span='all', bounds='regular', info=None, historical_transfers_df=historical_transfers_df):\n historical_portfolio = r.get_historical_portfolio(interval=interval, span=span, bounds=bounds, info=info) ###### could change info to close_equity\n # display(historical_portfolio)\n # input()\n \n historicalData = historical_portfolio['equity_historicals']\n \n dates = []\n closingPrices = []\n openPrices = []\n \n for data_point in historicalData:\n # print(data_point)\n dates.append(data_point['begins_at'])\n closingPrices.append(float(data_point['close_equity'])) # close_price\n openPrices.append(float(data_point['open_equity'])) # open_price\n \n balance_datetimes_list_cleaned = []\n for datetime in dates:\n datetime = datetime.split('T')[0]\n balance_datetimes_list_cleaned.append(datetime)\n # print(balance_datetimes_list_cleaned)\n \n portfolio_balance_dates = [dt.datetime.strptime(datetime,'%Y-%m-%d') for datetime in 
balance_datetimes_list_cleaned]\n import numpy as np\n # a1, a2 = df1.align(df2, join='outer', axis=1)\n # print(portfolio_balance_dates, closingPrices)\n # input()\n portfolio_balance_dates_df = pd.DataFrame(np.array(closingPrices), columns = list(['portfolio balance']), index=portfolio_balance_dates)\n\n display(portfolio_balance_dates_df.tail(n=60))\n display(historical_transfers_df.tail(n=60))\n print(len(portfolio_balance_dates_df))\n print(len(historical_transfers_df))\n # test = portfolio_balance_dates_df.merge(historical_transfers_df, left_index=True, right_index=True)\n \n\n\n # test = np.where(portfolio_balance_dates_df.index == historical_transfers_df.index, print('shit'), print('fart'))\n # display(test)\n\n # test = test['portfolio balance'] - test['historical transfers']\n\n # plt.plot(test)\n # # plt.plot(x, openPrices)\n # plt.ylabel('Price')\n # plt.xlabel('Date')\n # plt.show()\n\n # input(dates)\n # change the dates into a format that matplotlib can recognize.\n portfolio_balance_dates = [dt.datetime.strptime(d,'%Y-%m-%dT%H:%M:%SZ') for d in dates]\n \n # plot the data.\n # plt.plot(x, closingPrices, 'ro')\n # plt.plot(x, openPrices, 'bo')\n # plt.title(\"Option price for {} over time\".format(symbol_name))\n # plt.xlabel(\"Dates\")\n # plt.ylabel(\"Price\")\n # plt.show()\n\n portfolio_balances_df = pd.DataFrame(np.array(closingPrices), columns = list(['portfolio balances']))\n portfolio_balance_dates_df = portfolio_balance_dates_df.merge(historical_transfers_df,\n how='outer',\n left_index=True,\n right_index=True,\n ).fillna(0)\n\n # for row in portfolio_balance_dates_df[::-1].iterrows():\n # row['running balance'] = row['portfolio balance'] - row['historical transfers']\n\n # for i in range(portfolio_balance_dates_df):\n # for row in portfolio_balance_dates_df:\n # row['running transfers'] = row['historical transfers'] + row['historical transfers'][:-1]\n\n i = len(portfolio_balance_dates_df)-1\n portfolio_balance_dates_df['running transfers'] = 0.00\n\n # while i >= 0:\n # portfolio_balance_dates_df['running transfers'][i] = portfolio_balance_dates_df['running transfers'][i-1] + portfolio_balance_dates_df['historical transfers'][i]\n # i = i - 1\n\n for i in range(len(portfolio_balance_dates_df)):\n portfolio_balance_dates_df['running transfers'][i] = portfolio_balance_dates_df['running transfers'][i-1] + portfolio_balance_dates_df['historical transfers'][i]\n\n\n i = len(portfolio_balance_dates_df)\n\n while i >= 0:\n portfolio_balance_dates_df['running balance'] = portfolio_balance_dates_df['portfolio balance'] - portfolio_balance_dates_df['running transfers']\n i = i - 1\n\n\n # portfolio_balance_dates_df['running balance'] = portfolio_balance_dates_df['portfolio balance'][-1] - portfolio_balance_dates_df['historical transfers']\n # portfolio_balance_dates_df.loc[portfolio_balance_dates_df['portfolio balance'] > 0, 'running balance'] = portfolio_balance_dates_df['portfolio balance'] - portfolio_balance_dates_df['historical transfers']\n\n\n #portfolio_balance_dates_df = portfolio_balance_dates_df['portfolio balance'] - portfolio_balance_dates_df['historical transfers']\n print('portfolio_balance_dates_df')\n display(portfolio_balance_dates_df.tail(n=60))\n portfolio_balance_dates_df.loc[portfolio_balance_dates_df['running balance'] < 0, 'running balance'] = 0\n\n plt.plot(portfolio_balance_dates_df['running balance'])\n # plt.plot(x, openPrices)\n plt.ylabel('Price')\n plt.xlabel('Date')\n plt.show()\n\n # plt.plot(portfolio_balance_dates, closingPrices)\n 
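# NOTE: a more compact, vectorized sketch of the running columns built above (this assumes the\n    # same 'historical transfers' / 'portfolio balance' column names and is not part of the original loop logic):\n    # portfolio_balance_dates_df['running transfers'] = portfolio_balance_dates_df['historical transfers'].cumsum()\n    # portfolio_balance_dates_df['running balance'] = (portfolio_balance_dates_df['portfolio balance']\n    #                                                  - portfolio_balance_dates_df['running transfers']).clip(lower=0)\n    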
plt.plot(portfolio_balance_dates_df)\n # plt.plot(x, openPrices)\n plt.ylabel('Price')\n plt.xlabel('Date')\n plt.show()\n #input()\n \n # for price in closingPrices:\n # print(portfolio_balance_dates)\n # print(historical_transfer_dates)\n # input\n # if portfolio_balance_dates == historical_transfer_dates:\n # price = closingPrices - historical_transfers\n \n # import numpy as np\n # plt.plot(portfolio_balance_dates, closingPrices)\n # # plt.plot(x, openPrices)\n # plt.ylabel('Price')\n # plt.xlabel('Date')\n # plt.show()", "def get_prices(start, end):\n\n tickers = TICKERS # fetch tickers from config.py\n df_final = pd.DataFrame() # declared for merging purposes (inside loops)\n\n for ticker in tickers: # Loop over tickers to fetch individual price series\n\n r = requests.get(\"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol=\" + ticker\n + \"&outputsize=full&apikey=\" + ALPHAVANTAGE_KEY)\n r_dict = r.json()\n\n dates = np.array([]) # this loop makes the index into an index of datetime objects. Note the format.\n for i in r_dict['Time Series (Daily)'].keys():\n datetime_obj = datetime.datetime.strptime(i, '%Y-%m-%d')\n dates = np.append(dates, datetime_obj)\n\n prices = np.array([]) # This loop extracts all prices and put them into an array\n for i in r_dict['Time Series (Daily)']:\n x = r_dict['Time Series (Daily)'][i]['5. adjusted close']\n prices = np.append(prices, x)\n\n open_prices = np.array([]) # grab opening prices as well\n for i in r_dict['Time Series (Daily)']:\n x = r_dict['Time Series (Daily)'][i]['1. open']\n open_prices = np.append(open_prices, x)\n\n df = pd.DataFrame({ # This dataframe contains each individual stock\n 'Date': dates,\n str(ticker + '_' + 'adjclose'): prices,\n str(ticker + '_' + 'open'): open_prices\n })\n df = df.set_index('Date')\n\n df_final = pd.DataFrame(data=df_final,\n index=dates) # these few lines are for merging the individual dataframes\n df_final.index.name = 'Date'\n df_final = df.merge(df_final, left_index=True, right_index=True)\n\n for ticker in tickers: # convert to numeric values. 
Prices are just \"objects\"\n df_final[str(ticker + '_' + 'adjclose')] = pd.to_numeric(df_final[str(ticker + '_' + 'adjclose')])\n df_final[str(ticker + '_' + 'open')] = pd.to_numeric(df_final[str(ticker + '_' + 'open')])\n\n df_final = df_final.iloc[::-1]\n\n return df_final[start: end] # slice the dataframe at the end, only return the specified date-range.", "def rebalance(context, data):\n logger.debug('rebalancing on: %s', algo.get_datetime())\n\n context.trend_filter = False\n\n # new_portfolio = algo.pipeline_output('pipeline').dropna(subset=['overall_rank']).sort_values('momentum', ascending=False)\n\n new_portfolio = algo.pipeline_output('pipeline').dropna(subset=['overall_rank']).sort_values('momentum', ascending=False)\n\n for equity, row in new_portfolio.iterrows():\n logger.debug('new portfolio (before filtering) - equity: %s', equity)\n\n # print(new_portfolio)\n\n # new_portfolio = new_portfolio[new_portfolio['overall_rank'].notna() & new_portfolio['momentum'] > 40][:20]\n \n # new_portfolio = new_portfolio[(new_portfolio['momentum_decile'] > 8)][:20]\n\n new_portfolio = new_portfolio.nlargest(20, ['overall_rank', 'momentum']) #<- $600K PL in 10 years\n\n # new_portfolio = new_portfolio.nlargest(20, ['momentum', 'overall_rank']) #<- 1M PL in 10 years\n\n if logger.level is logging.DEBUG:\n for equity, row in new_portfolio.iterrows():\n logger.debug('new portfolio - (after filtering) equity: %s', equity)\n \n\n # print(len(new_portfolio.index))\n\n # volatility driven weights\n # new_portfolio['inverse_volatility'] = new_portfolio['volatility'].apply(lambda x: 1 / x)\n # inv_vola_sum = new_portfolio['inverse_volatility'].sum()\n # new_portfolio['target_weight'] = new_portfolio['inverse_volatility'].apply(lambda x: x / inv_vola_sum)\n\n # portfolio size driven weights\n # num_equities = len(new_portfolio.index)\n # new_portfolio['target_weight'] = 1 / num_equities\\\n\n # logger.info('len existing portfolio: %s', len(context.portfolio.positions))\n\n if logger.level is logging.DEBUG:\n for equity, values in context.portfolio.positions.items():\n logger.debug('context.portfolio.positions - equity: %s, amount: %s, cost_basis: %s, sold_on: %s, sold_at_price: %s', equity, values.amount, values.cost_basis, values.last_sale_date, values.last_sale_price)\n\n \n order_target(algo.sid('FIBBG000NTFYM5'), 0)\n logger.debug('selling all bonds')\n\n for equity in context.portfolio.positions:\n if equity is algo.sid('FIBBG000NTFYM5'): \n continue\n if equity not in set(new_portfolio.index.tolist()):\n # logger.info('selling %s', equity)\n order_target_percent(equity, 0)\n\n stock_weights = 1.0 / max(len(context.portfolio.positions), len(new_portfolio.index))\n\n logger.debug('len existing portfolio (afer ejection): %s', len(context.portfolio.positions))\n logger.debug('len new portfolio: %s', len(new_portfolio.index))\n logger.debug('stock_weights: %s', stock_weights)\n\n # print(context.portfolio.positions.get(algo.sid('FIBBG000NTFYM5')))\n\n # spy = context.portfolio.positions.get(algo.sid('FIBBG000NTFYM5'))\n\n # if (spy is not None) and (spy.amount > 0):\n # order_target_percent(algo.sid('FIBBG000NTFYM5'), 0)\n\n for equity, row in new_portfolio.iterrows():\n if row.trend_filter is True:\n # logger.info('buying %s', equity)\n context.trend_filter = True\n order_target_percent(equity, stock_weights)\n else:\n context.trend_filter = False\n \n logger.debug('cash: %s', context.portfolio.cash)\n logger.debug('portfolio_value: %s', context.portfolio.portfolio_value)\n 
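# (descriptive note) the debug lines around here only snapshot the account after the equal-weight targets are queued;\n    # order_target_percent submits orders rather than filling them, so cash, portfolio value and position counts\n    # typically reflect the pre-fill state and update on a later bar.\n    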
logger.debug('num_positions: %s', len(context.portfolio.positions))\n logger.debug('positions: %s', context.portfolio.positions)", "def get_market_metrics(market_portfolio: pd.DataFrame, t_costs: float, index_id: str, index_name: str,\n test_data_start_date: datetime.date, test_data_end_date: datetime.date, market_logs=False) -> \\\n Tuple[pd.Series, pd.Series, pd.Series]:\n\n market_portfolio_metrics = pd.Series([]).rename('Market')\n market_portfolio_metrics.index.name = 'Metrics'\n\n excess_return_series = calc_excess_returns(\n market_portfolio.loc[:, 'daily_return'].groupby(level=['datadate']).mean()).rename('daily_excess_return')\n excess_return_series = excess_return_series.reset_index()\n excess_return_series.loc[:, 'datadate'] = excess_return_series['datadate'].dt.strftime(\n '%Y-%m-%d')\n excess_return_series.set_index('datadate', inplace=True)\n cumulative_excess_return = (excess_return_series.get('daily_excess_return') + 1).cumprod().rename(\n 'Cumulative Market Return')\n cumulative_excess_return.index.name = 'Time'\n # cumulative_return.plot(title='Cumulative Market Performance')\n # plt.legend(loc='best')\n # plt.show()\n\n # JOB: Calculate metrics\n # noinspection DuplicatedCode\n annualized_sharpe = calc_sharpe(market_portfolio.loc[:, ['daily_return']].groupby(level=['datadate']).mean(),\n annualize=True)\n annualized_sharpe_atc = calc_sharpe(\n market_portfolio.loc[:, ['daily_return']].groupby(level=['datadate']).mean() - 4 * t_costs,\n annualize=True)\n\n annualized_sortino = calc_sortino(market_portfolio.loc[:, ['daily_return']].groupby(level=['datadate']).mean(),\n annualize=True)\n annualized_sortino_atc = calc_sortino(\n market_portfolio.loc[:, ['daily_return']].groupby(level=['datadate']).mean() - 4 * t_costs,\n annualize=True)\n\n mean_daily_return = market_portfolio.groupby(level=['datadate'])['daily_return'].mean().mean()\n mean_daily_excess_return = calc_excess_returns(\n market_portfolio.groupby(level=['datadate'])['daily_return'].mean().rename('daily_return')).mean()\n\n market_portfolio_metrics.loc['Mean Daily Return'] = mean_daily_return\n market_portfolio_metrics.loc['Annualized Return'] = annualize_metric(mean_daily_return)\n market_portfolio_metrics.loc['Mean Daily Excess Return'] = mean_daily_excess_return\n market_portfolio_metrics.loc['Annualized Excess Return'] = annualize_metric(mean_daily_excess_return)\n market_portfolio_metrics.loc['Annualized Sharpe'] = annualized_sharpe\n market_portfolio_metrics.loc['Annualized Sortino'] = annualized_sortino\n\n # JOB: Add metrics incl. 
transaction costs of 5 bps per half-turn\n market_portfolio_metrics.loc['Mean Daily Return_atc'] = mean_daily_return - 4 * t_costs\n market_portfolio_metrics.loc['Annualized Return_atc'] = annualize_metric(mean_daily_return - 4 * t_costs)\n market_portfolio_metrics.loc['Mean Daily Excess Return_atc'] = mean_daily_excess_return - 4 * t_costs\n market_portfolio_metrics.loc['Annualized Excess Return_atc'] = annualize_metric(\n mean_daily_excess_return - 4 * t_costs)\n market_portfolio_metrics.loc['Annualized Sharpe_atc'] = annualized_sharpe_atc\n market_portfolio_metrics.loc['Annualized Sortino_atc'] = annualized_sortino_atc\n\n data_record = {\n 'ID': config.run_id,\n 'Experiment Run End': datetime.datetime.now().isoformat(),\n 'Parent Model Type': 'Market',\n 'Model Type': 'Market',\n 'Index ID': index_id,\n 'Index Name': index_name,\n 'Study Period ID': config.study_period_id,\n 'Study Period Length': None,\n 'Period Range': None,\n 'Study Period Start Date': None,\n 'Study Period End Date': None,\n 'Test Set Size': None,\n 'Days Test Set': None,\n 'Constituent Number': None,\n 'Average Cross Section Size': None,\n 'Test Set Start Date': test_data_start_date.isoformat(),\n 'Test Set End Date': test_data_end_date.isoformat(),\n 'Total Accuracy': None,\n\n 'Top-k Accuracy Scores': None,\n 'Top-k Mean Daily Return': market_portfolio_metrics['Mean Daily Return'],\n 'Top-k Mean Daily Excess Return': market_portfolio_metrics['Mean Daily Excess Return'],\n 'Top-k Annualized Excess Return': market_portfolio_metrics['Annualized Excess Return'],\n 'Top-k Annualized Return': market_portfolio_metrics['Annualized Return'],\n 'Top-k Annualized Sharpe': market_portfolio_metrics['Annualized Sharpe'],\n 'Top-k Annualized Sortino': market_portfolio_metrics['Annualized Sortino'],\n 'Mean Daily Return (Short)': None,\n 'Mean Daily Return (Long)': None,\n\n 'Top-k Mean Daily Return_atc': market_portfolio_metrics['Mean Daily Return_atc'],\n 'Top-k Annualized Return_atc': market_portfolio_metrics['Annualized Return_atc'],\n 'Top-k Mean Daily Excess Return_atc': market_portfolio_metrics['Mean Daily Excess Return_atc'],\n 'Top-k Annualized Excess Return_atc': market_portfolio_metrics['Annualized Excess Return_atc'],\n 'Top-k Annualized Sharpe_atc': market_portfolio_metrics['Annualized Sharpe_atc'],\n 'Top-k Annualized Sortino_atc': market_portfolio_metrics['Annualized Sortino_atc'],\n 'Top-k Mean Daily Return (Short)_atc': None,\n 'Top-k Mean Daily Return (Long)_atc': None,\n\n 'Model Configs': None,\n 'Total Epochs': None,\n\n 'Return Series': excess_return_series['daily_excess_return'].to_dict(),\n 'Prediction Error': None\n }\n\n if market_logs:\n write_to_logs(data_record)\n\n return market_portfolio_metrics, excess_return_series, cumulative_excess_return", "def new_get_historical_price(base, target, date):\n return {base: {target: 10}}", "def analize_this(interval, start):\n results = []\n numCoins = len(CoinData.portfolio)\n for sma in range(6, 20):\n start = 2 * sma\n stop = round(3.5 * sma) + 1\n step = (stop - start) // 8\n for bma in range(start, stop, step):\n lma = round(bma * 2.75)\n profit = 0.0\n CoinData.setParams(sma, bma, lma, interval)\n for coin in CoinData.portfolio.values():\n trend, data = coin.trendFollower(start=start)\n profit += trend['Profit'].iloc[-1]\n\n results.append((sma, bma, lma, profit / numCoins))\n return pd.DataFrame(results, columns=('sma', 'bma', 'lma', 'portfolio profit'))", "def buy_and_sell_stock_once(prices):\n min_price_so_far, max_profit = float('inf') , 0.0\n\n for 
price in prices:\n min_price_so_far = min(min_price_so_far, price)\n max_profit_sell_today = price - min_price_so_far\n max_profit = max(max_profit, max_profit_sell_today)\n\n return max_profit", "def _initalize_portfolio_with_cash(self):\n self.cash = copy.copy(self.starting_cash)\n\n if self.starting_cash > 0.0:\n self.history.append(\n PortfolioEvent.create_subscription(\n self.current_dt, self.starting_cash, self.starting_cash\n )\n )\n\n self.logger.info(\n '(%s) Funds subscribed to portfolio \"%s\" '\n '- Credit: %0.2f, Balance: %0.2f' % (\n self.current_dt.strftime(settings.LOGGING[\"DATE_FORMAT\"]),\n self.portfolio_id,\n round(self.starting_cash, 2),\n round(self.starting_cash, 2)\n )\n )", "def stock_min(stock):\n min_price=1000000\n for i in stock['Close']:\n if i < min_price:\n min_price=i\n return min_price", "def __init__(\n self,\n start_dt,\n starting_cash=0.0,\n currency = \"USD\",\n portfolio_id=None,\n name=None\n ):\n self.start_dt = start_dt\n self.current_dt = start_dt\n self.starting_cash = starting_cash\n self.currency = currency\n self.portfolio_id = portfolio_id\n self.name = name\n\n self.pos_handler = PositionHandler()\n self.history = []\n\n self.logger = logging.getLogger('Portfolio')\n self.logger.setLevel(logging.DEBUG)\n self.logger.info(\n '(%s) Portfolio \"%s\" instance initalized' % (\n self.current_dt.strftime(settings.LOGGING[\"DATE_FORMAT\"]),\n self.portfolio_id\n )\n )\n\n self._initalize_portfolio_with_cash()", "def generate_portfolio_data(self):\n self.__load_portfolio_historical_prices()\n self.__populate_historical_trade_data()\n self.__calculate_portfolio_returns()\n self.__calculate_portfolio_performance()", "def calculatedResults(meanReturns, covMatrix, riskFreeRate=0, constraintSet=(0,1)):\n # Max Sharpe Ratio Portfolio\n maxSR_Portfolio = maxSR(meanReturns, covMatrix)\n maxSR_returns, maxSR_std = portfolioPerformance(maxSR_Portfolio['x'], meanReturns, covMatrix)\n \n maxSR_allocation = pd.DataFrame(maxSR_Portfolio['x'], index=meanReturns.index, columns=['allocation'])\n maxSR_allocation.allocation = [round(i*100,0) for i in maxSR_allocation.allocation]\n \n # Min Volatility Portfolio\n minVol_Portfolio = minimizeVariance(meanReturns, covMatrix)\n minVol_returns, minVol_std = portfolioPerformance(minVol_Portfolio['x'], meanReturns, covMatrix)\n \n minVol_allocation = pd.DataFrame(minVol_Portfolio['x'], index=meanReturns.index, columns=['allocation'])\n minVol_allocation.allocation = [round(i*100,0) for i in minVol_allocation.allocation]\n\n # Efficient Frontier\n efficientList = []\n targetReturns = np.linspace(minVol_returns, maxSR_returns, 20)\n for target in targetReturns:\n efficientList.append(efficientOpt(meanReturns, covMatrix, target)['fun'])\n \n maxSR_returns, maxSR_std = round(maxSR_returns*100,2), round(maxSR_std*100,2)\n minVol_returns, minVol_std = round(minVol_returns*100,2), round(minVol_std*100,2)\n \n return maxSR_returns, maxSR_std, maxSR_allocation, minVol_returns, minVol_std, minVol_allocation, efficientList, targetReturns", "def solve_prices(self):\n return None", "def generate_returns(prices):\n r=prices.copy()\n \n d=r.shift(1)\n \n returns=(r-d)/d\n \n \n #TODO: Implement function\n\n return returns", "def get_returns(self, start_date=None, end_date=None, stocks=None):\n if stocks is None:\n stocks = self.stocks\n\n if start_date is None:\n start_date = self.dates[0]\n\n if end_date is None:\n end_date = self.dates[-1]\n\n if type(end_date) is not datetime.datetime and type(end_date) is not pd.tslib.Timestamp:\n 
end_date = datetime.datetime.strptime(end_date, \"%Y-%m-%d\")\n\n if type(start_date) is not datetime.datetime and type(start_date) is not pd.tslib.Timestamp:\n start_date = datetime.datetime.strptime(start_date, \"%Y-%m-%d\")\n\n dates_to_check = self.dates[self.dates.index(start_date): self.dates.index(end_date) + 1]\n\n stock_money = []\n\n for date in dates_to_check:\n stock_money += [self.get_day_returns(stocks, date)]\n\n stock_money = pd.DataFrame({\"stock value\": stock_money}).set_index([self.dates])\n\n return_info = join_features(stock_money, self.cash)\n return_info['value'] = return_info['cash'] + return_info['stock value']\n\n return return_info", "def price_generator(self, start, end, periods):\r\n tickers = [self.SelectedTicker]\r\n tick_yahoo = YahooFinancials(tickers)\r\n data = tick_yahoo.get_historical_price_data(start, \r\n end, \r\n periods)\r\n \r\n df = pd.DataFrame({\r\n a: {x['formatted_date']: x['adjclose'] for x in data[a]['prices']} for a in tickers})\r\n \r\n self.prices = df.dropna()\r\n self.returns = self.prices.pct_change().dropna()\r\n try:\r\n self.div_yield = tick_yahoo.get_dividend_yield()\r\n #print(self.div_yield[self.SelectedTicker])\r\n if self.div_yield[self.SelectedTicker] == None:\r\n self.div_yield = 0.00\r\n else:\r\n self.div_yield = self.div_yield[self.SelectedTicker]\r\n except:\r\n print(\"no dividend yield\")", "def ramp_up(self) -> None:\n self.cash_balance: float = self.initial_cash_balance()\n for stock in self.stocks:\n initial_date_idx = 0\n self.cash_balance = stock.buy(initial_date_idx, self.cash_balance, self.buy_budget)", "def calculate_cumulative_returns(returns, starting_value=0, out=None):\n if len(returns) < 1:\n return returns.copy()\n\n nanmask = np.isnan(returns)\n if np.any(nanmask):\n returns = returns.copy()\n returns[nanmask] = 0\n\n allocated_output = out is None\n if allocated_output:\n out = np.empty_like(returns)\n\n np.add(returns, 1, out=out)\n out.cumprod(axis=0, out=out)\n\n if starting_value == 0:\n np.subtract(out, 1, out=out)\n else:\n np.multiply(out, starting_value, out=out)\n\n if allocated_output:\n if returns.ndim == 1 and isinstance(returns, pd.Series):\n out = pd.Series(out, index=returns.index)\n elif isinstance(returns, pd.DataFrame):\n out = pd.DataFrame(\n out, index=returns.index, columns=returns.columns,\n )\n\n return out", "def create_equity_curve_dataframe(self):\n # returns the cumulative product for percent change over every timestamp in the index\n curve = pd.DataFrame(self.all_holdings)\n curve.set_index('datetime', inplace=True)\n curve['returns'] = curve['total'].pct_change()\n #curve['equity_curve'] = (1.0+curve['returns']).cumprod()\n curve['equity_curve'] = curve['returns']\n curve['equity_curve'] += 1\n curve['equity_curve'] = curve['equity_curve'].cumprod()\n self.equity_curve = curve\n print(curve)", "def calculate_provision_start_end(trades, instrument, portfolio_swap,\n start_date, end_date, warehousing_type='Daily'):\n \n start_provision = GetProvision(instrument, portfolio_swap, start_date)\n LOGGER.debug(\"Start provision '%s': %s\", instrument.Name(), start_provision)\n \n end_provision = 0.0\n today = acm.Time.DateToday()\n\n if today == end_date and not hist_valuation():\n for trade in trades.AsList():\n funding_instrument = trade.Portfolio().AdditionalInfo().PS_FundingIns()\n if funding_instrument != portfolio_swap:\n continue # Trade doesn't belong to the processed portfolio swap.\n end_provision += calculate(trade)\n else:\n LOGGER.debug(\"Historical valuation. 
Using PSwap to retrieve provision: '%s'\", portfolio_swap.Name())\n end_provision = GetProvision(instrument, portfolio_swap, end_date)\n \n LOGGER.debug(\"End provision '%s': %s\", instrument.Name(), end_provision)\n \n provision = end_provision - start_provision\n return provision", "def get_price():\n return uniform(1.0, 350.0)", "def create_equity_curve_dataframe(self):\n # returns the cumulative product for percent change over every timestamp in the index\n curve = pd.DataFrame(self.all_holdings)\n curve.set_index('datetime', inplace=True)\n curve['returns'] = curve['total'].pct_change()\n #curve['equity_curve'] = (1.0+curve['returns']).cumprod()\n curve['equity_curve'] = curve['returns']\n curve['equity_curve'] += 1\n curve['equity_curve'] = curve['equity_curve'].cumprod()\n self.equity_curve = curve", "def stock():\n stock=stock_data('AAPL',start(2019,12,1))\n return stock", "def portfolio_simulations_using_target_returns(num_of_years=30, trials=100):\n print(\"Running portfolio_simulations_using_target_returns() method\")\n sim_fia_cv = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_base_total = pd.DataFrame(index=range(num_of_years + 1))\n sim_base_income = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_port_total = pd.DataFrame(index=range(num_of_years + 1))\n sim_port_income = pd.DataFrame(index=range(num_of_years + 1))\n\n read_income_inputs = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='income_model_inputs',\n index_col=[0])\n\n read_returns_est = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='income_assets_returns_estimates',\n index_col=[0])\n\n read_asset_weights = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='asset_weights',\n index_col=[0])\n\n # read random returns for simulation\n read_normal = pd.read_csv(src + 'median_returns_unsorted.csv', index_col=[0], parse_dates=True)\n cols = [read_normal.columns[c].split('_')[1] for c in np.arange(len(read_normal.columns))]\n read_normal.rename(columns=dict(zip(list(read_normal.columns), cols)), inplace=True)\n idx = list(read_normal.index)\n\n runs = 0\n while runs <= trials:\n # --------Shuffling the path of random returns from the median portfolio\n np.random.shuffle(idx)\n read_normal = read_normal.set_index([idx])\n read_normal = read_normal.sort_index()\n\n # assets_col_names = list(read_normal.columns)\n # tickers = list(read_asset_weights.index)\n wts = np.array(read_asset_weights.loc[:, 'base'])\n\n def asset_median_returns(data, ticker):\n return data.filter(regex=ticker).median(axis=1)\n\n # dataframe for unsorted returns (normal)\n median_returns_normal = read_normal.copy()\n median_returns_normal.loc[:, 'portfolio_return'] = median_returns_normal.dot(wts)\n median_normal_fia = pd.DataFrame({'FIA': asset_median_returns(read_normal, 'FIA')})\n\n years = list(range(0, num_of_years + 1))\n income_cols = ['year', 'strategy_term', 'index_returns', 'term_ret', 'term_ret_with_par', 'term_annualize',\n 'ann_net_spread', 'term_ret_netspr', 'high_inc_benefit_base', 'rider_fee', 'eoy_income',\n 'contract_value']\n\n term = int(read_income_inputs.loc['term', 'inputs'])\n fia_ret = read_returns_est.loc[read_returns_est.index[-1], 'Annualized Returns']\n fia_risk = read_returns_est.loc[read_returns_est.index[-1], 'Annualized Risk']\n par_rate = float(read_income_inputs.loc['par_rate', 'inputs'])\n spread = float(read_income_inputs.loc['spread', 'inputs'])\n bonus_term = int(read_income_inputs.loc['bonus_term', 'inputs'])\n premium = 
float(read_income_inputs.loc['premium', 'inputs'])\n income_bonus = float(read_income_inputs.loc['income_bonus', 'inputs'])\n\n income_starts = int(read_income_inputs.loc['start_income_years', 'inputs'])\n income_growth = float(read_income_inputs.loc['income_growth', 'inputs'])\n rider_fee = float(read_income_inputs.loc['rider_fee', 'inputs'])\n inc_payout_factor = float(read_income_inputs.loc['income_payout_factor', 'inputs'])\n contract_bonus = float(read_income_inputs.loc['contract_bonus', 'inputs'])\n social = float(read_income_inputs.loc['social', 'inputs'])\n inflation = float(read_income_inputs.loc['inflation', 'inputs'])\n wtd_cpn_yield = float(read_income_inputs.loc['wtd_coupon_yld', 'inputs'])\n life_expectancy = int(read_income_inputs.loc['life_expectancy_age', 'inputs'])\n clients_age = int(read_income_inputs.loc['clients_age', 'inputs'])\n\n # ---------------INCOME MODEL--------------------------------------------\n # runs = 0\n returns_dict = {}\n asset_dict = {}\n fia_dict = {}\n\n income_df = pd.DataFrame(index=years, columns=income_cols)\n income_df.loc[:, 'year'] = years\n income_df.loc[:, 'strategy_term'] = income_df.loc[:, 'year'] % term\n income_df.loc[:, 'strategy_term'] = income_df['strategy_term'].apply(lambda x: 1 if x == 0 else 0)\n\n income_df.loc[:, 'index_returns'] = read_normal.loc[:, 'FIA']\n\n cumprod = (1. + income_df['index_returns']).rolling(window=term).agg(lambda x: x.prod()) - 1\n income_df.loc[:, 'term_ret'] = np.where(income_df.loc[:, 'strategy_term'] == 1, cumprod, 0)\n income_df.loc[:, 'term_ret_with_par'] = income_df.loc[:, 'term_ret'] * par_rate\n income_df.loc[:, 'term_annualize'] = income_df.loc[:, 'term_ret_with_par'].apply(\n lambda x: (1 + x) ** (1 / term) - 1)\n income_df.loc[:, 'ann_net_spread'] = income_df.loc[:, 'term_annualize'] - spread\n income_df.loc[:, 'ann_net_spread'] = np.where(income_df.loc[:, 'strategy_term'] == 1,\n income_df.loc[:, 'ann_net_spread'], 0)\n income_df.loc[:, 'term_ret_netspr'] = income_df.loc[:, 'ann_net_spread'].apply(lambda x: (1 + x) ** term - 1)\n\n for counter in years:\n if counter == 0:\n income_df.loc[counter, 'high_inc_benefit_base'] = premium * (1 + income_bonus)\n\n elif counter <= min(bonus_term, income_starts):\n income_df.loc[counter, 'high_inc_benefit_base'] = income_df.loc[counter - 1, 'high_inc_benefit_base'] * \\\n (1 + income_growth)\n else:\n income_df.loc[counter, 'high_inc_benefit_base'] = income_df.loc[counter - 1, 'high_inc_benefit_base']\n\n income_df.loc[:, 'rider_fee'] = income_df.loc[:, 'high_inc_benefit_base'] * rider_fee\n income_df.loc[:, 'eoy_income'] = np.where(income_df.loc[:, 'year'] > income_starts,\n income_df.loc[:, 'high_inc_benefit_base'] * inc_payout_factor, 0)\n\n for counter in years:\n if counter == 0:\n income_df.loc[counter, 'contract_value'] = premium * (1 + contract_bonus)\n\n elif income_df.loc[counter, 'strategy_term'] == 1:\n x1 = income_df.loc[counter - 1, 'contract_value'] - income_df.loc[counter, 'rider_fee']\n x2 = (x1 * (1 + income_df.loc[counter, 'term_ret_netspr'])) - income_df.loc[counter, 'eoy_income']\n income_df.loc[counter, 'contract_value'] = x2\n\n else:\n x1 = income_df.loc[counter - 1, 'contract_value'] - income_df.loc[counter, 'rider_fee'] - \\\n income_df.loc[counter, 'eoy_income']\n\n income_df.loc[counter, 'contract_value'] = x1\n\n # variable stores the income number that is used in the base and fia portfolio calcs.\n\n income_from_fia = income_df.loc[income_df.index[-1], 'eoy_income']\n\n income_df.loc[:, 'contract_value'] = 
income_df.loc[:, 'contract_value'].apply(lambda x: 0 if x <= 0 else x)\n\n sim_fia_cv.loc[:, str(runs)] = income_df.loc[:, 'contract_value']\n\n # --------------------BASE MODEL---------------------------------------------\n\n base_wts = read_asset_weights.loc[:, 'base']\n base_assets = list(base_wts.index)\n base_weights = list(base_wts.values)\n base_returns = list(read_returns_est.loc[:, 'Annualized Returns'].values)\n base_std = list(read_returns_est.loc[:, 'Annualized Risk'].values)\n\n base_investment = float(read_income_inputs.loc['risky_assets', 'Base'])\n adv_fees = float(read_income_inputs.loc['advisor_fees', 'Base'])\n\n # -------------------required income----------------------------------\n req_annual_income = float(read_income_inputs.loc['annual_income', 'inputs'])\n income_needed = req_annual_income - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n cpn_income_base = base_investment * wtd_cpn_yield\n\n # ----------------------RANDOM RETURNS--------------------------\n r_cols = base_assets\n boy_value = ['bv_{}'.format(name) for name in base_assets]\n eoy_value = ['ev_{}'.format(name) for name in base_assets]\n\n random_returns = pd.DataFrame(index=income_df.index, columns=r_cols)\n\n random_returns = read_normal.copy()\n\n base_df = random_returns.copy()\n fia_portfolio_df = random_returns.copy()\n port_investment = float(read_income_inputs.loc['risky_assets', 'FIA'])\n cpn_income_port = port_investment * wtd_cpn_yield\n\n # -------------BASE PORTFOLIO----------------------------\n for name in boy_value:\n base_df.loc[:, name] = 0.0\n\n for counter in years:\n period_returns = list(random_returns.loc[counter, :])\n if counter == 0:\n\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment for c in range(len(boy_value))]\n\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'total_net_fees'] = 0.0\n base_df.loc[counter, 'income'] = 0.0\n base_investment = base_df.loc[counter, boy_value].sum()\n\n elif (counter > 0) and (counter < income_starts):\n\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'adv_fees'] = base_df.loc[counter, 'total'] * adv_fees\n base_df.loc[counter, 'total_net_fees'] = base_df.loc[counter, 'total'] - base_df.loc[\n counter, 'adv_fees']\n\n # --coupon payment is invested back into the risky portfolio until the income is withdrawn----\n base_investment = base_df.loc[counter, 'total_net_fees'] + cpn_income_base\n\n else:\n\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'adv_fees'] = base_df.loc[counter, 'total'] * adv_fees\n\n # ---req. income is adjusted for inflation from the second year of withdrawal. Reinvestment of coupon\n # stops from the year income starts. Req. 
income is reduced by the coupon payments\n\n if counter == income_starts:\n\n income_needed = req_annual_income - social\n base_df.loc[counter, 'income'] = income_needed - cpn_income_base\n income_needed = req_annual_income\n\n else:\n income_needed = income_needed * (1 + inflation) - social\n base_df.loc[counter, 'income'] = income_needed - cpn_income_base\n income_needed = income_needed + social\n\n base_df.loc[counter, 'total_net_fees'] = base_df.loc[counter, 'total'] - \\\n base_df.loc[counter, 'adv_fees'] - \\\n base_df.loc[counter, 'income']\n\n base_investment = base_df.loc[counter, 'total_net_fees']\n\n base_df.loc[:, 'adj_total'] = base_df.loc[:, 'total_net_fees'].apply(lambda x: x if x > 0 else 0)\n sim_base_total.loc[:, 's_{}'.format(str(runs))] = base_df.loc[:, 'total_net_fees']\n sim_base_income.loc[:, 's_{}'.format(str(runs))] = base_df.loc[:, 'income']\n\n # ----------------------------FIA PORTFOLIO----------------------------------------------\n for name in boy_value:\n fia_portfolio_df.loc[:, name] = 0.0\n\n for counter in years:\n period_returns = list(random_returns.loc[counter, :])\n if counter == 0:\n\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment\n for c in range(len(boy_value))]\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'total_net_fees'] = 0.0\n fia_portfolio_df.loc[counter, 'income'] = 0.0\n port_investment = fia_portfolio_df.loc[counter, boy_value].sum()\n\n elif (counter > 0) and (counter < income_starts):\n\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'adv_fees'] = fia_portfolio_df.loc[counter, 'total'] * adv_fees\n fia_portfolio_df.loc[counter, 'total_net_fees'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees']\n\n port_investment = fia_portfolio_df.loc[counter, 'total_net_fees'] + cpn_income_port\n\n else:\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'adv_fees'] = fia_portfolio_df.loc[counter, 'total'] * adv_fees\n\n # ---req. income is adjusted for inflation from the second year of withdrawal. Reinvestment of coupon\n # stops from the year income starts. Req. 
income is reduced by the coupon payments\n\n if counter == income_starts:\n\n income_needed = req_annual_income - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n fia_portfolio_df.loc[counter, 'income'] = max(0, income_net_fia_income - cpn_income_port)\n income_needed = req_annual_income\n\n else:\n income_needed = income_needed * (1 + inflation) - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n fia_portfolio_df.loc[counter, 'income'] = max(0, income_net_fia_income - cpn_income_port)\n income_needed = income_needed + social\n\n fia_portfolio_df.loc[counter, 'total_net_fees'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees'] - \\\n fia_portfolio_df.loc[counter, 'income']\n\n port_investment = fia_portfolio_df.loc[counter, 'total_net_fees']\n\n sim_port_total.loc[:, 's_{}'.format(str(runs))] = fia_portfolio_df.loc[:, 'total_net_fees'] + \\\n income_df.loc[:, 'contract_value']\n\n sim_port_income.loc[:, 's_{}'.format(str(runs))] = fia_portfolio_df.loc[:, 'income']\n\n fia_portfolio_df.loc[:, 'adj_total'] = fia_portfolio_df.loc[:, 'total_net_fees'].apply(\n lambda x: x if x > 0 else 0)\n\n runs = runs + 1\n\n # ----% of trials ending value at expected life is less than 0\n inflation_factor = (1 + annual_inflation) ** (life_expectancy - clients_age - income_starts)\n required_income_horizon = income_needed * inflation_factor\n required_income_horizon_net_fia = required_income_horizon - income_from_fia\n\n # prob_success_base = (sim_base_total.iloc[life_expectancy - clients_age] < 0).sum() / trials\n # prob_success_fia_port = (sim_port_total.iloc[life_expectancy - clients_age] < 0).sum() / trials\n\n prob_failure_base = (sim_base_total.iloc[life_expectancy - clients_age] < required_income_horizon).sum() / trials\n prob_failure_fia_port = (sim_port_total.iloc[life_expectancy - clients_age] < required_income_horizon_net_fia) \\\n .sum() / trials\n\n # ----Calculate at total average lifetime income for base portfolio----\n base_inc = sim_base_income.copy()\n base_inc = base_inc.cumsum()\n avg_income_base = base_inc.iloc[life_expectancy - clients_age].mean()\n\n # ----Calculate at total average lifetime income for FIA portfolio----\n port_inc = sim_port_income.copy()\n port_inc = port_inc.cumsum()\n avg_income_port = port_inc.iloc[life_expectancy - clients_age].mean()\n avg_income_port = avg_income_port + (income_from_fia * (life_expectancy - clients_age))\n\n # ---------income breakdown for Base portfolio----------------------------------\n base_df.to_csv(src + 'base_port_detail.csv')\n sim_base_total.to_csv(src + 'base_ending_values.csv')\n income_breakdown_base = pd.DataFrame(sim_base_total.quantile(0.5, axis=1))\n income_breakdown_base.loc[:, 'income_from_portfolio'] = sim_base_income.quantile(0.5, axis=1)\n income_breakdown_base.loc[:, 'fia_income'] = 0.0\n income_breakdown_base.loc[:, 'social_security_income'] = social\n income_breakdown_base.loc[:, 'coupon_income'] = cpn_income_base\n\n income_breakdown_base.rename(columns={income_breakdown_base.columns[0]: 'portfolio_ending_value'}, inplace=True)\n income_breakdown_base.loc[:, 'income_from_portfolio'][\n income_breakdown_base.loc[:, 'portfolio_ending_value'] <= 0] = 0\n income_breakdown_base.loc[:, 'total_income'] = income_breakdown_base.loc[:, income_breakdown_base.columns[1:]].sum(\n axis=1)\n\n # ------------Block Ends-------------------------------------------------------------\n\n # ---------income breakdown for FIA 
portfolio----------------------------------\n fia_portfolio_df.to_csv(src + 'fia_port_detail.csv')\n sim_port_total.to_csv(src + 'fiaport_ending_values.csv')\n\n income_breakdown_port = pd.DataFrame(sim_port_total.quantile(0.5, axis=1))\n income_breakdown_port.loc[:, 'income_from_portfolio'] = sim_port_income.quantile(0.5, axis=1)\n income_breakdown_port.loc[:, 'fia_income'] = income_from_fia\n income_breakdown_port.loc[:, 'social_security_income'] = social\n income_breakdown_port.loc[:, 'coupon_income'] = cpn_income_port\n\n income_breakdown_port.rename(columns={income_breakdown_port.columns[0]: 'portfolio_ending_value'}, inplace=True)\n income_breakdown_port.loc[:, 'income_from_portfolio'][\n income_breakdown_port.loc[:, 'portfolio_ending_value'] <= 0] = 0\n income_breakdown_port.loc[:, 'total_income'] = income_breakdown_port.loc[:, income_breakdown_port.columns[1:]].sum(\n axis=1)\n\n # ------------Block Ends-------------------------------------------------------------\n q_cut = [0.0, 0.05, 0.25, 0.5, 0.75, 0.95, 1.0]\n sim_base_income[sim_base_total < income_needed] = 0.0\n\n sim_port_income[sim_port_total < income_net_fia_income] = 0\n\n sim_port_income = sim_port_income + income_from_fia\n\n # base_quantile = sim_base_total.loc[sim_base_total.index[-1]].quantile([0.05, 0.25, 0.50, 0.75, 0.90])\n #\n # port_quantile = sim_port_total.loc[sim_port_total.index[-1]].quantile([0.05, 0.25, 0.50, 0.75, 0.90])\n\n base_quantile = sim_base_total.loc[sim_base_total.index[-1]].quantile(q_cut)\n\n port_quantile = sim_port_total.loc[sim_port_total.index[-1]].quantile(q_cut)\n\n # q_cut = [0.0, .05, 0.25, 0.5, 0.75, 0.95, 1.0]\n cols = ['Min', '5th', '25th', '50th', '75th', '90th', 'Max']\n\n # ----------------------------drop year 0--------------------------------\n sim_base_total = sim_base_total[1:]\n sim_port_total = sim_port_total[1:]\n\n # ---------quantile analysis for base terminal value-----------------------------\n base_qcut = pd.DataFrame(index=sim_base_total.index, columns=cols)\n for c in range(len(cols)):\n base_qcut.loc[:, cols[c]] = sim_base_total.quantile(q_cut[c], axis=1)\n\n base_qcut.clip(lower=0, inplace=True)\n\n # ---------quantile analysis for base income----------------------------\n base_income_qcut = pd.DataFrame(index=sim_base_income.index, columns=cols)\n for c in range(len(cols)):\n base_income_qcut.loc[:, cols[c]] = sim_base_income.quantile(q_cut[c], axis=1)\n\n # ----Remove NaN's prior to the income start years------------\n # base_income_qcut = base_income_qcut.loc[income_starts:]\n\n # -------------quantile analysis for portfolio terminal value ---------------\n\n port_qcut = pd.DataFrame(index=sim_port_total.index, columns=cols)\n for c in range(len(cols)):\n port_qcut.loc[:, cols[c]] = sim_port_total.quantile(q_cut[c], axis=1)\n\n port_qcut.clip(lower=0, inplace=True)\n\n # ---------------quantile analysis for portfolio income----------------------------\n port_income_qcut = pd.DataFrame(index=sim_port_income.index, columns=cols)\n for c in range(len(cols)):\n port_income_qcut.loc[:, cols[c]] = sim_port_income.quantile(q_cut[c], axis=1)\n\n # ----Remove NaN's prior to the income start years------------\n # port_income_qcut = port_income_qcut.loc[income_starts:]\n\n # ----------probability ending value will be less than 0 at the end of the horizon -------------------------\n\n base_legacy_risk = (sim_base_total.loc[sim_base_total.index[life_expectancy - clients_age]] < 0).sum() / (\n trials + 1)\n port_legacy_risk = 
(sim_port_total.loc[sim_port_total.index[life_expectancy - clients_age]] < 0).sum() / (\n trials + 1)\n\n legacy_risk = pd.DataFrame([base_legacy_risk, port_legacy_risk,\n 'Prob. of portfolio value less than 0 at the end of the expected life'],\n index=['base', 'fia_portfolio', 'Notes'],\n columns=['Ruin Probability'])\n\n # -----------Year-wise probability of ending value greater than 0 -----------------\n base_psuccess = sim_base_total.apply(lambda x: x > 0).sum(axis=1) / (trials + 1)\n port_psuccess = sim_port_total.apply(lambda x: x > 0).sum(axis=1) / (trials + 1)\n\n # -----------------------WRITING FILES TO EXCEL ---------------------------\n writer = pd.ExcelWriter(src + 'simulated_portfolios_summary.xlsx', engine='xlsxwriter')\n sim_base_total.to_excel(writer, sheet_name='base_ending_value')\n sim_port_total.to_excel(writer, sheet_name='fiaport_ending_value')\n base_qcut.to_excel(writer, sheet_name='base_quantile_ending')\n base_income_qcut.to_excel(writer, sheet_name='base_quantile_income')\n port_qcut.to_excel(writer, sheet_name='port_quantile_ending')\n port_income_qcut.to_excel(writer, sheet_name='port_quantile_income')\n\n sucess_df = pd.concat([base_psuccess, port_psuccess], axis=1)\n sucess_df.rename(columns={sucess_df.columns[0]: 'Base', sucess_df.columns[1]: 'Fia_Port'}, inplace=True)\n\n base_mean = sim_base_total[sim_base_total <= 0].isnull().sum().mean()\n port_mean = sim_port_total[sim_port_total <= 0].isnull().sum().mean()\n\n base_median = sim_base_total[sim_base_total <= 0].isnull().sum().median()\n port_median = sim_port_total[sim_port_total <= 0].isnull().sum().median()\n\n stats_df = pd.DataFrame([[base_mean, port_mean, 'Average years portfolio ending value > 0, out of N trials'],\n [base_median, port_median, 'Ending Value >0, 50% of the time']],\n index=['Mean years', 'Median years'], columns=['Base', 'fia_port', 'Comment'])\n\n # ---Average of terminal values at the end of horizon from N Trials\n stats_df.loc['Average Portfolio', 'Base'] = sim_base_total.iloc[-1].mean() + clients_age\n stats_df.loc['Average Portfolio', 'fia_port'] = sim_port_total.iloc[-1].mean() + clients_age\n stats_df.loc['Average Portfolio', 'Comment'] = \"Average of terminal values at the end of analysis period\" \\\n \" from N Trials\"\n\n # ----Median of terminal values at the end of horizon from N Trials\n stats_df.loc['Median Portfolio', 'Base'] = sim_base_total.iloc[-1].median() + clients_age\n stats_df.loc['Median Portfolio', 'fia_port'] = sim_port_total.iloc[-1].median() + clients_age\n stats_df.loc['Median Portfolio', 'Comment'] = \"Median of terminal values at the end of analysis period \" \\\n \"from N Trials\"\n\n # ---Average of terminal values at the end of Actuarial life from N Trials Base Portfolio\n stats_df.loc['Average Portfolio (end of expected_life)', 'Base'] = sim_base_total.iloc[\n life_expectancy - clients_age].mean()\n\n # ----Median of terminal values at the end of horizon from N Trials Base Portfolio\n stats_df.loc['Median Portfolio (end of expected_life)', 'Base'] = sim_base_total.iloc[\n life_expectancy - clients_age].median()\n\n # ---Average of terminal values at the end of Actuarial life from N Trials - FIA portfolio\n stats_df.loc['Average Portfolio (end of expected_life)', 'fia_port'] = sim_port_total.iloc[\n life_expectancy - clients_age].mean()\n stats_df.loc['Average Portfolio (end of expected_life)', 'Comment'] = \"Average of terminal values at the end of \" \\\n \"Actuarial life from N Trials\"\n\n # ----Median of terminal values at the end of horizon 
from N Trials - FIA Portfolio\n stats_df.loc['Median Portfolio (end of expected_life)', 'fia_port'] = sim_port_total.iloc[\n life_expectancy - clients_age].median()\n stats_df.loc['Median Portfolio (end of expected_life)', 'Comment'] = \"Median of terminal values at the end of \" \\\n \"horizon from N Trials\"\n\n stats_df.loc['% Success(value>0 at the end of expected_life)', 'Base'] = 1 - prob_failure_base\n stats_df.loc['% Success(value>0 at the end of expected_life)', 'fia_port'] = 1 - prob_failure_fia_port\n stats_df.loc['% Success(value>0 at the end of expected_life)', 'Comment'] = \"% of N trials yearly ending value \" \\\n \"greater than 0\"\n\n # -----Mininum of N portfolios terminal value at the end of Actuarial Age\n stats_df.loc['Minimum Value', 'Base'] = sim_base_total.iloc[life_expectancy - clients_age].min()\n stats_df.loc['Minimum Value', 'fia_port'] = sim_port_total.iloc[life_expectancy - clients_age].min()\n stats_df.loc['Minimum Value', 'Comment'] = \"Mininum of N portfolios terminal value at the end of Actuarial Age\"\n\n # -----Maxinum of N portfolios terminal value at the end of Actuarial Age\n stats_df.loc['Maximum Value', 'Base'] = sim_base_total.iloc[life_expectancy - clients_age].max()\n stats_df.loc['Maximum Value', 'fia_port'] = sim_port_total.iloc[life_expectancy - clients_age].max()\n stats_df.loc['Maximum Value', 'Comment'] = \"Maxinum of N portfolios terminal value at the end of Actuarial Age\"\n\n # -----Average income from N portfolios at the ned of Actuarial Age\n stats_df.loc['Avg Income (lifetime)', 'Base'] = avg_income_base\n stats_df.loc['Avg Income (lifetime)', 'fia_port'] = avg_income_port\n stats_df.loc['Avg Income (lifetime)', 'Comment'] = \"Average income from N portfolios at the end of Actuarial Age\"\n\n sucess_df.to_excel(writer, sheet_name='yearly_success_rates')\n stats_df.to_excel(writer, sheet_name='Stats')\n\n writer.save()\n\n print(\"simulation completed.\")", "def solve(credit, items, price_list):\n price_less_credit = []\n \n for i in range(items):\n value = price_list[i]\n value_less_credit = credit - value\n if value_less_credit > 0:\n price_less_credit.append(value_less_credit)\n\n for i in price_less_credit:\n try:\n if i in price_list:\n one = i\n one_index = price_list.index(one)\n price_list[one_index] = None\n another = credit - one\n another_index = price_list.index(another)\n \n index = [one_index + 1, another_index + 1]\n\n return \"{} {}\".format(min(index), max(index))\n except ValueError:\n pass", "def index():\n user_stocks_list = db.execute(\"SELECT stock FROM transactions WHERE id = :current_id\", current_id=session[\"user_id\"])\n user_stocks = []\n for stock in user_stocks_list:\n if stock['stock'] not in user_stocks:\n user_stocks.append(stock['stock'])\n\n stock_portfolio = []\n\n for possible_stock in user_stocks:\n bought_shares_list = db.execute(\"SELECT SUM(units) FROM transactions WHERE (id = :current_id AND stock = :stock AND type = :t)\",\n current_id=session[\"user_id\"], stock=possible_stock, t='B')\n bought_shares = 0\n bought_shares = bought_shares_list[0][\"SUM(units)\"]\n sold_shares_list = db.execute(\"SELECT SUM(units) FROM transactions WHERE (id = :current_id AND stock = :stock AND type = :t)\",\n current_id=session[\"user_id\"], stock=possible_stock, t='S')\n sold_shares = 0\n sold_shares = sold_shares_list[0][\"SUM(units)\"]\n if sold_shares == None:\n sold_shares = 0\n\n available_shares = 0\n if bought_shares != None and (bought_shares - sold_shares) > 0:\n available_shares = bought_shares - 
sold_shares\n current_price = int(lookup(possible_stock)[\"price\"])\n market_value = current_price * available_shares\n dict_stock = {}\n dict_stock['name_stock'] = possible_stock\n dict_stock['shares_quantity'] = available_shares\n dict_stock['current_price'] = current_price\n dict_stock['market_value'] = market_value\n stock_portfolio.append(dict_stock)\n else:\n pass\n\n available_money_list = db.execute(\"SELECT cash FROM users WHERE id = :current_id\", current_id=session[\"user_id\"])\n available_money = usd(available_money_list[0]['cash'])\n\n username_list = db.execute(\"SELECT username FROM users WHERE id = :current_id\", current_id=session[\"user_id\"])\n username = username_list[0][\"username\"]\n\n sum_market_values = 0\n for collection in stock_portfolio:\n sum_market_values += int(collection['market_value'])\n\n total_value = usd(available_money_list[0]['cash'] + sum_market_values)\n\n return render_template(\"index.html\", stock_portfolio=stock_portfolio, user_stocks=user_stocks, money=available_money, name=username, total_value=total_value)", "def getTrades(current_portfolio, desired_portfolio):\n # Create trade factory between current and desired portfolio.\n # tf = TradeFactory.TradeFactory(current_portfolio, desired_portfolio)\n\n return {}", "def find_max_profit(stock_prices,k):\n\teliminated_indices = set()\n\ttotal_profit = 0\n\n\t\n\tfor i in range(0,k):\n\t\tmax_profit = float('-inf')\n\t\tmin_price = float('inf')\n\t\t\n\t\tfor current_index,current_price in enumerate(stock_prices):\n\t\t\t# This condition takes care of note by making sure that \n\t\t\t# prices are not used in previous transaction.\n\t\t\tif current_index not in eliminated_indices:\n\t\t\t\tcurrent_profit = current_price - min_price\n\n\t\t\t\tif (current_profit > max_profit):\n\t\t\t\t\tbuying_price_index = min_price_index\n\t\t\t\t\tselling_price_index = current_index\n\t\t\t\t\tmax_profit = current_profit\n\n\t\t\t\t#min_price = min(min_price, current_price)\n\t\t\t\tif (current_price < min_price):\n\t\t\t\t\tmin_price = current_price\n\t\t\t\t\tmin_price_index = current_index\n\n\n\t\t# This for loop is to take care of Note\n\t\tfor i in range(buying_price_index,selling_price_index+1):\n\t\t\teliminated_indices.add(i)\n\n\t\ttotal_profit += max_profit\n\t\tprint('buying_price_index :',buying_price_index)\n\t\tprint(\"selling_price_index :\",selling_price_index)\n\n\treturn total_profit", "def index():\n#Get the current data of the stock.\n\n #SUM all similar stock values from Portfolio.\n ports = db.execute(\"SELECT *, SUM(quantity) as sharetotal FROM portfolio WHERE id = :id GROUP BY symbol\", id=session[\"user_id\"])\n\n #Get the remaining cash of the user from the users table.\n get_cash = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=session['user_id'])\n\n #Convert the get_cash dict to float so it can be displayed to index.html\n remaining_cash = get_cash[0]['cash']\n\n #SUM the stocks' total value plus the remaining cash.\n get_grand_total = db.execute(\"SELECT *, SUM(total) as grand_total FROM portfolio where id = :id\", id=session[\"user_id\"])\n grand_total_fl = get_grand_total[0]['grand_total']\n\n\n\n #Hold value is the sum of the shares * price of each shares in the portfolios PLUS the remaining cash.\n if grand_total_fl != None:\n hold_value = grand_total_fl + remaining_cash\n #Update hte current hold value of the user\n db.execute(\"UPDATE users SET hold_value = :hold_value WHERE id = :id\", id=session[\"user_id\"], hold_value=hold_value)\n else:\n hold_value = 
remaining_cash\n\n\n #Query for the symbol in the database for the specific user.\n rows = db.execute(\"SELECT symbol, stock_price FROM portfolio WHERE id = :id GROUP by symbol\", id=session[\"user_id\"])\n\n #Initiate a list for all the open prices of stocks of a certain user.\n price_open = []\n num_stocks = []\n symbol_list = []\n avg_open_list = []\n profit_loss_list = []\n price_today_list = []\n\n\n for i in range(len(rows)):\n print(rows[i]['symbol'])\n symbol = rows[i]['symbol']\n open_price = rows[i]['stock_price']\n print(rows[i]['stock_price'])\n stock = lookup(rows[i]['symbol'])\n price_today = stock['price']\n\n #Insert data into the price_open list\n price_open.insert(i, open_price)\n\n #Count the number of stocks in posession\n share_total = ports[i]['sharetotal']\n\n #Insert data into the num_stocks list\n num_stocks.insert(i, share_total)\n\n #Insert data into the symbol_list list\n symbol_list.insert(i, symbol)\n\n #Insert data into the price_today_list\n price_today_list.insert(i, price_today)\n\n #Compute for the average open price of all stocks of a certain user.\n total_price = ports[i]['total']\n avg_open = total_price/share_total\n avg_open_list.insert(i, avg_open)\n\n profit_loss = ((price_today - avg_open)/avg_open)*100\n\n profit_loss_list.insert(i, (profit_loss))\n\n\n db.execute(\"UPDATE portfolio SET price_today = :price_today, profit_loss = :profit_loss, avg_open = :avg_open WHERE symbol = :symbol AND id = :id\", price_today=price_today, symbol=symbol,profit_loss=profit_loss, avg_open=avg_open, id=session[\"user_id\"])\n\n\n print(\"The symbols are:\", symbol_list)\n print(\"The quantity are: \", num_stocks)\n print(\"The open prices are: \", price_open)\n print(\"The average open prices are: \", avg_open_list)\n print(\"The prices today are: \", price_today_list)\n print(\"The profit and loss are: \", profit_loss_list)\n\n return render_template(\"index.html\", ports=ports, remaining_cash = remaining_cash, hold_value=hold_value,)", "def income_model_asset_based_portfolio_quantile(num_of_years=30, trials=100, method='normal'):\n\n sim_fia_cv = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_base_total = pd.DataFrame(index=range(num_of_years + 1))\n sim_base_income = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_port_total = pd.DataFrame(index=range(num_of_years + 1))\n sim_port_income = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_base_total_pre_income = pd.DataFrame(index=range(num_of_years + 1))\n sim_port_total_pre_income = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_base_total_preincome = pd.DataFrame(index=range(num_of_years + 1))\n sim_port_total_preincome = pd.DataFrame(index=range(num_of_years + 1))\n\n # read_income_inputs = pd.read_csv(src + \"income_model_inputs.csv\", index_col='Items')\n read_income_inputs = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='income_model_inputs',\n index_col=[0])\n\n # read_returns_est = pd.read_csv(src + \"income_assets_returns_estimates.csv\", index_col='Symbol')\n read_returns_est = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='income_assets_returns_estimates',\n index_col=[0])\n\n # read_returns_est.drop(['BM', read_returns_est.index[-1]], axis=0, inplace=True)\n # read_portfolio_inputs = pd.read_csv(src + \"income_portfolio_inputs.csv\", index_col='Items')\n\n # read_asset_weights = pd.read_csv(src + \"asset_weights.csv\", index_col='Asset')\n read_asset_weights = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='asset_weights',\n 
index_col=[0])\n\n read_asset_weights.drop(read_asset_weights.index[-1], axis=0, inplace=True)\n\n # read random returns for simulation\n read_normal = pd.read_csv(src + 'sort_normal.csv', index_col=[0], parse_dates=True)\n read_small = pd.read_csv(src + 'sort_small_to_large.csv', index_col=[0], parse_dates=True)\n read_large = pd.read_csv(src + 'sort_large_to_small.csv', index_col=[0], parse_dates=True)\n assets_col_names = list(read_normal.columns)\n\n tickers = list(read_asset_weights.index)\n wts = np.array(read_asset_weights.loc[:, 'base'])\n\n def asset_median_returns(data, ticker):\n return data.filter(regex=ticker).median(axis=1)\n\n # dataframe for unsorted returns (normal)\n median_returns_normal = pd.DataFrame({t: asset_median_returns(read_normal, t) for t in tickers})\n median_returns_normal.loc[:, 'portfolio_return'] = median_returns_normal.dot(wts)\n median_normal_fia = pd.DataFrame({'FIA': asset_median_returns(read_normal, 'r_FIA')})\n\n # dataframe for smallest to largest returns\n median_returns_smallest = pd.DataFrame({t: asset_median_returns(read_small, t) for t in tickers})\n median_returns_smallest.loc[:, 'portfolio_return'] = median_returns_smallest.dot(wts)\n median_smallest_fia = pd.DataFrame({'FIA': asset_median_returns(read_small, 'r_FIA')})\n\n # dataframe for unsorted returns (normal)\n median_returns_largest = pd.DataFrame({t: asset_median_returns(read_large, t) for t in tickers})\n median_returns_largest.loc[:, 'portfolio_return'] = median_returns_largest.dot(wts)\n median_largest_fia = pd.DataFrame({'FIA': asset_median_returns(read_large, 'r_FIA')})\n\n years = list(range(0, num_of_years + 1))\n income_cols = ['year', 'strategy_term', 'index_returns', 'term_ret', 'term_ret_with_par', 'term_annualize',\n 'ann_net_spread', 'term_ret_netspr', 'high_inc_benefit_base', 'rider_fee', 'eoy_income',\n 'contract_value']\n\n term = int(read_income_inputs.loc['term', 'inputs'])\n fia_ret = read_returns_est.loc[read_returns_est.index[-1], 'Annualized Returns']\n fia_risk = read_returns_est.loc[read_returns_est.index[-1], 'Annualized Risk']\n par_rate = float(read_income_inputs.loc['par_rate', 'inputs'])\n spread = float(read_income_inputs.loc['spread', 'inputs'])\n bonus_term = int(read_income_inputs.loc['bonus_term', 'inputs'])\n premium = float(read_income_inputs.loc['premium', 'inputs'])\n income_bonus = float(read_income_inputs.loc['income_bonus', 'inputs'])\n\n income_starts = int(read_income_inputs.loc['start_income_years', 'inputs'])\n income_growth = float(read_income_inputs.loc['income_growth', 'inputs'])\n rider_fee = float(read_income_inputs.loc['rider_fee', 'inputs'])\n inc_payout_factor = float(read_income_inputs.loc['income_payout_factor', 'inputs'])\n contract_bonus = float(read_income_inputs.loc['contract_bonus', 'inputs'])\n social = float(read_income_inputs.loc['social', 'inputs'])\n inflation = float(read_income_inputs.loc['inflation', 'inputs'])\n wtd_cpn_yield = float(read_income_inputs.loc['wtd_coupon_yld', 'inputs'])\n life_expectancy = int(read_income_inputs.loc['life_expectancy_age', 'inputs'])\n clients_age = int(read_income_inputs.loc['clients_age', 'inputs'])\n\n # ---------------INCOME MODEL--------------------------------------------\n runs = 0\n returns_dict = {}\n asset_dict = {}\n fia_dict = {}\n while runs < trials:\n print(runs)\n\n income_df = pd.DataFrame(index=years, columns=income_cols)\n income_df.loc[:, 'year'] = years\n income_df.loc[:, 'strategy_term'] = income_df.loc[:, 'year'] % term\n income_df.loc[:, 'strategy_term'] = 
income_df['strategy_term'].apply(lambda x: 1 if x == 0 else 0)\n\n if method == 'normal':\n income_df.loc[:, 'index_returns'] = read_normal.loc[:, '{}_{}'.format('r_FIA', str(runs))]\n\n elif method == 'smallest':\n income_df.loc[:, 'index_returns'] = read_small.loc[:, '{}_{}'.format('r_FIA', str(runs))]\n\n else:\n income_df.loc[:, 'index_returns'] = read_large.loc[:, '{}_{}'.format('r_FIA', str(runs))]\n\n # income_df.loc[:, 'index_returns'] = np.random.normal(fia_ret, fia_risk, size=(len(years), 1))\n\n cumprod = (1. + income_df['index_returns']).rolling(window=term).agg(lambda x: x.prod()) - 1\n income_df.loc[:, 'term_ret'] = np.where(income_df.loc[:, 'strategy_term'] == 1, cumprod, 0)\n income_df.loc[:, 'term_ret_with_par'] = income_df.loc[:, 'term_ret'] * par_rate\n income_df.loc[:, 'term_annualize'] = income_df.loc[:, 'term_ret_with_par'].apply(\n lambda x: (1 + x) ** (1 / term) - 1)\n income_df.loc[:, 'ann_net_spread'] = income_df.loc[:, 'term_annualize'] - spread\n income_df.loc[:, 'ann_net_spread'] = np.where(income_df.loc[:, 'strategy_term'] == 1,\n income_df.loc[:, 'ann_net_spread'], 0)\n income_df.loc[:, 'term_ret_netspr'] = income_df.loc[:, 'ann_net_spread'].apply(lambda x: (1 + x) ** term - 1)\n\n for counter in years:\n if counter == 0:\n income_df.loc[counter, 'high_inc_benefit_base'] = premium * (1 + income_bonus)\n\n elif counter <= min(bonus_term, income_starts):\n income_df.loc[counter, 'high_inc_benefit_base'] = income_df.loc[counter - 1, 'high_inc_benefit_base'] * \\\n (1 + income_growth)\n else:\n income_df.loc[counter, 'high_inc_benefit_base'] = income_df.loc[counter - 1, 'high_inc_benefit_base']\n\n income_df.loc[:, 'rider_fee'] = income_df.loc[:, 'high_inc_benefit_base'] * rider_fee\n income_df.loc[:, 'eoy_income'] = np.where(income_df.loc[:, 'year'] > income_starts,\n income_df.loc[:, 'high_inc_benefit_base'] * inc_payout_factor, 0)\n\n for counter in years:\n if counter == 0:\n income_df.loc[counter, 'contract_value'] = premium * (1 + contract_bonus)\n\n elif income_df.loc[counter, 'strategy_term'] == 1:\n x1 = income_df.loc[counter - 1, 'contract_value'] - income_df.loc[counter, 'rider_fee']\n x2 = (x1 * (1 + income_df.loc[counter, 'term_ret_netspr'])) - income_df.loc[counter, 'eoy_income']\n income_df.loc[counter, 'contract_value'] = x2\n\n else:\n x1 = income_df.loc[counter - 1, 'contract_value'] - income_df.loc[counter, 'rider_fee'] - \\\n income_df.loc[counter, 'eoy_income']\n\n income_df.loc[counter, 'contract_value'] = x1\n\n # variable stores the income number that is used in the base and fia portfolio calcs.\n\n income_from_fia = income_df.loc[income_df.index[-1], 'eoy_income']\n\n income_df.loc[:, 'contract_value'] = income_df.loc[:, 'contract_value'].apply(lambda x: 0 if x <= 0 else x)\n\n sim_fia_cv.loc[:, str(runs)] = income_df.loc[:, 'contract_value']\n\n # -------------------------------------BASE MODEL---------------------------------------------\n\n base_wts = read_asset_weights.loc[:, 'base']\n base_assets = list(base_wts.index)\n base_weights = list(base_wts.values)\n base_returns = list(read_returns_est.loc[:, 'Annualized Returns'].values)\n base_std = list(read_returns_est.loc[:, 'Annualized Risk'].values)\n\n base_investment = float(read_income_inputs.loc['risky_assets', 'Base'])\n adv_fees = float(read_income_inputs.loc['advisor_fees', 'Base'])\n\n # -------------------required income----------------------------------\n req_annual_income = float(read_income_inputs.loc['annual_income', 'inputs'])\n income_needed = req_annual_income - 
social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n cpn_income_base = base_investment * wtd_cpn_yield\n\n # ----------------------RANDOM RETURNS--------------------------\n r_cols = ['r_{}'.format(name) for name in base_assets]\n boy_value = ['bv_{}'.format(name) for name in base_assets]\n eoy_value = ['ev_{}'.format(name) for name in base_assets]\n\n random_returns = pd.DataFrame(index=income_df.index, columns=r_cols)\n\n for c in range(len(r_cols)):\n ret = np.random.normal(base_returns[c], base_std[c], size=(len(random_returns.index), 1))\n\n if method == 'smallest':\n this_run_cols = ['{}_{}'.format(cname, str(runs)) for cname in r_cols]\n random_returns = read_small.loc[:, this_run_cols]\n\n # random_returns.loc[:, r_cols[c]] = np.sort(ret.flatten())\n # asset_dict.update({'{}_{}'.format(r_cols[c], str(runs)): np.sort(ret.flatten())})\n\n elif method == 'largest':\n this_run_cols = ['{}_{}'.format(cname, str(runs)) for cname in r_cols]\n random_returns = read_large.loc[:, this_run_cols]\n\n # random_returns.loc[:, r_cols[c]] = np.flip(np.sort(ret.flatten()))\n # asset_dict.update({'{}_{}'.format(r_cols[c], str(runs)): np.flip(np.sort(ret.flatten()))})\n\n else:\n this_run_cols = ['{}_{}'.format(cname, str(runs)) for cname in r_cols]\n random_returns = read_normal.loc[:, this_run_cols]\n\n # random_returns.loc[:, r_cols[c]] = ret.flatten()\n # asset_dict.update({'{}_{}'.format(r_cols[c], str(runs)): ret.flatten()})\n\n # store the simulated assets returns in one dictionary\n # returns_dict.update({str(runs): random_returns})\n\n # collect the asset based returns from all simulation and calculate the median returns.\n # def get_median_returns(sym):\n # cols = [sym + '_' + str(c) for c in np.arange(trials)]\n # asset_df = pd.DataFrame({c: asset_dict.get(c) for c in cols})\n # return asset_df.median(axis=1)\n #\n # asset_median_returns = pd.DataFrame({symbol: get_median_returns(symbol) for symbol in r_cols})\n #\n # asset_median_returns.loc[:, 'simulated_portfolio_median_returns'] = asset_median_returns.dot(base_weights)\n\n base_df = random_returns.copy()\n pre_income_base_df = random_returns.copy()\n\n # base_investment = float(read_portfolio_inputs.loc['risky_assets', 'Base'])\n\n fia_portfolio_df = random_returns.copy()\n pre_income_port_df = random_returns.copy()\n port_investment = float(read_income_inputs.loc['risky_assets', 'FIA'])\n cpn_income_port = port_investment * wtd_cpn_yield\n\n # ---------Initial Investments for pre-income account values---------------------\n pre_income_base_inv = base_investment\n pre_income_port_inv = port_investment\n # ----------------------------------------BASE PORTFOLIO----------------------------\n for name in boy_value:\n base_df.loc[:, name] = 0.0\n pre_income_base_df.loc[:, name] = 0.0\n\n for counter in years:\n period_returns = list(random_returns.loc[counter, :])\n if counter == 0:\n # ---------------For year 0, the year of investment------------\n\n # ------------Calculate the annual portfolio returns - Gross Returns--------------------\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment for c in range(len(boy_value))]\n\n # -------------Record the Pre Income Base Portfolio-----------------------------\n\n pre_income_base_df.loc[counter, boy_value] = [base_weights[c] *\n pre_income_base_inv for c in range(len(boy_value))]\n pre_income_base_df.loc[counter, 'total'] = pre_income_base_df.loc[counter, boy_value].sum()\n pre_income_base_inv = pre_income_base_df.loc[counter, boy_value].sum()\n\n # 
------------------Pre Income Block Ends------------------------\n\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n\n base_df.loc[counter, 'total_net_fees'] = 0.0\n base_df.loc[counter, 'income'] = 0.0\n base_investment = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'total_pre_income'] = base_investment\n\n elif (counter > 0) and (counter < income_starts):\n\n # ----For years between the start of the investment and start if the income---------------\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n\n # -------------Record the Pre Income Base Portfolio-----------------------------\n pre_income_base_df.loc[counter, boy_value] = [\n base_weights[c] * pre_income_base_inv * (1 + period_returns[c])\n for c in range(len(boy_value))]\n\n pre_income_base_df.loc[counter, 'total'] = pre_income_base_df.loc[counter, boy_value].sum()\n pre_income_base_df.loc[counter, 'adv_fees'] = pre_income_base_df.loc[counter, 'total'] * adv_fees\n pre_income_base_df.loc[counter, 'total_net_fees'] = pre_income_base_df.loc[counter, 'total'] - \\\n pre_income_base_df.loc[counter, 'adv_fees']\n pre_income_base_inv = pre_income_base_df.loc[counter, 'total_net_fees'] + cpn_income_base\n\n # ------------------Pre Income Block Ends------------------------\n\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'adv_fees'] = base_df.loc[counter, 'total'] * adv_fees\n base_df.loc[counter, 'total_net_fees'] = base_df.loc[counter, 'total'] - base_df.loc[\n counter, 'adv_fees']\n\n # --coupon payment is invested back into the risky portfolio until the income is withdrawn----\n base_investment = base_df.loc[counter, 'total_net_fees'] + cpn_income_base\n base_df.loc[counter, 'total_pre_income'] = base_df.loc[counter, 'total_net_fees']\n\n else:\n\n # -------------For Years after the income started----------------------\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n\n # -------------Record the Pre Income Base Portfolio-----------------------------\n pre_income_base_df.loc[counter, boy_value] = [\n base_weights[c] * pre_income_base_inv * (1 + period_returns[c])\n for c in range(len(boy_value))]\n\n pre_income_base_df.loc[counter, 'total'] = pre_income_base_df.loc[counter, boy_value].sum()\n pre_income_base_df.loc[counter, 'adv_fees'] = pre_income_base_df.loc[counter, 'total'] * adv_fees\n pre_income_base_df.loc[counter, 'total_net_fees'] = pre_income_base_df.loc[counter, 'total'] - \\\n pre_income_base_df.loc[counter, 'adv_fees']\n pre_income_base_inv = pre_income_base_df.loc[counter, 'total_net_fees'] + cpn_income_base\n\n # ------------------Pre Income Block Ends------------------------\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'adv_fees'] = base_df.loc[counter, 'total'] * adv_fees\n\n # ---req. income is adjusted for inflation from the second year of withdrawal. Reinvestment of coupon\n # stops from the year income starts. Req. 
income is reduced by the coupon payments\n\n if counter == income_starts:\n\n income_needed = req_annual_income - social\n base_df.loc[counter, 'income'] = income_needed - cpn_income_base\n income_needed = req_annual_income\n\n else:\n income_needed = income_needed * (1 + inflation) - social\n base_df.loc[counter, 'income'] = income_needed - cpn_income_base\n income_needed = income_needed + social\n\n base_df.loc[counter, 'total_net_fees'] = base_df.loc[counter, 'total'] - \\\n base_df.loc[counter, 'adv_fees'] - \\\n base_df.loc[counter, 'income']\n\n base_df.loc[counter, 'total_pre_income'] = base_df.loc[counter, 'total'] - \\\n base_df.loc[counter, 'adv_fees']\n\n base_investment = base_df.loc[counter, 'total_net_fees']\n\n # ------------------------Portfolio with PreIncome Values----------------------------\n sim_base_total_preincome.loc[:, 's_{}'.format(str(runs))] = pre_income_base_df.loc[:, 'total_net_fees']\n sim_base_total_preincome.fillna(float(read_income_inputs.loc['risky_assets', 'Base']), inplace=True)\n # --------------------------------PreIncome Block Ends----------------------------\n\n base_df.loc[:, 'adj_total'] = base_df.loc[:, 'total_net_fees'].apply(lambda x: x if x > 0 else 0)\n sim_base_total.loc[:, 's_{}'.format(str(runs))] = base_df.loc[:, 'total_net_fees']\n sim_base_income.loc[:, 's_{}'.format(str(runs))] = base_df.loc[:, 'income']\n sim_base_total_pre_income.loc[:, 's_{}'.format(str(runs))] = base_df.loc[:, 'total_pre_income']\n\n # ----------------------------FIA PORTFOLIO----------------------------------------------\n for name in boy_value:\n fia_portfolio_df.loc[:, name] = 0.0\n pre_income_port_df.loc[:, name] = 0.0\n\n for counter in years:\n period_returns = list(random_returns.loc[counter, :])\n if counter == 0:\n\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment\n for c in range(len(boy_value))]\n\n # -------------Record the Pre Income Base Portfolio-----------------------------\n\n pre_income_port_df.loc[counter, boy_value] = [base_weights[c] *\n pre_income_port_inv for c in range(len(boy_value))]\n pre_income_port_df.loc[counter, 'total'] = pre_income_port_df.loc[counter, boy_value].sum()\n pre_income_port_inv = pre_income_port_df.loc[counter, boy_value].sum()\n\n # ------------------Pre Income Block Ends------------------------\n\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'total_net_fees'] = 0.0\n fia_portfolio_df.loc[counter, 'income'] = 0.0\n port_investment = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'total_pre_income'] = port_investment\n\n elif (counter > 0) and (counter < income_starts):\n\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n\n # ------------------Record the Pre Income Base Portfolio-----------------------------\n pre_income_port_df.loc[counter, boy_value] = [\n base_weights[c] * pre_income_port_inv * (1 + period_returns[c])\n for c in range(len(boy_value))]\n\n pre_income_port_df.loc[counter, 'total'] = pre_income_port_df.loc[counter, boy_value].sum()\n pre_income_port_df.loc[counter, 'adv_fees'] = pre_income_port_df.loc[counter, 'total'] * adv_fees\n pre_income_port_df.loc[counter, 'total_net_fees'] = pre_income_port_df.loc[counter, 'total'] - \\\n pre_income_port_df.loc[counter, 'adv_fees']\n pre_income_port_inv = pre_income_port_df.loc[counter, 'total_net_fees'] + cpn_income_base\n\n # 
------------------Pre Income Block Ends------------------------\n\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'adv_fees'] = fia_portfolio_df.loc[counter, 'total'] * adv_fees\n fia_portfolio_df.loc[counter, 'total_net_fees'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees']\n fia_portfolio_df.loc[counter, 'total_pre_income'] = fia_portfolio_df.loc[counter, 'total_net_fees']\n\n port_investment = fia_portfolio_df.loc[counter, 'total_net_fees'] + cpn_income_port\n\n else:\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n\n # -------------Record the Pre Income Base Portfolio-----------------------------\n pre_income_port_df.loc[counter, boy_value] = [\n base_weights[c] * pre_income_port_inv * (1 + period_returns[c])\n for c in range(len(boy_value))]\n\n pre_income_port_df.loc[counter, 'total'] = pre_income_port_df.loc[counter, boy_value].sum()\n pre_income_port_df.loc[counter, 'adv_fees'] = pre_income_port_df.loc[counter, 'total'] * adv_fees\n pre_income_port_df.loc[counter, 'total_net_fees'] = pre_income_port_df.loc[counter, 'total'] - \\\n pre_income_port_df.loc[counter, 'adv_fees']\n pre_income_port_inv = pre_income_port_df.loc[counter, 'total_net_fees'] + cpn_income_base\n\n # ------------------Pre Income Block Ends------------------------\n\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'adv_fees'] = fia_portfolio_df.loc[counter, 'total'] * adv_fees\n\n # ---req. income is adjusted for inflation from the second year of withdrawal. Reinvestment of coupon\n # stops from the year income starts. Req. 
income is reduced by the coupon payments\n\n if counter == income_starts:\n\n income_needed = req_annual_income - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n fia_portfolio_df.loc[counter, 'income'] = max(0, income_net_fia_income - cpn_income_port)\n income_needed = req_annual_income\n\n else:\n income_needed = income_needed * (1 + inflation) - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n fia_portfolio_df.loc[counter, 'income'] = max(0, income_net_fia_income - cpn_income_port)\n income_needed = income_needed + social\n\n fia_portfolio_df.loc[counter, 'total_net_fees'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees'] - \\\n fia_portfolio_df.loc[counter, 'income']\n\n fia_portfolio_df.loc[counter, 'total_pre_income'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees']\n\n port_investment = fia_portfolio_df.loc[counter, 'total_net_fees']\n\n sim_port_total.loc[:, 's_{}'.format(str(runs))] = fia_portfolio_df.loc[:, 'total_net_fees'] + \\\n income_df.loc[:, 'contract_value']\n\n sim_port_income.loc[:, 's_{}'.format(str(runs))] = fia_portfolio_df.loc[:, 'income']\n\n fia_portfolio_df.loc[:, 'adj_total'] = fia_portfolio_df.loc[:, 'total_net_fees'].apply(\n lambda x: x if x > 0 else 0)\n\n sim_port_total_pre_income.loc[:, 's_{}'.format(str(runs))] = fia_portfolio_df.loc[:, 'total_pre_income']\n\n # -------------------Portfolio with PreIncome Values----------------------------\n sim_port_total_preincome.loc[:, 's_{}'.format(str(runs))] = pre_income_port_df.loc[:, 'total_net_fees'] + \\\n income_df.loc[:, 'contract_value']\n\n sim_port_total_preincome.fillna(float(read_income_inputs.loc['risky_assets', 'FIA']), inplace=True)\n # --------------------------------PreIncome Block Ends----------------------------\n\n runs += 1\n\n # ------------------Calculate % of portfolios ending value greater than required LIFETIME cumm. 
income---------\n total_income_by_age = sim_base_income.loc[:, sim_base_income.columns[0]].cumsum()\n total_income_by_acturial_age = total_income_by_age.loc[life_expectancy - clients_age]\n total_income_by_age.fillna(0, inplace=True)\n income_dataframe = pd.DataFrame(total_income_by_age)\n income_dataframe.loc[:, 'remaining_income_by_acturial_age'] = total_income_by_age.apply(\n lambda x: total_income_by_acturial_age - x)\n\n s = income_dataframe.loc[:, 'remaining_income_by_acturial_age']\n base_prob_of_success = sim_base_total.gt(s, axis=0).sum(axis=1)\n port_prob_of_success = sim_port_total.gt(s, axis=0).sum(axis=1)\n\n # ----------------------------Portfolio sufficient for NEXT YEARS income needs-------------------\n next_year_income = sim_base_income.loc[:, sim_base_income.columns[0]].shift(-1).fillna(0) # Yearly Income Reqd.\n base_success_next_year = sim_base_total.gt(next_year_income, axis=0).sum(axis=1)\n\n base_for_next_year_need = sim_base_total[sim_base_total.gt(next_year_income, axis=0)]\n\n port_success_next_year = sim_port_total.gt(next_year_income, axis=0).sum(axis=1)\n\n port_for_next_year_need = sim_port_total[sim_port_total.gt(next_year_income, axis=0)]\n\n # ---------------Portfolio for 45 years of simulation---------------------------------------\n base_success_portfolio = sim_base_total[sim_base_total.gt(next_year_income, axis=0)]\n port_success_portfolio = sim_port_total[sim_port_total.gt(next_year_income, axis=0)]\n\n # ----------------Portfolio Simulation until the acturial age------------------------------\n acturial_years = life_expectancy - clients_age\n base_success_portfolio_act_age = base_success_portfolio.loc[acturial_years, :]\n port_success_portfolio_act_age = port_success_portfolio.loc[acturial_years, :]\n\n # -------------------------Base Portfolio TS with max Terminal Value ----------------------------\n if base_success_portfolio_act_age.isnull().sum() == trials:\n base_max_portfolio = 0.0\n else:\n base_max_portfolio = base_success_portfolio.loc[:, base_success_portfolio_act_age.idxmax()]\n\n # -------------------------FIA Portfolio TS with max Terminal Value ----------------------------\n if port_success_portfolio_act_age.isnull().sum() == trials:\n port_max_portfolio = 0.0\n else:\n port_max_portfolio = port_success_portfolio.loc[:, port_success_portfolio_act_age.idxmax()]\n\n # ------------------------------Average age with full income------------------------------\n base_mean_age = ((life_expectancy - clients_age) - base_success_portfolio.loc[1:life_expectancy - clients_age, :]\n .isnull().sum()).mean()\n\n port_mean_age = ((life_expectancy - clients_age) - port_success_portfolio.loc[1:life_expectancy - clients_age, :]\n .isnull().sum()).mean()\n\n # ----------------------------Median Age with full Income------------------------------------------\n base_median_age = ((life_expectancy - clients_age) - base_success_portfolio.loc[1:life_expectancy - clients_age, :]\n .isnull().sum()).median()\n\n port_median_age = ((life_expectancy - clients_age) - port_success_portfolio.loc[1:life_expectancy - clients_age, :]\n .isnull().sum()).median()\n\n # --------------Mean Value for all the portfolios at end of the acturial age--------------------\n base_act_avg_porfolio = base_success_portfolio.loc[life_expectancy - clients_age, :].fillna(0).mean()\n port_act_avg_porfolio = port_success_portfolio.loc[life_expectancy - clients_age, :].fillna(0).mean()\n\n # ---------------Median Value for all the portfolios at end of the acturial age--------------------\n 
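# --- Editor's illustrative sketch (not part of the original source) -------------
# The block above repeatedly summarizes the trials matrix (rows = years,
# columns = simulation trials) at the actuarial year: average/median years of
# full income, and the mean/median/min/max terminal values of the trials that
# still meet the income need. A minimal, self-contained version of that summary
# pattern is sketched below; the scalar `required_income` threshold, the toy
# frame and all variable names are stand-ins, not values from the source (the
# source masks against a year-by-year income Series rather than a scalar).
import numpy as np
import pandas as pd

def summarize_at_actuarial_age(sim_totals: pd.DataFrame, required_income: float,
                               actuarial_year: int) -> dict:
    """Summarize per-trial outcomes at the actuarial year.

    sim_totals: DataFrame indexed by year (0..N), one column per trial.
    """
    # Trials that can still fund the income need keep their values; the rest
    # become NaN, mirroring the masked `*_success_portfolio` frames above.
    funded = sim_totals[sim_totals.gt(required_income)]

    # Years of full income per trial = horizon minus the number of masked years.
    years_funded = actuarial_year - funded.loc[1:actuarial_year].isnull().sum()

    at_act_age = funded.loc[actuarial_year].fillna(0)
    return {
        "avg_years_funded": years_funded.mean(),
        "median_years_funded": years_funded.median(),
        "avg_terminal_value": at_act_age.mean(),
        "median_terminal_value": at_act_age.median(),
        "max_terminal_value": funded.loc[actuarial_year].max(),
        "min_terminal_value": funded.loc[actuarial_year].min(),
    }

# Toy example: 3 trials over a 5-year horizon.
rng = np.random.default_rng(0)
toy = pd.DataFrame(rng.lognormal(mean=12.0, sigma=0.2, size=(6, 3)))
print(summarize_at_actuarial_age(toy, required_income=50_000, actuarial_year=5))
# --------------------------------------------------------------------------------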
base_act_median_porfolio = base_success_portfolio.loc[life_expectancy - clients_age, :].fillna(0).median()\n port_act_median_porfolio = port_success_portfolio.loc[life_expectancy - clients_age, :].fillna(0).median()\n\n # # --------------Mean Value for all the portfolios in the simulation--------------------\n # base_sim_mean = base_success_portfolio.loc[1:life_expectancy - clients_age, :].mean().mean()\n # port_sim_mean = port_success_portfolio.loc[1:life_expectancy - clients_age, :].mean().mean()\n #\n # # --------------Median Value for all the portfolios in the simulation--------------------\n # base_sim_mean = base_success_portfolio.loc[1:life_expectancy - clients_age, :].mean().median()\n # port_sim_mean = port_success_portfolio.loc[1:life_expectancy - clients_age, :].mean().median()\n\n # -------Max Portfolio value at the end of acturial age----------------------------------------\n base_act_max = base_success_portfolio.loc[life_expectancy - clients_age, :].max()\n port_act_max = port_success_portfolio.loc[life_expectancy - clients_age, :].max()\n\n # ----------------------Min Portfolio value at the end of acturial age----------------------------------------\n base_act_min = base_success_portfolio.loc[life_expectancy - clients_age, :].min()\n port_act_min = port_success_portfolio.loc[life_expectancy - clients_age, :].min()\n\n # -----------------------------------------Lifetime Average Income-----------------------------------------\n base_total_income = sim_base_income.cumsum().loc[acturial_years, :].mean()\n port_total_income = income_from_fia + sim_port_income\n port_total_income = port_total_income.cumsum().loc[acturial_years, :].mean()\n\n simulation_stats = pd.DataFrame(index=['Average Years', 'Median Years', 'Average Age', 'Median Age',\n 'Average Portfolio (act.age)', 'Median Portfolio (act.age)',\n 'Max Portfolio Value', 'Min Portfolio Value',\n 'Average Lifetime Income'], columns=['Base', 'FIA'])\n\n simulation_stats.loc['Average Years', :] = [base_mean_age, base_mean_age]\n simulation_stats.loc['Median Years', :] = [base_median_age, base_median_age]\n simulation_stats.loc['Average Age', :] = [base_mean_age + clients_age, base_mean_age + clients_age]\n simulation_stats.loc['Median Age', :] = [base_median_age + clients_age, base_median_age + clients_age]\n simulation_stats.loc['Average Portfolio (act.age)', :] = [base_act_avg_porfolio, port_act_avg_porfolio]\n simulation_stats.loc['Median Portfolio (act.age)', :] = [base_act_median_porfolio, port_act_median_porfolio]\n simulation_stats.loc['Max Portfolio Value', :] = [base_act_max, port_act_max]\n simulation_stats.loc['Min Portfolio Value', :] = [base_act_min, port_act_min]\n simulation_stats.loc['Average Lifetime Income', :] = [base_total_income, port_total_income]\n comments = ['Average years of portfolios that meet the next years income needs for the lifetime',\n 'Median years of portfolios that meet the next years income needs for the lifetime',\n 'Average Clients Age',\n 'Median Clients Age',\n 'Average of terminal values for the portfolios at the end of the acturial life',\n 'Median of terminal values for the portfolios at the end of the acturial life',\n 'Maximum of terminal values for the portfolios at the end of the acturial life',\n 'Minimum of terminal values for the portfolios at the end of the acturial life',\n 'Average of total income generated by all portfolios at the end of the acturial life']\n\n simulation_stats.loc[:, 'Notes'] = comments\n\n # 
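# --- Editor's illustrative sketch (not part of the original source) -------------
# The quantile analysis that follows builds one column per percentile by calling
# DataFrame.quantile(q, axis=1) across the simulation trials for every year.
# A compact version of that pattern is sketched below; the percentile list and
# column labels here are stand-ins, kept aligned with each other (worth checking
# against the `q_cut` / `cols` pair used in the source, where 0.95 is labelled
# '90th').
import pandas as pd

def quantile_bands(sim_totals: pd.DataFrame,
                   quantiles=(0.0, 0.25, 0.5, 0.75, 1.0)) -> pd.DataFrame:
    """Year-by-year percentile bands across trials (rows = years, cols = trials)."""
    bands = pd.DataFrame(index=sim_totals.index)
    for q in quantiles:
        # axis=1 takes the percentile across trials for each year.
        bands[f"p{int(q * 100)}"] = sim_totals.quantile(q, axis=1)
    # Negative portfolio values are floored at zero, as in the source.
    return bands.clip(lower=0)

# e.g. quantile_bands(sim_base_total).plot(grid=True) reproduces the fan charts
# saved further below.
# --------------------------------------------------------------------------------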
--------------------------------------------------------------------------------\n\n # # -----------------------------------income breakdown for Base portfolio----------------------------------\n # base_df.to_csv(src + 'base_port_detail.csv')\n # sim_base_total.to_csv(src + 'base_ending_values.csv')\n # income_breakdown_base = pd.DataFrame(sim_base_total.quantile(0.5, axis=1))\n # income_breakdown_base.loc[:, 'income_from_portfolio'] = sim_base_income.quantile(0.5, axis=1)\n # income_breakdown_base.loc[:, 'fia_income'] = 0.0\n # income_breakdown_base.loc[:, 'social_security_income'] = social\n # income_breakdown_base.loc[:, 'coupon_income'] = cpn_income_base\n #\n # income_breakdown_base.rename(columns={income_breakdown_base.columns[0]: 'portfolio_ending_value'}, inplace=True)\n # income_breakdown_base.loc[:, 'income_from_portfolio'][\n # income_breakdown_base.loc[:, 'portfolio_ending_value'] <= 0] = 0\n # income_breakdown_base.loc[:, 'total_income'] = income_breakdown_base.loc[:, income_breakdown_base.columns[1:]].sum(\n # axis=1)\n #\n # # --------------------------------------Block Ends-----------------------------------------------------------\n #\n # # ---------------------------------------income breakdown for FIA portfolio----------------------------------\n # fia_portfolio_df.to_csv(src + 'fia_port_detail.csv')\n # sim_port_total.to_csv(src + 'fiaport_ending_values.csv')\n #\n # income_breakdown_port = pd.DataFrame(sim_port_total.quantile(0.5, axis=1))\n # income_breakdown_port.loc[:, 'income_from_portfolio'] = sim_port_income.quantile(0.5, axis=1)\n # income_breakdown_port.loc[:, 'fia_income'] = income_from_fia\n # income_breakdown_port.loc[:, 'social_security_income'] = social\n # income_breakdown_port.loc[:, 'coupon_income'] = cpn_income_port\n #\n # income_breakdown_port.rename(columns={income_breakdown_port.columns[0]: 'portfolio_ending_value'}, inplace=True)\n # income_breakdown_port.loc[:, 'income_from_portfolio'][\n # income_breakdown_port.loc[:, 'portfolio_ending_value'] <= 0] = 0\n # income_breakdown_port.loc[:, 'total_income'] = income_breakdown_port.loc[:, income_breakdown_port.columns[1:]].sum(\n # axis=1)\n #\n # # ----------------------------------Block Ends-------------------------------------------------------------\n q_cut = [0.0, 0.1, 0.25, 0.5, 0.75, 0.95, 1.0]\n sim_base_income[sim_base_total < income_needed] = 0.0\n\n sim_port_income[sim_port_total < income_net_fia_income] = 0\n\n sim_port_income = sim_port_income + income_from_fia\n\n # base_quantile = sim_base_total.loc[sim_base_total.index[-1]].quantile([0.05, 0.25, 0.50, 0.75, 0.90])\n #\n # port_quantile = sim_port_total.loc[sim_port_total.index[-1]].quantile([0.05, 0.25, 0.50, 0.75, 0.90])\n\n base_quantile = sim_base_total.loc[sim_base_total.index[-1]].quantile(q_cut)\n\n port_quantile = sim_port_total.loc[sim_port_total.index[-1]].quantile(q_cut)\n\n # q_cut = [0.0, .05, 0.25, 0.5, 0.75, 0.95, 1.0]\n cols = ['Min', '10th', '25th', '50th', '75th', '90th', 'Max']\n\n # ------------------------------------------drop year 0-----------------------------------------\n sim_base_total = sim_base_total[1:]\n sim_port_total = sim_port_total[1:]\n\n # ---------------------------------plot for histogram for porfolios--------------------------------------\n # base_term_value = sim_base_total.loc[sim_base_total.index[:life_expectancy - clients_age], :]\n # fact = 1 / len(base_term_value)\n # base_ann_ret = (base_term_value.iloc[-1] / base_term_value.iloc[0]) ** fact - 1\n # counts, bins, bars = 
plt.hist(base_ann_ret)\n\n # ------------------------quantile analysis for base terminal value-----------------------------\n base_qcut = pd.DataFrame(index=sim_base_total.index, columns=cols)\n for c in range(len(cols)):\n base_qcut.loc[:, cols[c]] = sim_base_total.quantile(q_cut[c], axis=1)\n\n base_qcut.clip(lower=0, inplace=True)\n\n sim_base_total.clip(lower=0, inplace=True)\n\n # -------------------------------------quantile analysis for base income----------------------------\n base_income_qcut = pd.DataFrame(index=sim_base_income.index, columns=cols)\n for c in range(len(cols)):\n base_income_qcut.loc[:, cols[c]] = sim_base_income.quantile(q_cut[c], axis=1)\n\n # ----Remove NaN's prior to the income start years------------\n # base_income_qcut = base_income_qcut.loc[income_starts:]\n\n # ---------------------------------quantile analysis for portfolio terminal value ---------------\n\n port_qcut = pd.DataFrame(index=sim_port_total.index, columns=cols)\n for c in range(len(cols)):\n port_qcut.loc[:, cols[c]] = sim_port_total.quantile(q_cut[c], axis=1)\n\n port_qcut.clip(lower=0, inplace=True)\n\n # ----------------------------------quantile analysis for portfolio income----------------------------\n port_income_qcut = pd.DataFrame(index=sim_port_income.index, columns=cols)\n for c in range(len(cols)):\n port_income_qcut.loc[:, cols[c]] = sim_port_income.quantile(q_cut[c], axis=1)\n\n # ----Remove NaN's prior to the income start years------------\n # port_income_qcut = port_income_qcut.loc[income_starts:]\n\n # ----------probability ending value will be less than 0 at the end of the horizon -----------------------\n # base_legacy_risk = (sim_base_total.loc[sim_base_total.index[-1]] < 0).sum() / (trials)\n\n base_legacy_risk = (sim_base_total.loc[sim_base_total.index[life_expectancy - clients_age]] < 0).sum() / trials\n port_legacy_risk = (sim_port_total.loc[sim_port_total.index[life_expectancy - clients_age]] < 0).sum() / trials\n\n # port_legacy_risk = (sim_port_total.loc[sim_port_total.index[-1]] <= 0).sum() / (trials)\n\n legacy_risk = pd.DataFrame([base_legacy_risk, port_legacy_risk,\n 'Prob. 
of portfolio value less than 0 at the end of the expected life'],\n index=['base', 'fia_portfolio', 'Notes'],\n columns=['Ruin Probability'])\n\n # -----------Year-wise probability of ending value greater than 0 -----------------\n base_psuccess = sim_base_total.apply(lambda x: x > 0).sum(axis=1) / trials\n port_psuccess = sim_port_total.apply(lambda x: x > 0).sum(axis=1) / trials\n\n # ------------------------------------WRITING FILES TO EXCEL ---------------------------\n\n writer = pd.ExcelWriter(dest_simulation + method + '_montecarlo_income_summary.xlsx', engine='xlsxwriter')\n read_income_inputs.to_excel(writer, sheet_name='inputs_for_income')\n\n read_returns_est.to_excel(writer, sheet_name='asset_returns_estimates')\n # read_portfolio_inputs.to_excel(writer, sheet_name='portfolio_inputs')\n\n age_index = list(range(clients_age + 1, clients_age + len(base_qcut) + 1))\n # base_qcut.loc[:, 'clients_age'] = age_index\n # base_qcut.loc[:, 'comment'] = ''\n # base_qcut.loc[:, 'comment'] = np.where(base_qcut.clients_age == life_expectancy, 'expected_life', \"\")\n base_inv = float(read_income_inputs.loc['risky_assets', 'Base'])\n base_qcut.loc[:, 'age'] = age_index\n base_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n\n # -----------------------To start with year 0---------------------------------\n insert_col = [base_inv, base_inv, base_inv, base_inv, base_inv, base_inv,\n base_inv, clients_age, np.nan]\n base_qcut.loc[len(base_qcut) + 1, :] = 0.0\n base_qcut = base_qcut.shift(1)\n base_qcut.iloc[0] = insert_col\n base_qcut.reset_index(drop=True, inplace=True)\n base_qcut.to_excel(writer, sheet_name='base_ending_value_quantiles')\n # base_qcut.loc[income_starts:, :].to_excel(writer, sheet_name='base_ending_value_quantiles')\n\n # base_income_qcut = base_income_qcut[1:] base_income_qcut.loc[:, 'clients_age'] = age_index\n # base_income_qcut.loc[:, 'comment'] = '' base_income_qcut.loc[:, 'comment'] = np.where(\n # base_income_qcut.clients_age == life_expectancy, 'expected_life', \"\")\n\n base_income_qcut = base_income_qcut.loc[1:, :]\n base_income_qcut.loc[:, 'age'] = age_index\n base_income_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n base_income_qcut.loc[income_starts:, :].to_excel(writer, sheet_name='base_income_quantiles')\n\n # age_index = list(range(clients_age+1, clients_age + len(port_qcut)+1))\n # port_qcut.loc[:, 'clients_age'] = age_index\n # port_qcut.loc[:, 'comment'] = ''\n # port_qcut.loc[:, 'comment'] = np.where(port_qcut.clients_age == life_expectancy, 'expected_life', \"\")\n\n port_qcut.loc[:, 'age'] = age_index\n port_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n port_qcut.loc[len(port_qcut) + 1, :] = 0.0\n port_qcut = port_qcut.shift(1)\n port_qcut.iloc[0] = insert_col\n port_qcut.reset_index(drop=True, inplace=True)\n port_qcut.to_excel(writer, sheet_name='fia_port_ending_value_quantiles')\n # port_qcut.loc[income_starts:, :].to_excel(writer, sheet_name='fia_port_ending_value_quantiles')\n\n # port_income_qcut = port_income_qcut[1:] port_income_qcut.loc[:, 'clients_age'] = age_index\n # port_income_qcut.loc[:, 'comment'] = '' port_income_qcut.loc[:, 'comment'] = np.where(\n # port_income_qcut.clients_age == life_expectancy, 'expected_life', \"\")\n\n port_income_qcut = port_income_qcut.loc[1:, :]\n port_income_qcut.loc[:, 'age'] = age_index\n port_income_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n port_income_qcut.loc[income_starts:, :].to_excel(writer, 
sheet_name='fia_port_income_quantiles')\n\n prob_success_df = pd.concat([base_psuccess, port_psuccess], axis=1)\n prob_success_df.rename(columns={prob_success_df.columns[0]: 'prob(ending_value>0)_base',\n prob_success_df.columns[1]: 'prob(ending_value>0)_port'}, inplace=True)\n\n # prob_success_df.loc[:, 'clients_age'] = age_index\n # prob_success_df.loc[:, 'comment'] = ''\n # prob_success_df.loc[:, 'comment'] = np.where(prob_success_df.clients_age == life_expectancy, 'expected_life', \"\")\n\n prob_success_df.loc[:, 'age'] = age_index\n prob_success_df.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n prob_success_df.loc[:, 'prob(ending_value>lifetime_req income)_base'] = base_prob_of_success / trials\n prob_success_df.loc[:, 'prob(ending_value>lifetime_req income)_port'] = port_prob_of_success / trials\n prob_success_df.loc[:, 'prob(ending_value>next_year_req_income)_base'] = base_success_next_year / trials\n prob_success_df.loc[:, 'prob(ending_value>next_year_req_income)_port'] = port_success_next_year / trials\n prob_success_df.loc[:, 'base_max_portfolio_at_acturial_age'] = base_max_portfolio\n prob_success_df.loc[:, 'port_max_portfolio_at_acturial_age'] = port_max_portfolio\n\n # --------------------Percentile Portfolio's based on Acturial Life------------------------\n base_success = prob_success_df.loc[life_expectancy - clients_age, 'prob(ending_value>next_year_req_income)_base']\n port_success = prob_success_df.loc[life_expectancy - clients_age, 'prob(ending_value>next_year_req_income)_port']\n\n # acturial_age_base_tv = sim_base_total.loc[:life_expectancy - clients_age, ]\n # percentile_base_tv = sim_base_total.apply(lambda x: np.nanpercentile(x, base_success), axis=1)\n\n # ----------------Year wise percentile portfolio to meet next year income. Based on the success at acturial age.\n # Yearly portfolio values that can provide the next year income below the success rate at end of life (Percentile)-\n\n # acturial_age_base = base_for_next_year_need.loc[:life_expectancy - clients_age, ]\n # acturial_age_base = base_for_next_year_need.copy().fillna(0)\n percentile_base = base_for_next_year_need.apply(lambda x: np.nanpercentile(x, base_success), axis=1)\n\n # ----Pre Income Portfolio based on the Probab. of Success to meet next year's income at the end on the Act. Age\n base_pre_income_success = sim_base_total_preincome.apply(lambda x: np.nanpercentile(x, base_success), axis=1)\n base_ann_ret_pre_income = base_pre_income_success.pct_change().fillna(0)\n\n # acturial_age_port_tv = sim_port_total.loc[:life_expectancy - clients_age, ]\n # percentile_port_tv = sim_port_total.apply(lambda x: np.nanpercentile(x, port_success), axis=1)\n\n # ----------------Year wise percentile portfolio to meet next year income. Based on the success at acturial age.\n # Yearly portfolio values that can provide the next year income below the success rate at end of life (Percentile)-\n\n # acturial_age_port = port_for_next_year_need.loc[:life_expectancy - clients_age, ]\n # acturial_age_base = port_for_next_year_need.copy().fillna(0)\n percentile_port = port_for_next_year_need.apply(lambda x: np.nanpercentile(x, port_success), axis=1)\n\n # ----Pre Income Portfolio based on the Probab. of Success to meet next year's income at the end on the Act. 
Age\n port_pre_income_success = sim_port_total_preincome.apply(lambda x: np.nanpercentile(x, port_success), axis=1)\n port_ann_ret_pre_income = port_pre_income_success.pct_change().fillna(0)\n\n prob_success_df.loc[:, 'acturial_success_percentile_base_portfolio'] = percentile_base\n prob_success_df.loc[:, 'acturial_success_percentile_port_portfolio'] = percentile_port\n\n prob_success_df.loc[:, 'base_pre_income_ann_ret'] = base_ann_ret_pre_income\n prob_success_df.loc[:, 'port_pre_income_ann_ret'] = port_ann_ret_pre_income\n\n # prob_success_df.loc[:, 'terminalVal_success_percentile_base_portfolio'] = percentile_base_tv\n # prob_success_df.loc[:, 'terminalVal_success_percentile_port_portfolio'] = percentile_port_tv\n\n sim_base_total_preincome.to_excel(writer, sheet_name='base_preincome_portfolios')\n # -------------Adding Premium to calculate the total initial investment--------------\n sim_port_total_preincome.iloc[0] = sim_port_total_preincome.iloc[0] + premium\n sim_port_total_preincome.to_excel(writer, sheet_name='port_preincome_portfolios')\n\n # -------------For Simulation slide - BASE Portfolio - Can Delete --------------------\n # base_qcut_preinc = pd.DataFrame(index=sim_base_total_preincome.index, columns=cols)\n # for c in range(len(cols)):\n # base_qcut_preinc.loc[:, cols[c]] = sim_base_total_preincome.quantile(q_cut[c], axis=1)\n #\n # # -------------For Simulation slide - Proposed Portfolio --------------------\n # port_qcut_preinc = pd.DataFrame(index=sim_port_total_preincome.index, columns=cols)\n # for c in range(len(cols)):\n # port_qcut_preinc.loc[:, cols[c]] = sim_port_total_preincome.quantile(q_cut[c], axis=1)\n #\n # base_qcut_preinc.to_excel(writer, sheet_name='base_preincome_quantiles')\n # port_qcut_preinc.to_excel(writer, sheet_name='port_preincome_quantiles')\n\n prob_success_df.to_excel(writer, sheet_name='success_probability')\n\n # --------------BASE - Accumulation and Income Breakdown based on the success percentile portfolio---------------\n base_df.to_csv(src + 'base_port_detail.csv')\n sim_base_total.to_csv(src + 'base_ending_values.csv')\n income_breakdown_base = pd.DataFrame(sim_base_total.quantile(base_success, axis=1))\n income_breakdown_base.loc[:, 'income_from_risky_assets'] = sim_base_income.quantile(base_success, axis=1) \\\n - social - cpn_income_port\n income_breakdown_base.loc[:, 'guaranteed_income'] = 0.0\n income_breakdown_base.loc[:, 'social_security_income'] = social\n income_breakdown_base.loc[:, 'coupon_income'] = cpn_income_base\n\n income_breakdown_base.rename(columns={income_breakdown_base.columns[0]: 'portfolio_ending_value'}, inplace=True)\n income_breakdown_base.loc[:, 'income_from_risky_assets'][\n income_breakdown_base.loc[:, 'portfolio_ending_value'] <= 0] = 0\n income_breakdown_base.loc[:, 'total_income'] = income_breakdown_base.loc[:, income_breakdown_base.columns[1:]].sum(\n axis=1)\n\n # ----------FIA PORTFOLIO - Accumulation and Income Breakdown based on the success percentile portfolio-----------\n fia_portfolio_df.to_csv(src + 'fia_port_detail.csv')\n sim_port_total.to_csv(src + 'fiaport_ending_values.csv')\n\n income_breakdown_port = pd.DataFrame(sim_port_total.quantile(port_success, axis=1))\n income_breakdown_port.loc[:, 'income_from_risky_assets'] = sim_port_income.quantile(port_success, axis=1) \\\n - income_from_fia - social - cpn_income_port\n income_breakdown_port.loc[:, 'guaranteed_income'] = income_from_fia\n income_breakdown_port.loc[:, 'social_security_income'] = social\n income_breakdown_port.loc[:, 
'coupon_income'] = cpn_income_port\n\n income_breakdown_port.rename(columns={income_breakdown_port.columns[0]: 'portfolio_ending_value'}, inplace=True)\n income_breakdown_port.loc[:, 'income_from_risky_assets'][\n income_breakdown_port.loc[:, 'portfolio_ending_value'] <= 0] = 0\n income_breakdown_port.loc[:, 'total_income'] = income_breakdown_port.loc[:, income_breakdown_port.columns[1:]].sum(\n axis=1)\n\n # -------------------Write simulation Statistics-------------------------------------\n simulation_stats.to_excel(writer, sheet_name='simulation_statistics')\n\n # port_psuccess.to_excel(writer, sheet_name='fia_port_success_probability')\n\n income_breakdown_base = income_breakdown_base.loc[1:, :]\n income_breakdown_base.loc[:, 'age'] = age_index\n income_breakdown_base.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n income_breakdown_base.loc[income_starts:, :].to_excel(writer, sheet_name='base_income_breakdown_median')\n\n income_breakdown_port = income_breakdown_port.loc[1:, :]\n income_breakdown_port.loc[:, 'age'] = age_index\n income_breakdown_port.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n income_breakdown_port.loc[income_starts:, :].to_excel(writer, sheet_name='fia_income_breakdown_median')\n\n legacy_risk.to_excel(writer, sheet_name='ruin_probability')\n\n if method == 'normal':\n median_returns_normal.loc[:, 'fia_median_returns'] = median_normal_fia\n median_returns_normal.to_excel(writer, sheet_name='gr_port_median_normal')\n\n elif method == 'smallest':\n median_returns_smallest.loc[:, 'fia_median_returns'] = median_smallest_fia\n median_returns_smallest.to_excel(writer, sheet_name='gr_port_median_asc')\n\n else:\n median_returns_largest.loc[:, 'fia_median_returns'] = median_largest_fia\n median_returns_largest.to_excel(writer, sheet_name='gr_port_median_desc')\n\n # ---------------------Histogram for S&P Forecast---------------------------------------\n sp_returns = read_returns_est.loc['SPXT Index', 'Annualized Returns']\n sp_risk = read_returns_est.loc['SPXT Index', 'Annualized Risk']\n sp_random_ret = np.random.normal(loc=sp_returns, scale=sp_risk, size=10000)\n bins, data = np.histogram(sp_random_ret, bins=20)\n df_ret = pd.DataFrame(data, columns=['Return_range'])\n df_bins = pd.DataFrame(bins, columns=['Count'])\n df_hist = df_ret.join(df_bins)\n df_hist.to_excel(writer, sheet_name='sp500_histogram')\n\n # ---------------------Histogram for FIA Portfolios TV>0 at the acturial age---------------------------------------\n tval_at_horizon = sim_port_total.loc[acturial_years, :]\n fact = 1 / acturial_years\n arr_returns = np.array((tval_at_horizon / 1000000) ** fact - 1)\n clean_ann_ret = arr_returns[~np.isnan(arr_returns)]\n p_bins, p_data = np.histogram(clean_ann_ret, bins=20)\n df_ret = pd.DataFrame(p_data, columns=['Return_range'])\n df_bins = pd.DataFrame(p_bins, columns=['Count'])\n df_hist = df_ret.join(df_bins)\n df_hist.to_excel(writer, sheet_name='fia_portfolio_histogram')\n\n tval_df = pd.DataFrame(sim_port_total.loc[acturial_years, :])\n tval_df.rename(columns={tval_df.columns[0]:'Terminal Values'}, inplace=True)\n tval_df.to_excel(writer, sheet_name='fia_ending_values_hist')\n writer.save()\n\n # -----------------Plotting charts--------------------------------------------\n base_qcut.loc[income_starts:].plot(grid=True, title='Quantile Terminal Value - Base Portfolio')\n plt.savefig(src + \"quantile_terminal_base.png\")\n plt.close('all')\n\n base_income_qcut.plot(grid=True, title='Quantile Income - Base Portfolio')\n 
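    # Aside (illustrative sketch, not part of the original model): the fia_portfolio_histogram block
    # above turns each trial's terminal value into an implied annual return via
    # (terminal_value / initial_value) ** (1 / years) - 1; the 1,000,000 divisor there appears to be
    # the assumed total initial investment. A self-contained restatement of that step:
    def annualized_return(terminal_value, initial_value, years):
        """Geometric average annual return implied by a terminal value reached after `years` years."""
        return (terminal_value / initial_value) ** (1.0 / years) - 1.0

    # e.g. annualized_return(2_000_000, 1_000_000, 25) is roughly 0.0281, i.e. about 2.8% a year.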
plt.savefig(src + \"quantile_income_base.png\")\n plt.close('all')\n\n base_psuccess.plot(grid=True, title='Probability of Success (Portfolio Ending Value > 0) - Base Portfolio')\n plt.savefig(src + \"success_probabilty_base.png\")\n plt.close('all')\n\n (1 - base_psuccess).plot(grid=True, title='Probability of Ruin (Portfolio Ending Value < 0) - Base Portfolio')\n plt.savefig(src + \"ruin_probability_base.png\")\n plt.close('all')\n\n port_qcut.loc[income_starts:].plot(grid=True, title='Quantile Terminal Value - FIA Portfolio')\n plt.savefig(src + \"quantile_terminal_fia.png\")\n plt.close('all')\n\n port_income_qcut.plot(grid=True, title='Quantile Income - FIA Portfolio')\n plt.savefig(src + \"quantile_income_fia.png\")\n plt.close('all')\n\n port_psuccess.plot(grid=True, title='Probability of Success (Portfolio Ending Value > 0) - FIA Portfolio')\n plt.savefig(src + \"success_probabilty_fia.png\")\n plt.close('all')\n\n (1 - port_psuccess).plot(grid=True, title='Probability of Ruin (Portfolio Ending Value < 0) - FIA Portfolio')\n plt.savefig(src + \"ruin_probability_fia.png\")\n plt.close('all')\n\n print(\"simulation completed....\")", "def update_portfolio(self, portfolio: PortfolioController):\n now = portfolio.get_history(seconds_back=0)\n future = portfolio.get_history(seconds_back=-self.update_interval)\n\n for fund in portfolio.funds:\n best_currency = max(portfolio.currencies, key=lambda currency: future_value(fund, currency, now, future))\n if best_currency != fund.currency:\n portfolio.request_transfer(fund, best_currency)", "def __init__(self, returns: pd.Series, trades: pd.Series, lagged=True, transaction_cost=0, percent_invested_per_trade=1):\n\n if lagged:\n trades = trades.shift(1)\n trades.iloc[0] = False\n self.strategy_returns = ((returns * percent_invested_per_trade) * trades)\n self.trades = trades\n\n self.nr_trades = {'buy': [], 'sell': []}\n for i in range(1, len(trades)):\n if trades[i] != trades[i - 1]:\n self.strategy_returns.iloc[i] -= transaction_cost\n if trades[i]:\n self.nr_trades['buy'].append(self.trades.index[i])\n else:\n self.nr_trades['sell'].append(self.trades.index[i])\n if trades[-1]: # include last day sell to make benchmark possible\n self.nr_trades['sell'].append(self.trades.index[i])\n\n self.performance = (self.strategy_returns + 1).cumprod() - 1\n self.benchmark = (returns + 1).cumprod() - 1", "def portfolio_daily_roi(shares_allocation, capital, test_data):\n\n # computes the cumulative returns\n cumulative_daily_returns = cumulative_returns(\n shares_allocation,\n capital,\n test_data\n )\n\n # calculates daily return on investment\n daily_roi = cumulative_daily_returns.apply(\n lambda returns: ((returns - capital) / capital)*100\n )\n\n # returns the daily return on investment\n return daily_roi", "def income_model_asset_based_portfolio_custom(num_of_years=30, trials=100, method='normal', income=True):\n\n sim_fia_cv = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_base_total = pd.DataFrame(index=range(num_of_years + 1))\n sim_base_income = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_port_total = pd.DataFrame(index=range(num_of_years + 1))\n sim_port_income = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_base_total_pre_income = pd.DataFrame(index=range(num_of_years + 1))\n sim_port_total_pre_income = pd.DataFrame(index=range(num_of_years + 1))\n\n # read_income_inputs = pd.read_csv(src + \"income_model_inputs.csv\", index_col='Items')\n read_income_inputs = pd.read_excel(src + \"portfolio_information.xlsx\", 
sheet_name='income_model_inputs',\n index_col=[0])\n\n # read_returns_est = pd.read_csv(src + \"income_assets_returns_estimates.csv\", index_col='Symbol')\n read_returns_est = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='income_assets_returns_estimates',\n index_col=[0])\n\n clean_names = list(read_returns_est.index)\n clean_names = [s.split(' ')[0] for s in clean_names]\n read_returns_est.loc[:, 'names'] = clean_names\n read_returns_est.set_index('names', drop=True, inplace=True)\n read_returns_est = read_returns_est[:-1]\n read_returns_est.rename(index={'SBMMTB3': 'Cash', read_returns_est.index[-1]: 'FIA'}, inplace=True)\n\n # ---------------Returns DataFrame based on the use input------------------------------------\n ann_ret = np.full((num_of_years + 1, len(read_returns_est)), read_returns_est.loc[:, 'Annualized Returns'])\n read_normal = pd.DataFrame(ann_ret, index=np.arange(num_of_years + 1), columns=read_returns_est.index)\n # read_normal.rename(columns={read_normal.columns[-1]: 'FIA'}, inplace=True)\n user_est_fia_return = float(read_income_inputs.loc['fia_forecast', 'inputs'])\n read_normal.loc[:, 'FIA'] = user_est_fia_return\n\n read_returns_est.loc['FIA', 'Annualized Returns'] = user_est_fia_return\n\n # read_returns_est.drop(['BM', read_returns_est.index[-1]], axis=0, inplace=True)\n # read_portfolio_inputs = pd.read_csv(src + \"income_portfolio_inputs.csv\", index_col='Items')\n\n # read_asset_weights = pd.read_csv(src + \"asset_weights.csv\", index_col='Asset')\n read_asset_weights = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='asset_weights',\n index_col=[0])\n\n read_asset_weights.drop(read_asset_weights.index[-1], axis=0, inplace=True)\n\n # read random returns for simulation\n # read_normal = pd.read_csv(src + 'sort_normal.csv', index_col=[0], parse_dates=True)\n # read_small = pd.read_csv(src + 'sort_small_to_large.csv', index_col=[0], parse_dates=True)\n # read_large = pd.read_csv(src + 'sort_large_to_small.csv', index_col=[0], parse_dates=True)\n assets_col_names = list(read_normal.columns)\n\n tickers = list(read_asset_weights.index)\n wts = np.array(read_asset_weights.loc[:, 'base'])\n\n def asset_median_returns(data, ticker):\n return data.filter(regex=ticker).median(axis=1)\n\n # dataframe for unsorted returns (normal)\n median_returns_normal = pd.DataFrame({t: asset_median_returns(read_normal, t) for t in tickers})\n median_returns_normal.loc[:, 'portfolio_return'] = median_returns_normal.dot(wts)\n median_normal_fia = pd.DataFrame({'FIA': asset_median_returns(read_normal, 'FIA')})\n #\n # # dataframe for smallest to largest returns\n # median_returns_smallest = pd.DataFrame({t: asset_median_returns(read_small, t) for t in tickers})\n # median_returns_smallest.loc[:, 'portfolio_return'] = median_returns_smallest.dot(wts)\n # median_smallest_fia = pd.DataFrame({'FIA': asset_median_returns(read_small, 'r_FIA')})\n #\n # # dataframe for unsorted returns (normal)\n # median_returns_largest = pd.DataFrame({t: asset_median_returns(read_large, t) for t in tickers})\n # median_returns_largest.loc[:, 'portfolio_return'] = median_returns_largest.dot(wts)\n # median_largest_fia = pd.DataFrame({'FIA': asset_median_returns(read_large, 'r_FIA')})\n\n years = list(range(0, num_of_years + 1))\n income_cols = ['year', 'strategy_term', 'index_returns', 'term_ret', 'term_ret_with_par', 'term_annualize',\n 'ann_net_spread', 'term_ret_netspr', 'high_inc_benefit_base', 'rider_fee', 'eoy_income',\n 'contract_value']\n\n term = 
int(read_income_inputs.loc['term', 'inputs'])\n fia_ret = read_returns_est.loc[read_returns_est.index[-1], 'Annualized Returns']\n fia_risk = read_returns_est.loc[read_returns_est.index[-1], 'Annualized Risk']\n par_rate = float(read_income_inputs.loc['par_rate', 'inputs'])\n spread = float(read_income_inputs.loc['spread', 'inputs'])\n bonus_term = int(read_income_inputs.loc['bonus_term', 'inputs'])\n premium = float(read_income_inputs.loc['premium', 'inputs'])\n income_bonus = float(read_income_inputs.loc['income_bonus', 'inputs'])\n\n income_starts = int(read_income_inputs.loc['start_income_years', 'inputs'])\n income_growth = float(read_income_inputs.loc['income_growth', 'inputs'])\n rider_fee = float(read_income_inputs.loc['rider_fee', 'inputs'])\n inc_payout_factor = float(read_income_inputs.loc['income_payout_factor', 'inputs'])\n contract_bonus = float(read_income_inputs.loc['contract_bonus', 'inputs'])\n social = float(read_income_inputs.loc['social', 'inputs'])\n inflation = float(read_income_inputs.loc['inflation', 'inputs'])\n wtd_cpn_yield = float(read_income_inputs.loc['wtd_coupon_yld', 'inputs'])\n life_expectancy = int(read_income_inputs.loc['life_expectancy_age', 'inputs'])\n clients_age = int(read_income_inputs.loc['clients_age', 'inputs'])\n\n # ---------------INCOME MODEL--------------------------------------------\n runs = 0\n returns_dict = {}\n asset_dict = {}\n fia_dict = {}\n while runs < trials:\n print(runs)\n\n income_df = pd.DataFrame(index=years, columns=income_cols)\n income_df.loc[:, 'year'] = years\n income_df.loc[:, 'strategy_term'] = income_df.loc[:, 'year'] % term\n income_df.loc[:, 'strategy_term'] = income_df['strategy_term'].apply(lambda x: 1 if x == 0 else 0)\n\n income_df.loc[:, 'index_returns'] = read_normal.loc[:, 'FIA']\n # income_df.loc[:, 'index_returns'] = np.random.normal(fia_ret, fia_risk, size=(len(years), 1))\n\n cumprod = (1. 
+ income_df['index_returns']).rolling(window=term).agg(lambda x: x.prod()) - 1\n income_df.loc[:, 'term_ret'] = np.where(income_df.loc[:, 'strategy_term'] == 1, cumprod, 0)\n income_df.loc[:, 'term_ret_with_par'] = income_df.loc[:, 'term_ret'] * par_rate\n income_df.loc[:, 'term_annualize'] = income_df.loc[:, 'term_ret_with_par'].apply(\n lambda x: (1 + x) ** (1 / term) - 1)\n income_df.loc[:, 'ann_net_spread'] = income_df.loc[:, 'term_annualize'] - spread\n income_df.loc[:, 'ann_net_spread'] = np.where(income_df.loc[:, 'strategy_term'] == 1,\n income_df.loc[:, 'ann_net_spread'], 0)\n income_df.loc[:, 'term_ret_netspr'] = income_df.loc[:, 'ann_net_spread'].apply(lambda x: (1 + x) ** term - 1)\n\n for counter in years:\n if counter == 0:\n income_df.loc[counter, 'high_inc_benefit_base'] = premium * (1 + income_bonus)\n\n elif counter <= min(bonus_term, income_starts):\n income_df.loc[counter, 'high_inc_benefit_base'] = income_df.loc[counter - 1, 'high_inc_benefit_base'] * \\\n (1 + income_growth)\n else:\n income_df.loc[counter, 'high_inc_benefit_base'] = income_df.loc[counter - 1, 'high_inc_benefit_base']\n\n income_df.loc[:, 'rider_fee'] = income_df.loc[:, 'high_inc_benefit_base'] * rider_fee\n income_df.loc[:, 'eoy_income'] = np.where(income_df.loc[:, 'year'] > income_starts,\n income_df.loc[:, 'high_inc_benefit_base'] * inc_payout_factor, 0)\n\n for counter in years:\n if counter == 0:\n income_df.loc[counter, 'contract_value'] = premium * (1 + contract_bonus)\n\n elif income_df.loc[counter, 'strategy_term'] == 1:\n x1 = income_df.loc[counter - 1, 'contract_value'] - income_df.loc[counter, 'rider_fee']\n x2 = (x1 * (1 + income_df.loc[counter, 'term_ret_netspr'])) - income_df.loc[counter, 'eoy_income']\n income_df.loc[counter, 'contract_value'] = x2\n\n else:\n x1 = income_df.loc[counter - 1, 'contract_value'] - income_df.loc[counter, 'rider_fee'] - \\\n income_df.loc[counter, 'eoy_income']\n\n income_df.loc[counter, 'contract_value'] = x1\n\n # variable stores the income number that is used in the base and fia portfolio calcs.\n\n income_from_fia = income_df.loc[income_df.index[-1], 'eoy_income']\n\n income_df.loc[:, 'contract_value'] = income_df.loc[:, 'contract_value'].apply(lambda x: 0 if x <= 0 else x)\n\n sim_fia_cv.loc[:, str(runs)] = income_df.loc[:, 'contract_value']\n\n # -------------------------------------BASE MODEL---------------------------------------------\n\n base_wts = read_asset_weights.loc[:, 'base']\n base_assets = list(base_wts.index)\n base_weights = list(base_wts.values)\n base_returns = list(read_returns_est.loc[:, 'Annualized Returns'].values)\n base_std = list(read_returns_est.loc[:, 'Annualized Risk'].values)\n\n base_investment = float(read_income_inputs.loc['risky_assets', 'Base'])\n adv_fees = float(read_income_inputs.loc['advisor_fees', 'Base'])\n\n # -------------------required income----------------------------------\n if income:\n req_annual_income = float(read_income_inputs.loc['annual_income', 'inputs'])\n income_needed = req_annual_income - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n cpn_income_base = base_investment * wtd_cpn_yield\n else:\n req_annual_income = 0.0\n income_needed = 0.0\n income_net_fia_income = 0.0\n cpn_income_base = base_investment * wtd_cpn_yield\n\n # ----------------------RANDOM RETURNS--------------------------\n r_cols = ['r_{}'.format(name) for name in base_assets]\n boy_value = ['bv_{}'.format(name) for name in base_assets]\n eoy_value = ['ev_{}'.format(name) for name in base_assets]\n\n 
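    # Aside (reference sketch only, not the model's API): the contract-value loop above can be read
    # as the following recursion. Year 0 credits the contract bonus on the premium; a term-end year
    # deducts the rider fee, credits the net term return, then deducts the income withdrawal; every
    # other year only deducts the rider fee and the income withdrawal. The path is floored at zero
    # afterwards, mirroring the .apply(lambda x: 0 if x <= 0 else x) step above.
    def fia_contract_values(premium, contract_bonus, rider_fee, term_net_ret, eoy_income, is_term_end):
        raw = []
        for yr in range(len(rider_fee)):
            if yr == 0:
                raw.append(premium * (1 + contract_bonus))
            elif is_term_end[yr]:
                raw.append((raw[yr - 1] - rider_fee[yr]) * (1 + term_net_ret[yr]) - eoy_income[yr])
            else:
                raw.append(raw[yr - 1] - rider_fee[yr] - eoy_income[yr])
        return [max(v, 0.0) for v in raw]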
random_returns = pd.DataFrame(index=income_df.index, columns=r_cols)\n\n # for c in range(len(r_cols)):\n # ret = np.random.normal(base_returns[c], base_std[c], size=(len(random_returns.index), 1))\n\n # this_run_cols = ['{}_{}'.format(cname, str(runs)) for cname in r_cols]\n random_returns = read_normal.loc[:, base_assets]\n\n # random_returns.loc[:, r_cols[c]] = ret.flatten()\n # asset_dict.update({'{}_{}'.format(r_cols[c], str(runs)): ret.flatten()})\n\n # store the simulated assets returns in one dictionary\n # returns_dict.update({str(runs): random_returns})\n\n # collect the asset based returns from all simulation and calculate the median returns.\n # def get_median_returns(sym):\n # cols = [sym + '_' + str(c) for c in np.arange(trials)]\n # asset_df = pd.DataFrame({c: asset_dict.get(c) for c in cols})\n # return asset_df.median(axis=1)\n #\n # asset_median_returns = pd.DataFrame({symbol: get_median_returns(symbol) for symbol in r_cols})\n #\n # asset_median_returns.loc[:, 'simulated_portfolio_median_returns'] = asset_median_returns.dot(base_weights)\n\n base_df = random_returns.copy()\n\n # base_investment = float(read_portfolio_inputs.loc['risky_assets', 'Base'])\n\n fia_portfolio_df = random_returns.copy()\n port_investment = float(read_income_inputs.loc['risky_assets', 'FIA'])\n cpn_income_port = port_investment * wtd_cpn_yield\n\n # ----------------------------------------BASE PORTFOLIO----------------------------\n for name in boy_value:\n base_df.loc[:, name] = 0.0\n\n for counter in years:\n period_returns = list(random_returns.loc[counter, :])\n if counter == 0:\n\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment for c in range(len(boy_value))]\n\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'total_net_fees'] = 0.0\n base_df.loc[counter, 'income'] = 0.0\n base_investment = base_df.loc[counter, boy_value].sum()\n\n elif (counter > 0) and (counter < income_starts):\n\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'adv_fees'] = base_df.loc[counter, 'total'] * adv_fees\n base_df.loc[counter, 'total_net_fees'] = base_df.loc[counter, 'total'] - base_df.loc[\n counter, 'adv_fees']\n\n # --coupon payment is invested back into the risky portfolio until the income is withdrawn----\n base_investment = base_df.loc[counter, 'total_net_fees'] + cpn_income_base\n\n else:\n\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'adv_fees'] = base_df.loc[counter, 'total'] * adv_fees\n\n # ---req. income is adjusted for inflation from the second year of withdrawal. Reinvestment of coupon\n # stops from the year income starts. Req. 
income is reduced by the coupon payments\n\n if counter == income_starts:\n\n income_needed = req_annual_income - social\n base_df.loc[counter, 'income'] = income_needed - cpn_income_base\n income_needed = req_annual_income\n\n else:\n income_needed = income_needed * (1 + inflation) - social\n base_df.loc[counter, 'income'] = income_needed - cpn_income_base\n income_needed = income_needed + social\n\n base_df.loc[counter, 'total_net_fees'] = base_df.loc[counter, 'total'] - \\\n base_df.loc[counter, 'adv_fees'] - \\\n base_df.loc[counter, 'income']\n\n base_df.loc[counter, 'total_pre_income'] = base_df.loc[counter, 'total'] - \\\n base_df.loc[counter, 'adv_fees']\n\n base_investment = base_df.loc[counter, 'total_net_fees']\n\n base_df.loc[:, 'adj_total'] = base_df.loc[:, 'total_net_fees'].apply(lambda x: x if x > 0 else 0)\n sim_base_total.loc[:, 's_{}'.format(str(runs))] = base_df.loc[:, 'total_net_fees']\n sim_base_income.loc[:, 's_{}'.format(str(runs))] = base_df.loc[:, 'income']\n sim_base_total_pre_income.loc[:, 's_{}'.format(str(runs))] = base_df.loc[:, 'total_pre_income']\n\n # ----------------------------FIA PORTFOLIO----------------------------------------------\n for name in boy_value:\n fia_portfolio_df.loc[:, name] = 0.0\n\n for counter in years:\n period_returns = list(random_returns.loc[counter, :])\n if counter == 0:\n\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment\n for c in range(len(boy_value))]\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'total_net_fees'] = 0.0\n fia_portfolio_df.loc[counter, 'income'] = 0.0\n port_investment = fia_portfolio_df.loc[counter, boy_value].sum()\n\n elif (counter > 0) and (counter < income_starts):\n\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'adv_fees'] = fia_portfolio_df.loc[counter, 'total'] * adv_fees\n fia_portfolio_df.loc[counter, 'total_net_fees'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees']\n\n port_investment = fia_portfolio_df.loc[counter, 'total_net_fees'] + cpn_income_port\n\n else:\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'adv_fees'] = fia_portfolio_df.loc[counter, 'total'] * adv_fees\n\n # ---req. income is adjusted for inflation from the second year of withdrawal. Reinvestment of coupon\n # stops from the year income starts. Req. 
income is reduced by the coupon payments\n\n if counter == income_starts:\n\n income_needed = req_annual_income - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n fia_portfolio_df.loc[counter, 'income'] = max(0, income_net_fia_income - cpn_income_port)\n income_needed = req_annual_income\n\n else:\n income_needed = income_needed * (1 + inflation) - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n fia_portfolio_df.loc[counter, 'income'] = max(0, income_net_fia_income - cpn_income_port)\n income_needed = income_needed + social\n\n if income:\n fia_portfolio_df.loc[counter, 'total_net_fees'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees'] - \\\n fia_portfolio_df.loc[counter, 'income']\n else:\n fia_portfolio_df.loc[counter, 'total_net_fees'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees'] + \\\n income_from_fia\n\n fia_portfolio_df.loc[counter, 'total_pre_income'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees']\n\n port_investment = fia_portfolio_df.loc[counter, 'total_net_fees']\n\n sim_port_total.loc[:, 's_{}'.format(str(runs))] = fia_portfolio_df.loc[:, 'total_net_fees'] + \\\n income_df.loc[:, 'contract_value']\n\n sim_port_income.loc[:, 's_{}'.format(str(runs))] = fia_portfolio_df.loc[:, 'income']\n\n fia_portfolio_df.loc[:, 'adj_total'] = fia_portfolio_df.loc[:, 'total_net_fees'].apply(\n lambda x: x if x > 0 else 0)\n\n sim_port_total_pre_income.loc[:, 's_{}'.format(str(runs))] = fia_portfolio_df.loc[:, 'total_pre_income']\n\n runs += 1\n\n # ------------------Calculate % of portfolios ending value greater than required LIFETIME cumm. income---------\n total_income_by_age = sim_base_income.loc[:, sim_base_income.columns[0]].cumsum()\n total_income_by_acturial_age = total_income_by_age.loc[life_expectancy - clients_age]\n total_income_by_age.fillna(0, inplace=True)\n income_dataframe = pd.DataFrame(total_income_by_age)\n income_dataframe.loc[:, 'remaining_income_by_acturial_age'] = total_income_by_age.apply(\n lambda x: total_income_by_acturial_age - x)\n\n s = income_dataframe.loc[:, 'remaining_income_by_acturial_age']\n base_prob_of_success = sim_base_total.gt(s, axis=0).sum(axis=1)\n port_prob_of_success = sim_port_total.gt(s, axis=0).sum(axis=1)\n\n # ----------------------------Portfolio sufficient for NEXT YEARS income needs-------------------\n next_year_income = sim_base_income.loc[:, sim_base_income.columns[0]].shift(-1).fillna(0) # Yearly Income Reqd.\n base_success_next_year = sim_base_total.gt(next_year_income, axis=0).sum(axis=1)\n\n base_for_next_year_need = sim_base_total[sim_base_total.gt(next_year_income, axis=0)]\n\n port_success_next_year = sim_port_total.gt(next_year_income, axis=0).sum(axis=1)\n\n port_for_next_year_need = sim_port_total[sim_port_total.gt(next_year_income, axis=0)]\n\n # ---------------Portfolio for 45 years of simulation---------------------------------------\n base_success_portfolio = sim_base_total[sim_base_total.gt(next_year_income, axis=0)]\n port_success_portfolio = sim_port_total[sim_port_total.gt(next_year_income, axis=0)]\n\n # ----------------Portfolio Simulation until the acturial age------------------------------\n acturial_years = life_expectancy - clients_age\n base_success_portfolio_act_age = base_success_portfolio.loc[acturial_years, :]\n port_success_portfolio_act_age = port_success_portfolio.loc[acturial_years, :]\n\n # -------------------------Base 
Portfolio TS with max Terminal Value ----------------------------\n if base_success_portfolio_act_age.isnull().sum() == trials:\n base_max_portfolio = 0.0\n else:\n base_max_portfolio = base_success_portfolio.loc[:, base_success_portfolio_act_age.idxmax()]\n\n # -------------------------FIA Portfolio TS with max Terminal Value ----------------------------\n if port_success_portfolio_act_age.isnull().sum() == trials:\n port_max_portfolio = 0.0\n else:\n port_max_portfolio = port_success_portfolio.loc[:, port_success_portfolio_act_age.idxmax()]\n # ------------------------------Average age with full income------------------------------\n base_mean_age = ((life_expectancy - clients_age) - base_success_portfolio.loc[1:life_expectancy - clients_age, :]\n .isnull().sum()).mean()\n\n port_mean_age = ((life_expectancy - clients_age) - port_success_portfolio.loc[1:life_expectancy - clients_age, :]\n .isnull().sum()).mean()\n\n # ----------------------------Median Age with full Income------------------------------------------\n base_median_age = ((life_expectancy - clients_age) - base_success_portfolio.loc[1:life_expectancy - clients_age, :]\n .isnull().sum()).median()\n\n port_median_age = ((life_expectancy - clients_age) - port_success_portfolio.loc[1:life_expectancy - clients_age, :]\n .isnull().sum()).median()\n\n # --------------Mean Value for all the portfolios at end of the acturial age--------------------\n base_act_avg_porfolio = base_success_portfolio.loc[life_expectancy - clients_age, :].fillna(0).mean()\n port_act_avg_porfolio = port_success_portfolio.loc[life_expectancy - clients_age, :].fillna(0).mean()\n\n # --------------Median Value for all the portfolios at end of the acturial age--------------------\n base_act_median_porfolio = base_success_portfolio.loc[life_expectancy - clients_age, :].fillna(0).median()\n port_act_median_porfolio = port_success_portfolio.loc[life_expectancy - clients_age, :].fillna(0).median()\n\n # # --------------Mean Value for all the portfolios in the simulation--------------------\n # base_sim_mean = base_success_portfolio.loc[1:life_expectancy - clients_age, :].mean().mean()\n # port_sim_mean = port_success_portfolio.loc[1:life_expectancy - clients_age, :].mean().mean()\n #\n # # --------------Median Value for all the portfolios in the simulation--------------------\n # base_sim_mean = base_success_portfolio.loc[1:life_expectancy - clients_age, :].mean().median()\n # port_sim_mean = port_success_portfolio.loc[1:life_expectancy - clients_age, :].mean().median()\n\n # -------Max Portfolio value at the end of acturial age----------------------------------------\n base_act_max = base_success_portfolio.loc[life_expectancy - clients_age, :].max()\n port_act_max = port_success_portfolio.loc[life_expectancy - clients_age, :].max()\n\n # -------Min Portfolio value at the end of acturial age----------------------------------------\n base_act_min = base_success_portfolio.loc[life_expectancy - clients_age, :].min()\n port_act_min = port_success_portfolio.loc[life_expectancy - clients_age, :].min()\n\n # ---------------------Lifetime Average Income----------------------------------\n base_total_income = sim_base_income.cumsum().loc[acturial_years, :].mean()\n port_total_income = income_from_fia + sim_port_income\n port_total_income = port_total_income.cumsum().loc[acturial_years, :].mean()\n\n simulation_stats = pd.DataFrame(index=['Average Years', 'Median Years', 'Average Age', 'Median Age',\n 'Average Portfolio (act.age)', 'Median Portfolio (act.age)',\n 'Max 
Portfolio Value', 'Min Portfolio Value',\n 'Average Lifetime Income'], columns=['Base', 'FIA'])\n\n simulation_stats.loc['Average Years', :] = [base_mean_age, base_mean_age]\n simulation_stats.loc['Median Years', :] = [base_median_age, base_median_age]\n simulation_stats.loc['Average Age', :] = [base_mean_age + clients_age, base_mean_age + clients_age]\n simulation_stats.loc['Median Age', :] = [base_median_age + clients_age, base_median_age + clients_age]\n simulation_stats.loc['Average Portfolio (act.age)', :] = [base_act_avg_porfolio, port_act_avg_porfolio]\n simulation_stats.loc['Median Portfolio (act.age)', :] = [base_act_median_porfolio, port_act_median_porfolio]\n simulation_stats.loc['Max Portfolio Value', :] = [base_act_max, port_act_max]\n simulation_stats.loc['Min Portfolio Value', :] = [base_act_min, port_act_min]\n simulation_stats.loc['Average Lifetime Income', :] = [base_total_income, port_total_income]\n comments = ['Average years of portfolios that meet the next years income needs for the lifetime',\n 'Median years of portfolios that meet the next years income needs for the lifetime',\n 'Average Clients Age',\n 'Median Clients Age',\n 'Average of terminal values for the portfolios at the end of the acturial life',\n 'Median of terminal values for the portfolios at the end of the acturial life',\n 'Maximum of terminal values for the portfolios at the end of the acturial life',\n 'Minimum of terminal values for the portfolios at the end of the acturial life',\n 'Average of total income generated by all portfolios at the end of the acturial life']\n\n simulation_stats.loc[:, 'Notes'] = comments\n\n # --------------------------------------------------------------------------------\n\n # # -----------------------------------income breakdown for Base portfolio----------------------------------\n # base_df.to_csv(src + 'base_port_detail.csv')\n # sim_base_total.to_csv(src + 'base_ending_values.csv')\n # income_breakdown_base = pd.DataFrame(sim_base_total.quantile(0.5, axis=1))\n # income_breakdown_base.loc[:, 'income_from_portfolio'] = sim_base_income.quantile(0.5, axis=1)\n # income_breakdown_base.loc[:, 'fia_income'] = 0.0\n # income_breakdown_base.loc[:, 'social_security_income'] = social\n # income_breakdown_base.loc[:, 'coupon_income'] = cpn_income_base\n #\n # income_breakdown_base.rename(columns={income_breakdown_base.columns[0]: 'portfolio_ending_value'}, inplace=True)\n # income_breakdown_base.loc[:, 'income_from_portfolio'][\n # income_breakdown_base.loc[:, 'portfolio_ending_value'] <= 0] = 0\n # income_breakdown_base.loc[:, 'total_income'] = income_breakdown_base.loc[:, income_breakdown_base.columns[1:]].sum(\n # axis=1)\n #\n # # --------------------------------------Block Ends-----------------------------------------------------------\n #\n # # ---------------------------------------income breakdown for FIA portfolio----------------------------------\n # fia_portfolio_df.to_csv(src + 'fia_port_detail.csv')\n # sim_port_total.to_csv(src + 'fiaport_ending_values.csv')\n #\n # income_breakdown_port = pd.DataFrame(sim_port_total.quantile(0.5, axis=1))\n # income_breakdown_port.loc[:, 'income_from_portfolio'] = sim_port_income.quantile(0.5, axis=1)\n # income_breakdown_port.loc[:, 'fia_income'] = income_from_fia\n # income_breakdown_port.loc[:, 'social_security_income'] = social\n # income_breakdown_port.loc[:, 'coupon_income'] = cpn_income_port\n #\n # income_breakdown_port.rename(columns={income_breakdown_port.columns[0]: 'portfolio_ending_value'}, inplace=True)\n # 
income_breakdown_port.loc[:, 'income_from_portfolio'][\n # income_breakdown_port.loc[:, 'portfolio_ending_value'] <= 0] = 0\n # income_breakdown_port.loc[:, 'total_income'] = income_breakdown_port.loc[:, income_breakdown_port.columns[1:]].sum(\n # axis=1)\n #\n # # ----------------------------------Block Ends-------------------------------------------------------------\n q_cut = [0.0, 0.05, 0.25, 0.5, 0.75, 0.95, 1.0]\n sim_base_income[sim_base_total < income_needed] = 0.0\n\n sim_port_income[sim_port_total < income_net_fia_income] = 0\n\n sim_port_income = sim_port_income + income_from_fia\n\n # base_quantile = sim_base_total.loc[sim_base_total.index[-1]].quantile([0.05, 0.25, 0.50, 0.75, 0.90])\n #\n # port_quantile = sim_port_total.loc[sim_port_total.index[-1]].quantile([0.05, 0.25, 0.50, 0.75, 0.90])\n\n base_quantile = sim_base_total.loc[sim_base_total.index[-1]].quantile(q_cut)\n\n port_quantile = sim_port_total.loc[sim_port_total.index[-1]].quantile(q_cut)\n\n # q_cut = [0.0, .05, 0.25, 0.5, 0.75, 0.95, 1.0]\n cols = ['Min', '5th', '25th', '50th', '75th', '90th', 'Max']\n\n # ------------------------------------------drop year 0-----------------------------------------\n sim_base_total = sim_base_total[1:]\n sim_port_total = sim_port_total[1:]\n\n # ------------------------quantile analysis for base terminal value-----------------------------\n base_qcut = pd.DataFrame(index=sim_base_total.index, columns=cols)\n for c in range(len(cols)):\n base_qcut.loc[:, cols[c]] = sim_base_total.quantile(q_cut[c], axis=1)\n\n base_qcut.clip(lower=0, inplace=True)\n\n sim_base_total.clip(lower=0, inplace=True)\n\n # -------------------------------------quantile analysis for base income----------------------------\n base_income_qcut = pd.DataFrame(index=sim_base_income.index, columns=cols)\n for c in range(len(cols)):\n base_income_qcut.loc[:, cols[c]] = sim_base_income.quantile(q_cut[c], axis=1)\n\n # ----Remove NaN's prior to the income start years------------\n # base_income_qcut = base_income_qcut.loc[income_starts:]\n\n # ---------------------------------quantile analysis for portfolio terminal value ---------------\n\n port_qcut = pd.DataFrame(index=sim_port_total.index, columns=cols)\n for c in range(len(cols)):\n port_qcut.loc[:, cols[c]] = sim_port_total.quantile(q_cut[c], axis=1)\n\n port_qcut.clip(lower=0, inplace=True)\n\n # ----------------------------------quantile analysis for portfolio income----------------------------\n port_income_qcut = pd.DataFrame(index=sim_port_income.index, columns=cols)\n for c in range(len(cols)):\n port_income_qcut.loc[:, cols[c]] = sim_port_income.quantile(q_cut[c], axis=1)\n\n # ----Remove NaN's prior to the income start years------------\n # port_income_qcut = port_income_qcut.loc[income_starts:]\n\n # ----------probability ending value will be less than 0 at the end of the horizon -----------------------\n # base_legacy_risk = (sim_base_total.loc[sim_base_total.index[-1]] < 0).sum() / (trials)\n\n base_legacy_risk = (sim_base_total.loc[sim_base_total.index[life_expectancy - clients_age]] < 0).sum() / trials\n port_legacy_risk = (sim_port_total.loc[sim_port_total.index[life_expectancy - clients_age]] < 0).sum() / trials\n\n # port_legacy_risk = (sim_port_total.loc[sim_port_total.index[-1]] <= 0).sum() / (trials)\n\n legacy_risk = pd.DataFrame([base_legacy_risk, port_legacy_risk,\n 'Prob. 
of portfolio value less than 0 at the end of the expected life'],\n index=['base', 'fia_portfolio', 'Notes'],\n columns=['Ruin Probability'])\n\n # -----------Year-wise probability of ending value greater than 0 -----------------\n base_psuccess = sim_base_total.apply(lambda x: x > 0).sum(axis=1) / trials\n port_psuccess = sim_port_total.apply(lambda x: x > 0).sum(axis=1) / trials\n\n # -----------------------WRITING FILES TO EXCEL ---------------------------\n\n writer = pd.ExcelWriter(src + method + '_simulated_income_summary_custom.xlsx', engine='xlsxwriter')\n read_income_inputs.to_excel(writer, sheet_name='inputs_for_income')\n\n read_returns_est.to_excel(writer, sheet_name='asset_returns_estimates')\n # read_portfolio_inputs.to_excel(writer, sheet_name='portfolio_inputs')\n\n age_index = list(range(clients_age + 1, clients_age + len(base_qcut) + 1))\n # base_qcut.loc[:, 'clients_age'] = age_index\n # base_qcut.loc[:, 'comment'] = ''\n # base_qcut.loc[:, 'comment'] = np.where(base_qcut.clients_age == life_expectancy, 'expected_life', \"\")\n\n base_inv = float(read_income_inputs.loc['risky_assets', 'Base'])\n base_qcut.loc[:, 'age'] = age_index\n base_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n\n # --------To start with year 0---------------------------------\n insert_col = [base_inv, base_inv, base_inv, base_inv, base_inv, base_inv,\n base_inv, clients_age, np.nan]\n base_qcut.loc[len(base_qcut) + 1, :] = 0.0\n base_qcut = base_qcut.shift(1)\n base_qcut.iloc[0] = insert_col\n base_qcut.reset_index(drop=True, inplace=True)\n base_qcut.loc[:, 'Annual Return'] = base_qcut.loc[:, '50th'].pct_change().fillna(0)\n base_qcut.to_excel(writer, sheet_name='base_ending_value_quantiles')\n # base_qcut.loc[income_starts:, :].to_excel(writer, sheet_name='base_ending_value_quantiles')\n\n # base_income_qcut = base_income_qcut[1:] base_income_qcut.loc[:, 'clients_age'] = age_index\n # base_income_qcut.loc[:, 'comment'] = '' base_income_qcut.loc[:, 'comment'] = np.where(\n # base_income_qcut.clients_age == life_expectancy, 'expected_life', \"\")\n\n base_income_qcut = base_income_qcut.loc[1:, :]\n base_income_qcut.loc[:, 'age'] = age_index\n base_income_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n\n base_income_qcut.loc[income_starts:, :].to_excel(writer, sheet_name='base_income_quantiles')\n\n # age_index = list(range(clients_age+1, clients_age + len(port_qcut)+1))\n # port_qcut.loc[:, 'clients_age'] = age_index\n # port_qcut.loc[:, 'comment'] = ''\n # port_qcut.loc[:, 'comment'] = np.where(port_qcut.clients_age == life_expectancy, 'expected_life', \"\")\n\n port_qcut.loc[:, 'age'] = age_index\n port_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n port_qcut.loc[len(port_qcut) + 1, :] = 0.0\n port_qcut = port_qcut.shift(1)\n port_qcut.iloc[0] = insert_col\n port_qcut.reset_index(drop=True, inplace=True)\n port_qcut.loc[:, 'Annual Return'] = port_qcut.loc[:, '50th'].pct_change().fillna(0)\n port_qcut.to_excel(writer, sheet_name='fia_port_ending_value_quantiles')\n # port_qcut.loc[income_starts:, :].to_excel(writer, sheet_name='fia_port_ending_value_quantiles')\n\n # port_income_qcut = port_income_qcut[1:] port_income_qcut.loc[:, 'clients_age'] = age_index\n # port_income_qcut.loc[:, 'comment'] = '' port_income_qcut.loc[:, 'comment'] = np.where(\n # port_income_qcut.clients_age == life_expectancy, 'expected_life', \"\")\n\n port_income_qcut = port_income_qcut.loc[1:, :]\n port_income_qcut.loc[:, 'age'] = age_index\n 
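    # Aside (sketch only): base_psuccess and port_psuccess above are the year-wise fraction of
    # simulation trials whose portfolio value is still positive; the "ruin" series plotted further
    # below is simply the complement. An equivalent vectorized form, kept separate so the code
    # above is unchanged:
    success_rate_sketch = sim_base_total.gt(0).sum(axis=1) / trials   # should reproduce base_psuccess
    ruin_rate_sketch = 1 - success_rate_sketch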
port_income_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n port_income_qcut.loc[income_starts:, :].to_excel(writer, sheet_name='fia_port_income_quantiles')\n\n prob_success_df = pd.concat([base_psuccess, port_psuccess], axis=1)\n prob_success_df.rename(columns={prob_success_df.columns[0]: 'prob(ending_value>0)_base',\n prob_success_df.columns[1]: 'prob(ending_value>0)_port'}, inplace=True)\n\n # prob_success_df.loc[:, 'clients_age'] = age_index\n # prob_success_df.loc[:, 'comment'] = ''\n # prob_success_df.loc[:, 'comment'] = np.where(prob_success_df.clients_age == life_expectancy, 'expected_life', \"\")\n\n prob_success_df.loc[:, 'age'] = age_index\n prob_success_df.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n prob_success_df.loc[:, 'prob(ending_value>lifetime_req income)_base'] = base_prob_of_success / trials\n prob_success_df.loc[:, 'prob(ending_value>lifetime_req income)_port'] = port_prob_of_success / trials\n prob_success_df.loc[:, 'prob(ending_value>next_year_req_income)_base'] = base_success_next_year / trials\n prob_success_df.loc[:, 'prob(ending_value>next_year_req_income)_port'] = port_success_next_year / trials\n prob_success_df.loc[:, 'base_max_portfolio_at_acturial_age'] = base_max_portfolio\n prob_success_df.loc[:, 'port_max_portfolio_at_acturial_age'] = port_max_portfolio\n\n # --------------------Percentile Portfolio's based on Acturial Life------------------------\n base_success = prob_success_df.loc[life_expectancy - clients_age, 'prob(ending_value>next_year_req_income)_base']\n port_success = prob_success_df.loc[life_expectancy - clients_age, 'prob(ending_value>next_year_req_income)_port']\n\n # acturial_age_base_tv = sim_base_total.loc[:life_expectancy - clients_age, ]\n # percentile_base_tv = sim_base_total.apply(lambda x: np.nanpercentile(x, base_success), axis=1)\n\n # ----------------Year wise percentile portfolio to meet next year income. Based on the success at acturial age.\n # Yearly portfolio values that can provide the next year income below the success rate at end of life (Percentile)-\n\n # acturial_age_base = base_for_next_year_need.loc[:life_expectancy - clients_age, ]\n # acturial_age_base = base_for_next_year_need.copy().fillna(0)\n percentile_base = base_for_next_year_need.apply(lambda x: np.nanpercentile(x, base_success), axis=1)\n\n # acturial_age_port_tv = sim_port_total.loc[:life_expectancy - clients_age, ]\n # percentile_port_tv = sim_port_total.apply(lambda x: np.nanpercentile(x, port_success), axis=1)\n\n # ----------------Year wise percentile portfolio to meet next year income. 
Based on the success at acturial age.\n # Yearly portfolio values that can provide the next year income below the success rate at end of life (Percentile)-\n\n # acturial_age_port = port_for_next_year_need.loc[:life_expectancy - clients_age, ]\n # acturial_age_base = port_for_next_year_need.copy().fillna(0)\n percentile_port = port_for_next_year_need.apply(lambda x: np.nanpercentile(x, port_success), axis=1)\n\n prob_success_df.loc[:, 'acturial_success_percentile_base_portfolio'] = percentile_base\n prob_success_df.loc[:, 'acturial_success_percentile_port_portfolio'] = percentile_port\n\n # prob_success_df.loc[:, 'terminalVal_success_percentile_base_portfolio'] = percentile_base_tv\n # prob_success_df.loc[:, 'terminalVal_success_percentile_port_portfolio'] = percentile_port_tv\n\n prob_success_df.to_excel(writer, sheet_name='success_probability')\n\n # --------------BASE - Accumulation and Income Breakdown based on the success percentile portfolio---------------\n base_df.to_csv(src + 'base_port_detail.csv')\n sim_base_total.to_csv(src + 'base_ending_values.csv')\n income_breakdown_base = pd.DataFrame(sim_base_total.quantile(base_success, axis=1))\n income_breakdown_base.loc[:, 'income_from_risky_assets'] = sim_base_income.quantile(base_success, axis=1) \\\n - social - cpn_income_port\n income_breakdown_base.loc[:, 'guaranteed_income'] = 0.0\n income_breakdown_base.loc[:, 'social_security_income'] = social\n income_breakdown_base.loc[:, 'coupon_income'] = cpn_income_base\n\n income_breakdown_base.rename(columns={income_breakdown_base.columns[0]: 'portfolio_ending_value'}, inplace=True)\n income_breakdown_base.loc[:, 'income_from_risky_assets'][\n income_breakdown_base.loc[:, 'portfolio_ending_value'] <= 0] = 0\n income_breakdown_base.loc[:, 'total_income'] = income_breakdown_base.loc[:, income_breakdown_base.columns[1:]].sum(\n axis=1)\n\n # ----------FIA PORTFOLIO - Accumulation and Income Breakdown based on the success percentile portfolio-----------\n fia_portfolio_df.to_csv(src + 'fia_port_detail.csv')\n sim_port_total.to_csv(src + 'fiaport_ending_values.csv')\n\n income_breakdown_port = pd.DataFrame(sim_port_total.quantile(port_success, axis=1))\n income_breakdown_port.loc[:, 'income_from_risky_assets'] = sim_port_income.quantile(port_success, axis=1) \\\n - income_from_fia - social - cpn_income_port\n income_breakdown_port.loc[:, 'guaranteed_income'] = income_from_fia\n income_breakdown_port.loc[:, 'social_security_income'] = social\n income_breakdown_port.loc[:, 'coupon_income'] = cpn_income_port\n\n income_breakdown_port.rename(columns={income_breakdown_port.columns[0]: 'portfolio_ending_value'}, inplace=True)\n income_breakdown_port.loc[:, 'income_from_risky_assets'][\n income_breakdown_port.loc[:, 'portfolio_ending_value'] <= 0] = 0\n income_breakdown_port.loc[:, 'total_income'] = income_breakdown_port.loc[:, income_breakdown_port.columns[1:]].sum(\n axis=1)\n\n # -------------------Write simulation Statistics-------------------------------------\n simulation_stats.to_excel(writer, sheet_name='simulation_statistics')\n\n # port_psuccess.to_excel(writer, sheet_name='fia_port_success_probability')\n\n income_breakdown_base = income_breakdown_base.loc[1:, :]\n income_breakdown_base.loc[:, 'age'] = age_index\n income_breakdown_base.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n income_breakdown_base.loc[income_starts:, :].to_excel(writer, sheet_name='base_income_breakdown_median')\n\n income_breakdown_port = income_breakdown_port.loc[1:, :]\n 
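    # Aside (illustrative numbers only, not taken from the model inputs): the income-breakdown tables
    # built above stack four income sources per year -- withdrawals from the risky assets, guaranteed
    # FIA income (zero in the base case), social security, and bond coupon income -- and the
    # 'total_income' column is their sum.
    example_income_stack = {
        'income_from_risky_assets': 25_000,
        'guaranteed_income': 15_000,       # income_from_fia for the FIA portfolio, 0.0 for the base
        'social_security_income': 20_000,
        'coupon_income': 5_000,
    }
    example_total_income = sum(example_income_stack.values())   # 65_000, mirroring 'total_income'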
income_breakdown_port.loc[:, 'age'] = age_index\n income_breakdown_port.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n income_breakdown_port.loc[income_starts:, :].to_excel(writer, sheet_name='fia_income_breakdown_median')\n\n legacy_risk.to_excel(writer, sheet_name='ruin_probability')\n\n median_returns_normal.loc[:, 'fia_median_returns'] = median_normal_fia\n median_returns_normal.to_excel(writer, sheet_name='gr_port_median_normal')\n\n writer.save()\n\n # -----------------Plotting charts--------------------------------------------\n base_qcut.loc[income_starts:].plot(grid=True, title='Quantile Terminal Value - Base Portfolio')\n plt.savefig(src + \"quantile_terminal_base.png\")\n plt.close('all')\n\n base_income_qcut.plot(grid=True, title='Quantile Income - Base Portfolio')\n plt.savefig(src + \"quantile_income_base.png\")\n plt.close('all')\n\n base_psuccess.plot(grid=True, title='Probability of Success (Portfolio Ending Value > 0) - Base Portfolio')\n plt.savefig(src + \"success_probabilty_base.png\")\n plt.close('all')\n\n (1 - base_psuccess).plot(grid=True, title='Probability of Ruin (Portfolio Ending Value < 0) - Base Portfolio')\n plt.savefig(src + \"ruin_probability_base.png\")\n plt.close('all')\n\n port_qcut.loc[income_starts:].plot(grid=True, title='Quantile Terminal Value - FIA Portfolio')\n plt.savefig(src + \"quantile_terminal_fia.png\")\n plt.close('all')\n\n port_income_qcut.plot(grid=True, title='Quantile Income - FIA Portfolio')\n plt.savefig(src + \"quantile_income_fia.png\")\n plt.close('all')\n\n port_psuccess.plot(grid=True, title='Probability of Success (Portfolio Ending Value > 0) - FIA Portfolio')\n plt.savefig(src + \"success_probabilty_fia.png\")\n plt.close('all')\n\n (1 - port_psuccess).plot(grid=True, title='Probability of Ruin (Portfolio Ending Value < 0) - FIA Portfolio')\n plt.savefig(src + \"ruin_probability_fia.png\")\n plt.close('all')\n\n print(\"simulation completed....\")", "def stock_price_summary(price_changes):\n ## ***********\n #gains = 0\n #losses = 0\n #for price in price_changes :\n # if price <= 0 :\n # losses = losses + price\n # else:\n # gains = gains + price\n #return (gains, losses)\n ## ***********\n ## this works but supposedly shouldn't\n ## this is not a Python version issue -- passes all tests in 2.7 as well\n\n #pos = 0.0\n #neg = 0.0\n #for price in price_changes:\n # if price > 0:\n # pos = pos + price\n # elif price < 0:\n # neg = neg + price\n #return pos, neg\n ## passes all \n\n\n\n #gains_sum, losses_sum = 0 , 0\n #for i in range(len(price_changes)):\n # if price_changes[i] >= 0:\n # gains_sum+=price_changes[i]\n # else:\n # losses_sum+=price_changes[i]\n #return (round(gains_sum,2), round(losses_sum,2))\n ## appears to work\n\n #tuple_1 = 0\n #tuple_2 = 0\n #for i in price_changes:\n # if i >= 0:\n # tuple_1=round((tuple_1 + i),2)\n # else:\n # tuple_2=round((tuple_2 + i),2)\n #return (tuple_1,tuple_2)\n ## appears to work", "def values(self, start: XValue[T], stop: XValue[T], step: XValueDiff[T]) -> \"SortedDict[XValue[T], float]\":\n\n step = step or (stop - start)\n if len(self.breakpoints) == 0:\n num_values = int(math.ceil((stop - start) / step))\n return SortedDict([(start + step * i, self._initial_value) for i in range(num_values)])\n\n curr_xval = start\n curr_value = self.call(start)\n next_index, next_breakpoint, next_value = self._breakpoint_info(self.breakpoints.bisect(start))\n\n sequence = SortedDict()\n while curr_xval < stop:\n sequence[curr_xval] = curr_value\n\n next_xval = min(stop, 
curr_xval + step)\n while next_breakpoint and next_xval >= next_breakpoint:\n assert next_index is not None # if next_breakpoint is set, next_index should also be set\n curr_value = next_value\n next_index, next_breakpoint, next_value = self._breakpoint_info(next_index + 1)\n curr_xval = next_xval\n\n return sequence", "def get_unresolved_future_prices():\n #TODO this is inefficient, hits the db A LOT\n latest_bitcoin_time = get_latest_bitcoin_time()\n\n potentially_unresolved = Future_Price.objects.filter(\n time_to_match_price__lte=latest_bitcoin_time\n #TODO would like a __gt condition somehow\n )\n\n unresolved_future_prices = []\n for p in potentially_unresolved:\n has_no_returned_amounts_from_before_window = Returned_Amount.objects.filter(to_prediction__future_price=p, from_received_amount__time__lt=F('from_received_amount__prediction__future_price__time_window_closes')).count() == 0\n if has_no_returned_amounts_from_before_window:\n has_received_amounts_from_before_window = Received_Amount.objects.filter(prediction__future_price=p, time__lt=F('prediction__future_price__time_window_closes')).count() > 0\n if has_received_amounts_from_before_window:\n bitcoin_price_exists = Bitcoin_Price.objects.filter(time=p.time_to_match_price).count() == 1\n if bitcoin_price_exists:\n unresolved_future_prices.append(p)\n\n return unresolved_future_prices\n\n \"\"\"\n The following commented-out method:\n - assumes that there is always a bitcoin_price for every minute before the\n last bitcoin_price\n - assumes that every future_prediction before the last returned_amount has\n been evaluated\n ...I am not willing to make these assumptions\n \n latest_bitcoin_time = get_latest_bitcoin_time()\n\n try:\n latest_returned_amount = Returned_Amount.objects.order_by('-from_received_amount__prediction__future_price__time_to_match_price')[0]\n latest_returned_time = latest_returned_amount.from_received_amount.prediction.future_price.time_to_match_price\n except IndexError:\n latest_returned_time = datetime.datetime(1970, 1, 1, 0, 0, 0, 0, utc)\n\n unresolved_future_prices = Future_Price.objects.filter(\n time_to_match_price__lte=latest_bitcoin_time,\n time_to_match_price__gt=latest_returned_time\n )\n\n return unresolved_future_prices\n \"\"\"", "def _calc_return(self, order_original, perf_df):\r\n\r\n order = order_original.copy()\r\n no_sec = len(self.perf_data)\r\n price_names = np.array(['price_' + str(i) for i in xrange(1, no_sec + 1)])\r\n ret = np.zeros((np.shape(order)[0], no_sec))\r\n\r\n transaction_cost = 0\r\n\r\n # buy_list vs sell_list contains order bought vs sold that cannot be matched yet to determine the return\r\n # For example when something has been bought, but nothing or not enough has been sold yet, the residue will be\r\n # listed in these lists.\r\n buy_shares = np.zeros((np.shape(order)[0], no_sec))\r\n buy_price = np.zeros((np.shape(order)[0], no_sec))\r\n sell_shares = np.zeros((np.shape(order)[0], no_sec))\r\n sell_price = np.zeros((np.shape(order)[0], no_sec))\r\n\r\n # bl_first vs sl_first indicates which row in buy_list vs sell_list can be used to \"match\" bought/sold shares.\r\n # It automatically points to the oldest row with still outstanding shares. 
Initial value is -1\r\n # bl_last vs sl_last indicates which row in buy_list vs sell_list can be used to write outstanding shares to.\r\n bl_first = np.ones(no_sec).astype(int) * -1\r\n bl_last = np.zeros(no_sec).astype(int)\r\n sl_first = np.ones(no_sec).astype(int) * -1\r\n sl_last = np.zeros(no_sec).astype(int)\r\n\r\n for ind in range(0, np.shape(order)[0]):\r\n bl_first[(bl_first == -1) & (bl_last > 0)] = 0\r\n sl_first[(sl_first == -1) & (sl_last > 0)] = 0\r\n\r\n # Three situations, per type: buy, sell, nothing\r\n # If nothing, skip to next day\r\n # Only returns made on one day are determined, later they will be accumulated.\r\n\r\n # Situation A.A: Sell order & outstanding buys larger than sell order\r\n col_to_change = (order[ind, :] < 0) & (np.sum(buy_shares, 0) > -order[ind, :])\r\n if sum(col_to_change) != 0:\r\n share_cumsum = np.cumsum(buy_shares, 0)\r\n share_compl = (share_cumsum < -order[ind, :]) & col_to_change\r\n numb_shares = sum(buy_shares * share_compl, 0)[col_to_change]\r\n ret[ind, col_to_change] += numb_shares * perf_df.loc[ind, price_names[col_to_change]] \\\r\n - sum(buy_shares * buy_price * share_compl, 0)[col_to_change]\r\n buy_shares[share_compl] = 0\r\n bl_first += sum(share_compl)\r\n order[col_to_change] += numb_shares\r\n\r\n ret[ind, col_to_change] += perf_df.loc[ind, price_names[col_to_change]] * -order[ind, col_to_change] * (1 - transaction_cost) \\\r\n - buy_price[bl_first[col_to_change], col_to_change] \\\r\n * -order[ind, col_to_change] * (1 + transaction_cost)\r\n buy_shares[bl_first[col_to_change], col_to_change] += order[ind, col_to_change]\r\n order[ind, col_to_change] = 0\r\n\r\n # Situation A.B: Sell order & outstanding buys smaller than or equal to sell order\r\n # --> just fill out all outstanding buys, and change order. 
This order will be added to sell list in A.C\r\n col_to_change = (order[ind, :] < 0) & (np.sum(buy_shares, 0) > 0) \\\r\n & (np.sum(buy_shares, 0) <= -order[ind, :])\r\n if sum(col_to_change) != 0:\r\n numb_shares = buy_shares[:, col_to_change]\r\n price_shares = buy_price[:, col_to_change]\r\n ret[ind, col_to_change] += np.sum(numb_shares, 0) * \\\r\n perf_df.loc[ind, price_names[col_to_change]].values * (1 - transaction_cost) \\\r\n - np.sum(numb_shares * price_shares, 0) * (1 + transaction_cost)\r\n order[ind, col_to_change] += np.sum(numb_shares, 0)\r\n buy_shares[:, col_to_change] = 0\r\n bl_first[col_to_change] = bl_last[col_to_change] - 1\r\n\r\n # Situation A.C: Sell order & no outstanding buys\r\n col_to_change = (order[ind, :] < 0) & (np.sum(buy_shares, 0) == 0)\r\n if sum(col_to_change) != 0:\r\n row_to_change = bl_last[col_to_change]\r\n sell_shares[row_to_change, col_to_change] = -order[ind, col_to_change]\r\n sell_price[row_to_change, col_to_change] = perf_df.loc[ind, price_names[col_to_change]]\r\n sl_last[col_to_change] += 1\r\n\r\n # Situation B.A: Buy order & outstanding sells larger than buy order\r\n col_to_change = (order[ind, :] > 0) & (np.sum(sell_shares, 0) > order[ind, :])\r\n if sum(col_to_change) != 0:\r\n share_cumsum = np.cumsum(sell_shares, 0)\r\n share_compl = (share_cumsum < order[ind, :]) & col_to_change\r\n numb_shares = sum(sell_shares * share_compl, 0)[col_to_change]\r\n ret[ind, col_to_change] += sum(sell_shares * sell_price * share_compl, 0)[col_to_change] * (1 - transaction_cost)\\\r\n - numb_shares * perf_df.loc[ind, price_names[col_to_change]] * (1 + transaction_cost)\r\n sell_shares[share_compl] = 0\r\n sl_first += sum(share_compl)\r\n order[col_to_change] += -numb_shares\r\n\r\n ret[ind, col_to_change] += sell_price[sl_first[col_to_change], col_to_change] * order[ind, col_to_change] * (1 - transaction_cost)\\\r\n - perf_df.loc[ind, price_names[col_to_change]] * order[ind, col_to_change] * (1 + transaction_cost)\r\n sell_shares[sl_first[col_to_change], col_to_change] += -order[ind, col_to_change]\r\n order[ind, col_to_change] = 0\r\n\r\n # Situation B.B: Buy order & outstanding sells smaller than buy order\r\n # --> just fill out all outstanding sells, and change order. 
This order will be added to buy list in B.C\r\n col_to_change = (order[ind, :] > 0) & \\\r\n (np.sum(sell_shares, 0) > 0) & (np.sum(sell_shares, 0) <= order[ind, :])\r\n if sum(col_to_change) != 0:\r\n numb_shares = sell_shares[:, col_to_change]\r\n price_shares = sell_price[:, col_to_change]\r\n ret[ind, col_to_change] += np.sum(numb_shares * price_shares, 0) * (1 - transaction_cost) \\\r\n - np.sum(numb_shares, 0) * perf_df.loc[ind, price_names[col_to_change]] * (1 + transaction_cost)\r\n order[ind, col_to_change] += -np.sum(numb_shares, 0)\r\n sell_shares[:, col_to_change] = 0\r\n sl_first[col_to_change] = sl_last[col_to_change] - 1\r\n\r\n # Situation B.C: Buy order & no outstanding sells\r\n col_to_change = (order[ind, :] > 0) & (np.sum(sell_shares, 0) == 0)\r\n if sum(col_to_change) != 0:\r\n row_to_change = bl_last[col_to_change]\r\n buy_shares[row_to_change, col_to_change] = order[ind, col_to_change]\r\n buy_price[row_to_change, col_to_change] = perf_df.loc[ind, price_names[col_to_change]]\r\n bl_last[col_to_change] += 1\r\n\r\n ret_abs = np.array([sum(ret[:r]) for r in range(1, len(ret) + 1)])\r\n returns_abs = np.sum(ret_abs, 1)\r\n returns_rel = [i / self.context['max_notional'] + 1 for i in returns_abs]\r\n\r\n return returns_rel, returns_abs, ret_abs", "def index():\n # Establish userID.\n userID = session[\"user_id\"]\n # Isolate all results from portfolio table for the current user.\n portfolio = db.execute(\"SELECT * FROM portfolio WHERE id=:userID\", userID=session[\"user_id\"])\n # Cash for current user (first row, cash column)\n cash = db.execute(\"SELECT cash FROM users WHERE id=:userID\", userID=userID)[0][\"cash\"]\n # Empty list to store stock data as iterating through rows.\n stockData = []\n # Set total for combined stoc value to 0.\n totalAllStocks = 0\n\n # Iterate over rows from portfolio and allocate a row for each stock that has more than 0 owned.\n for row in portfolio:\n if row[\"numOwned\"] != 0:\n stockData.append(row)\n\n # Iterate over rows in stock data and provide value for each column. 
Other values for use in html are already in list from previous loop.\n # Had to play around with usd, once in usd is a str rather than float so usd always has to be post calculations.\n for row in stockData:\n stock = lookup(row[\"symbol\"])\n row[\"name\"] = stock[\"name\"]\n row[\"currentPrice\"] = usd(stock[\"price\"])\n row[\"total\"] = usd(row[\"numOwned\"] * stock[\"price\"])\n totalAllStocks += row[\"numOwned\"] * stock[\"price\"]\n # Grand Total is combined stock values and cash value.\n grandTotal = totalAllStocks + cash\n # Return index.html input sources.\n return render_template(\"index.html\", stockData=stockData, cash=usd(cash), totalAllStocks = usd(totalAllStocks), grandTotal=usd(grandTotal))", "def stocks(values, maxSales):\n return 0", "def calc_portfolio_risk(\n context,\n data,\n risk_func,\n hist_days=180,\n **kwargs):\n\n \n positions = context.portfolio.positions\n positions_index = pd.Index(positions)\n share_counts = pd.Series( \n index=positions_index, \n data=[positions[asset].amount for asset in positions] \n )\n\n current_prices = data.current(positions_index, 'price') \n current_weights = (\n share_counts * current_prices / context.portfolio.portfolio_value\n )\n \n prices = data.history(\n current_weights.index.tolist(),\n 'price',\n hist_days,\n '1d'\n )\n\n daily_rets = prices.pct_change()\n daily_rets = daily_rets - daily_rets.mean(skipna=True)\n daily_rets = daily_rets.fillna(0.0)\n\n risk = risk_func(current_weights.values, daily_rets, **kwargs)\n return risk", "def test_portfolio_balance(\n session, account_checking, account_savings, account_sp500, asset_krw, asset_sp500\n):\n portfolio = Portfolio()\n portfolio.base_asset = asset_krw\n portfolio.add_accounts(account_checking, account_savings, account_sp500)\n\n assert portfolio.balance(parse_date(\"2016-05-20\")) == {}\n\n deposit(account_checking, asset_krw, 1500, parse_date(\"2016-05-01\"))\n deposit(account_savings, asset_krw, 3000, parse_date(\"2016-05-01\"))\n deposit(account_sp500, asset_sp500, 120, parse_date(\"2016-05-01\"))\n\n assert portfolio.balance(parse_date(\"2016-05-20\")) == {\n asset_krw: 4500,\n asset_sp500: 120,\n }\n\n deposit(account_savings, asset_krw, 4000, parse_date(\"2016-05-02\"))\n deposit(account_savings, asset_krw, 5000, parse_date(\"2016-05-03\"))\n\n assert portfolio.balance(parse_date(\"2016-05-20\")) == {\n asset_krw: 13500,\n asset_sp500: 120,\n }\n\n balance_adjustment(account_savings, asset_krw, 10000, parse_date(\"2016-05-04\"))\n\n assert portfolio.balance(parse_date(\"2016-05-20\")) == {\n asset_krw: 11500,\n asset_sp500: 120,\n }\n\n session.delete(portfolio)\n session.commit()", "def update_consumption_values(transactions):\n # grab all the transactions in the period, plus \n # the one immediately before the first one if there is one\n \n # walk through them, determining consumption amounts \n # between them.\n # for each delta, compute its total effect on consumption\n # throwing away anomalous values like SOH increasing\n if transactions.count():\n to_process = list(transactions)\n start_t = to_process[0].previous_transaction()\n if start_t:\n to_process.insert(0, start_t)\n if len(to_process) > 1:\n for i in range(len(to_process) - 1):\n start, end = to_process[i:i+2]\n assert start.supply_point == end.supply_point\n assert start.product == end.product\n assert start.date <= end.date\n delta = end.ending_balance - start.ending_balance\n # assert delta == end.quantity\n\n total_timedelta = end.date - start.date\n for year, month in months_between(start.date, 
end.date):\n window_date = datetime(year, month, 1)\n next_window_date = first_of_next_month(window_date)\n start_date = max(window_date, start.date)\n end_date = min(next_window_date, end.date)\n\n # the number of seconds in this window - should be either the interval\n # between transactions - or if that interval spans the border of a month\n # then the interval corresponding to the portion in this month.\n secs_in_window = delta_secs(end_date-start_date)\n proportion_in_window = secs_in_window / (delta_secs(total_timedelta)) if secs_in_window else 0\n assert proportion_in_window <= 1\n c = get_or_create_singular_model(\n CalculatedConsumption,\n supply_point=start.supply_point,\n date=window_date,\n product=start.product\n )[0]\n if delta < 0:\n # update the consumption by adding the proportion in the window\n c.calculated_consumption += float(abs(delta)) * proportion_in_window\n \n # only count time with data if the balance went down or \n # stayed the same, or was a receipt.\n # otherwise it's anomalous data.\n if delta <= 0 or end.product_report.report_type.code == Reports.REC:\n c.time_with_data += secs_in_window\n \n if start.ending_balance == 0:\n c.time_stocked_out += secs_in_window\n \n c.save()", "def calc_returns(prices):\n returns = []\n for i in range(len(prices) - 1):\n ret = (prices[i + 1] - prices[i]) / prices[i]\n returns.append(ret)\n return returns", "def budget(df, df_hist, harmonize_year=\"2015\"):\n\n harmonize_year = int(harmonize_year)\n\n df = df.set_axis(df.columns.astype(int), axis=\"columns\")\n df_hist = df_hist.set_axis(df_hist.columns.astype(int), axis=\"columns\")\n\n data_years = df.columns\n hist_years = df_hist.columns\n\n years = data_years[data_years >= harmonize_year]\n\n if data_years[0] not in hist_years:\n hist_years = hist_years.insert(bisect(hist_years, data_years[0]), data_years[0])\n df_hist = df_hist.reindex(columns=hist_years).interpolate(\n method=\"slinear\", axis=1\n )\n\n def carbon_budget(years, emissions):\n # trapezoid rule\n dyears = np.diff(years)\n demissions = np.diff(emissions)\n\n budget = (dyears * (np.asarray(emissions)[:-1] + demissions / 2)).sum()\n return budget\n\n solver = pyo.SolverFactory(\"ipopt\")\n if solver.executable() is None:\n raise RuntimeError(\n \"No executable for the solver 'ipopt' found \"\n \"(necessary for the budget harmonization). \"\n \"Install from conda-forge or add to PATH.\"\n )\n\n harmonized = []\n\n for region in df.index:\n model = pyo.ConcreteModel()\n\n \"\"\"\n PARAMETERS\n \"\"\"\n data_vals = df.loc[region, years]\n hist_val = df_hist.loc[region, harmonize_year]\n\n budget_val = carbon_budget(data_years, df.loc[region, :])\n\n if data_years[0] < harmonize_year:\n hist_in_overlap = df_hist.loc[region, data_years[0] : harmonize_year]\n budget_val -= carbon_budget(hist_in_overlap.index, hist_in_overlap)\n\n \"\"\"\n VARIABLES\n \"\"\"\n model.x = pyo.Var(years, initialize=0, domain=pyo.Reals)\n x = np.array(\n [model.x[y] for y in years]\n ) # keeps pyomo VarData objects, ie. 
modelling vars not numbers\n\n \"\"\"\n OBJECTIVE FUNCTION\n \"\"\"\n delta_years = np.diff(years)\n delta_x = np.diff(x)\n delta_m = np.diff(data_vals)\n\n def l2_norm():\n return pyo.quicksum((delta_m / delta_years - delta_x / delta_years) ** 2)\n\n model.obj = pyo.Objective(expr=l2_norm(), sense=pyo.minimize)\n\n \"\"\"\n CONSTRAINTS\n \"\"\"\n model.hist_val = pyo.Constraint(expr=model.x[harmonize_year] == hist_val)\n\n model.budget = pyo.Constraint(expr=carbon_budget(years, x) == budget_val)\n\n \"\"\"\n RUN\n \"\"\"\n results = solver.solve(model)\n\n assert (results.solver.status == pyo.SolverStatus.ok) and (\n results.solver.termination_condition == pyo.TerminationCondition.optimal\n ), (\n f\"ipopt terminated budget optimization with status: \"\n f\"{results.solver.status}, {results.solver.termination_condition}\"\n )\n\n harmonized.append([pyo.value(model.x[y]) for y in years])\n\n df_harm = pd.DataFrame(\n harmonized,\n index=df.index,\n columns=years.astype(str),\n )\n\n return df_harm", "def get_data(pair, other):\n days_ago = 7\n endtime = int(time())\n starttime = endtime - 60 * 60 * 24 * days_ago\n\n geckourl = '%s/markets?vs_currency=%s&ids=%s' % (API, pair[\"currency\"],\n pair[\"coin\"])\n liveprice = requests.get(geckourl).json()[0]\n pricenow = float(liveprice['current_price'])\n alltimehigh = float(liveprice['ath'])\n other['volume'] = float(liveprice['total_volume'])\n\n url_hist = '%s/%s/market_chart/range?vs_currency=%s&from=%s&to=%s' % (\n API, pair[\"coin\"], pair[\"currency\"], str(starttime), str(endtime))\n\n try:\n timeseriesarray = requests.get(url_hist).json()['prices']\n except JSONDecodeError as err:\n print(f'Caught JSONDecodeError: {repr(err)}')\n return None\n timeseriesstack = []\n length = len(timeseriesarray)\n i = 0\n while i < length:\n timeseriesstack.append(float(timeseriesarray[i][1]))\n i += 1\n\n timeseriesstack.append(pricenow)\n if pricenow > alltimehigh:\n other['ATH'] = True\n else:\n other['ATH'] = False\n\n other[\"image\"] = pair[\"image\"]\n other[\"coin\"] = pair[\"coin\"]\n\n return timeseriesstack", "def maxProfit(self, prices):\n if not prices:\n return 0\n \n today = 0\n total_profit = 0 \n \n min_price = prices[0]\n \n while today < len(prices):\n if prices[today] < min_price:\n # keep the lowest price\n min_price = prices[today]\n \n tomorrow = today + 1\n if tomorrow >= len(prices): # is the last day?\n if min_price < prices[today]:\n total_profit += prices[today] - min_price\n break\n \n elif prices[tomorrow] < prices[today]: # price going down, we sell out\n if min_price < prices[today]:\n total_profit += (prices[today] - min_price)\n \n min_price = prices[tomorrow] # can not buy today, start from tomorrow\n today = tomorrow + 1\n else: \n today = tomorrow # keep the stock\n \n return total_profit", "def index():\n def getListOfCompanies(username, symbolOrPriceOrNumber):\n if symbolOrPriceOrNumber == \"symbol\" or symbolOrPriceOrNumber == \"price\" or symbolOrPriceOrNumber == \"number\":\n rows = db.execute(\"SELECT {0} FROM portfolio WHERE username=:username\".format(symbolOrPriceOrNumber), username=username)\n if symbolOrPriceOrNumber == \"symbol\" and len(rows) >= 1:\n namesList = []\n for row in rows:\n namesList.append(lookup(row[symbolOrPriceOrNumber])[\"name\"])\n return namesList\n elif symbolOrPriceOrNumber == \"price\" and len(rows) >= 1:\n pricseList = []\n for row in rows:\n pricseList.append(row[symbolOrPriceOrNumber])\n return pricseList\n elif symbolOrPriceOrNumber == \"number\" and len(rows) >= 1:\n 
numbersList = []\n for row in rows:\n numbersList.append(row[symbolOrPriceOrNumber])\n return numbersList\n else:\n return None\n else:\n return None\n\n def getTotalValueHolding(username):\n priceRow = db.execute(\"SELECT price FROM portfolio WHERE username=:username\", username=username)\n numberRow = db.execute(\"SELECT number FROM portfolio WHERE username=:username\", username=username)\n\n if len(priceRow) >= 1 and len(numberRow) >= 1 and len(priceRow) == len(numberRow):\n totalList = []\n for i in range(len(priceRow)):\n totalList.append(float(priceRow[i][\"price\"]) * float(numberRow[i][\"number\"]))\n\n return totalList\n\n username = db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"]\n companiesNames = getListOfCompanies(username, \"symbol\")\n numberOfShares = getListOfCompanies(username, \"number\")\n prices = getListOfCompanies(username, \"price\")\n totalValueHolding = getTotalValueHolding(username)\n\n currentCashBalance = db.execute(\"SELECT cash FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"cash\"]\n total = 0\n if totalValueHolding:\n for totalValue in totalValueHolding:\n total = total + totalValue\n\n cashAndStocksTotalValue = float(currentCashBalance) + total\n\n return render_template(\"index.html\", username=username, companiesNames=companiesNames, numberOfShares=numberOfShares,\n prices=prices, totalValueHolding=totalValueHolding, currentCashBalance=currentCashBalance, cashAndStocksTotalValue=cashAndStocksTotalValue)", "def index():\n stocks = []\n username = session.get(\"username\")\n symbol_list = db.execute(\"SELECT stock_symbol FROM history WHERE username=:username GROUP BY stock_symbol\", username=username)\n cash_balance = db.execute(\"SELECT cash FROM users WHERE username=:username\", username=username)[0][\"cash\"]\n total_value = cash_balance\n\n for sym in symbol_list:\n symbol = sym[\"stock_symbol\"]\n new_stock = Stock(username, symbol)\n stocks.append(new_stock)\n total_value += new_stock.quantity * new_stock.price\n\n\n return render_template(\"portfolio.html\", stocks = stocks, cash_balance=usd(cash_balance), total_value=usd(total_value))", "def portfolio_return_calculator(positions, returns):\r\n shifted = array_shift(positions, 1)\r\n if len(positions.shape) == 1:\r\n return shifted.flatten() * returns\r\n else:\r\n return np.nansum(shifted * returns, axis=1)", "def calculate_uncross(values: np.ndarray) -> dict:\n # output = dict(price=np.nan, trade_vol=np.nan, cum_bids=np.nan, cum_asks=np.nan, total_bids=np.nan, total_asks=np.nan)\n output = {'price': np.nan, 'trade_vol': np.nan, 'cum_bids': np.nan, 'cum_asks': np.nan, 'total_bids': np.nan, 'total_asks': np.nan}\n\n mark_buy = values[0, 1]\n mark_sell = values[0, 2]\n prices = values[1:, 0]\n limit_orders = values[1:, -2:]\n\n if np.min(np.sum(limit_orders[:, -2:], axis=0)) == 0:\n return output\n\n else:\n cumul = np.zeros((limit_orders.shape[0], 2))\n cumul[:, 0] = np.flip(np.cumsum(np.flip(limit_orders[:, -2]))) + mark_buy\n cumul[:, 1] = np.cumsum(limit_orders[:, -1]) + mark_sell\n\n if np.max(cumul[:, 0] * cumul[:, 1]) == 0: # No overlap\n return output\n\n volumes = np.minimum(cumul[:, 0], cumul[:, 1])\n imbalances = np.abs(cumul[:, 0] - cumul[:, 1])\n\n maxvol_indices = np.flatnonzero(volumes == volumes.max())\n minimb_index = np.argmin(imbalances[maxvol_indices])\n optimum = maxvol_indices.min() + minimb_index\n\n price = prices[optimum]\n trade_vol = np.min(cumul[optimum, :])\n\n sum_bids, sum_asks = cumul[:, 
0].max(), cumul[:, 1].max()\n\n if min(cumul[optimum, 0], cumul[optimum, 1]) == 0:\n output = output\n else:\n output = {'price': price, 'trade_vol': trade_vol, 'cum_bids': cumul[optimum, 0], 'cum_asks': cumul[optimum, 1], 'total_bids': sum_bids,\n 'total_asks': sum_asks}\n # output = dict(price=price, trade_vol=trade_vol, cum_bids=cumul[optimum, 0], cum_asks=cumul[optimum, 1],\n # total_bids=sum_bids, total_asks=sum_asks)\n return output", "def calc(self,newValue):\n idx=np.searchsorted(self.quantiles, newValue, side=\"left\")\n if idx>=self.n-1:\n return idx/self.n\n if np.abs(newValue - self.quantiles[idx-1]) < np.abs(newValue - self.quantiles[idx]):\n return (idx-1)/self.n\n else:\n return idx/self.n", "def prices(symbol):\n to = date.today().strftime(\"%Y%m%d\")\n c = db.cursor()\n c.execute(\"SELECT DATE_ADD(max(date), INTERVAL 1 DAY) FROM quote where symbol = %s\",\n (symbol))\n (_from, ) = c.fetchone()\n if _from == date.today():\n print \"Skipping %s\" % symbol\n return\n print \"Downloading %s\" % symbol\n if _from is None: \n _from = start_date\n else:\n _from = _from.strftime(\"%Y%m%d\")\n prices = stockquote.get_historical_prices(symbol, _from, to)\n headers = prices[0]\n try:\n close = get_idx(headers, 'Close')\n date_ = get_idx(headers, 'Date')\n open = get_idx(headers, 'Open')\n high = get_idx(headers, 'High')\n low = get_idx(headers, 'Low')\n quotes = prices[1:]\n for l in quotes:\n #print \"%s %s\" % (l[date_], l[close])\n try:\n insert(symbol, l[date_], l[close], l[high], l[low], l[open])\n except Exception, e:\n print \"Could not insert %s:%s\" % (symbol, e)\n print \"Inserted %s new quotes for %s\" % (len(quotes), symbol)\n except Exception, e:\n print \"Could not download %s\" % symbol\n print e", "def initial_approximation(pulls, discount, grid_n):\n\n values = np.zeros([pulls - 1, pulls - 1, grid_n]) # Store V(a=k, b=n-k, r) in values[k,n-1,:] as k varies\n gittins = np.zeros([pulls - 1, pulls - 1]) # Store Gittins(a=k, b=n-k) in gittins[k,n-1] as k varies\n\n a_grid = np.arange(1, pulls)\n r_grid = np.linspace(0, 1, grid_n)\n\n initial_gittins = a_grid / float(pulls) # Initial Gittins Approximation to start Backward Induction\n gittins[0:pulls, pulls - 2] = initial_gittins # Record initial Gittins approximation\n\n for idx_a, a in enumerate(a_grid):\n values[idx_a, pulls - 2, :] = (1.0 / (1 - discount)) * \\\n np.maximum(r_grid, a / float(pulls)) # Record initial Value approximation\n\n return gittins, values" ]
[ "0.7288363", "0.6751513", "0.649566", "0.639736", "0.6342396", "0.58377224", "0.5771629", "0.57363266", "0.5704653", "0.5654795", "0.56540567", "0.564203", "0.5640176", "0.5620506", "0.55888516", "0.55628633", "0.5536075", "0.553035", "0.55150396", "0.55067617", "0.5476467", "0.54720634", "0.5450717", "0.5437177", "0.5428002", "0.5424734", "0.5400011", "0.53931135", "0.5391972", "0.5371628", "0.53491414", "0.53311694", "0.53249246", "0.53017324", "0.52938277", "0.528905", "0.5283509", "0.52653193", "0.5265243", "0.52610314", "0.52254957", "0.518319", "0.5161502", "0.5153441", "0.51433736", "0.51426756", "0.5136479", "0.51326555", "0.5130588", "0.51212835", "0.51045203", "0.51020676", "0.5094619", "0.50939006", "0.5088556", "0.5087758", "0.5085904", "0.50621754", "0.505898", "0.50463", "0.50225013", "0.5015775", "0.5010955", "0.50085735", "0.5004403", "0.4999907", "0.49948066", "0.49797553", "0.49683753", "0.49664456", "0.49535987", "0.4951655", "0.49500933", "0.494445", "0.49323142", "0.49221393", "0.49088818", "0.4908116", "0.48993307", "0.48978063", "0.489133", "0.48793998", "0.487529", "0.48736408", "0.48709238", "0.48700514", "0.48667493", "0.4860441", "0.48590517", "0.4857742", "0.48476848", "0.48381075", "0.48319492", "0.4831494", "0.48294553", "0.48271114", "0.4826386", "0.48171672", "0.48093084", "0.48071092" ]
0.7756678
0
Calculate sharpe ratio for minimizer.
def get_sharpe_ratio(allocs, prices):
    port_val = get_portfolio_value(prices, allocs, start_val=1.0)
    sharpe_ratio = get_portfolio_stats(port_val, daily_rf=0.0, samples_per_year=252)[3]
    return -sharpe_ratio
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sharpe_ratio(self, r_f):\n return (\n self.cumulative_returns().last('1D').iat[0] - r_f\n ) / self.cumulative_returns().std()", "def sharpe_ratio(r1, r2, rf, o1, o2, cov):\n def sr(x):\n w1 = x[0]\n w2 = 1 - w1\n\n Rp = w1 * r1 + w2 * r2\n STDEVp = math.sqrt(portfolio_variance(o1, o2, cov)(x))\n R = (Rp - rf) / STDEVp\n return R\n return sr", "def sharpe_ratio(adr,sddr,sf=252,rfr=0.0):\n rfr=((1.0 + rfr) ** (1/sf)) - 1 # Daily risk free return. This is the shortcut to calculate daily (sf=252) risk free return\n return sf**(1.0/2)*(adr-rfr)/sddr", "def calculate_gear_ratio(front_gear, back_gear):\n return front_gear/back_gear", "def sharpe_ratio(port_returns, risk_free_rate, asset_returns, weights):\n\n # calculate the standard deviation of the returns of the portfolio\n portfolio_standard_deviation = np.sqrt(portfolio_volatility(asset_returns, weights))\n\n # calculate the Sharpe ratio of the portfolio\n sr = (np.mean(port_returns) - risk_free_rate)/portfolio_standard_deviation\n\n return sr", "def sharpe_ratio(port_returns, risk_free_rate, asset_returns, weights):\n\n # calculate the standard deviation of the returns of the portfolio\n portfolio_standard_deviation = np.sqrt(portfolio_volatility(asset_returns, weights))\n\n # calculate the Sharpe ratio of the portfolio\n sr = (port_returns[-1] - risk_free_rate)/portfolio_standard_deviation\n\n return sr", "def adv_ratio(self): # XXX\r\n bw = StatsRouter.global_bw_mean\r\n if bw == 0.0: return 0\r\n else: return self.bw/bw", "def sharpe_ratio(factor_returns, annualization_factor):\r\n\r\n return annualization_factor * factor_returns.mean() / factor_returns.std()", "def bw_ratio(self):\r\n bw = self.bwstats.mean\r\n if bw == 0.0: return 0\r\n else: return self.bw/(1024.*bw)", "def smape(self) -> float:\n _temp = np.sum(2 * np.abs(self.predicted - self.true) / (np.abs(self.true) + np.abs(self.predicted)))\n return float(100 / len(self.true) * _temp)", "def starsize(self, hipid):\n #if hipid<0 or len(self.hip_stars)<=hipid: return 0\n s = self.hip_stars[hipid]\n if s==None: return 0\n #return self.zerosize*(.8**(s[1]))\n #return self.zerosize-s[1]-2\n return self.dimmest_mag-s[1]+1", "def rmspe(self) -> float:\n return float(np.sqrt(np.mean(np.square(((self.true - self.predicted) / self.true)), axis=0)))", "def horizontal_ratio(self):\n if self.pupils_located:\n pupil_left = self.eye_left.pupil.x / (self.eye_left.center[0] * 2 - 10)\n pupil_right = self.eye_right.pupil.x / (self.eye_right.center[0] * 2 - 10)\n return (pupil_left + pupil_right) / 2", "def infected_ratio(self):\n if self.max_pop != 0:\n return int(self.infected_pop) / self.max_pop\n else:\n return 1", "def pe_ratio(self):\n if self._pe_ratio == None:\n return float('inf')\n return self._pe_ratio", "def _calculate_snr_spread(self):\n\n dmSpacing, percentage = 100, 0\n while percentage < 0.5: \n x = np.linspace(self.centerDm - dmSpacing, self.centerDm + dmSpacing, 500)\n y = np.array([self.effective_snr(self.effective_width(self.pulseWidth, self.centerDm - dm_val, self.bandwidth, self.freq), self.pulseWidth * 20) for dm_val in x])\n y = (y / (np.max(y) * 1.0)) if np.max(y) > 0 else y\n percentage = np.size(np.where(y > 0)) / 1000.0\n dmSpacing = dmSpacing*0.6\n \n return x, y", "def golden_ratio():\n print((1+math.sqrt(5))/2)", "def getRatio(probe_num, position_vector, shot_range, dir, day ='050119r'):\n ratio_x = 0\n ratio_y = 0\n ratio_z = 0\n # helm_B = [0,0,0]\n divideby = 0\n for shot in range(shot_range[0], shot_range[1]+1):\n print( 'On shot ', day+str(shot), ' for 
probe ',probe_num)\n x,y,z, currmax,helmB_new = probe_calib(day+str(shot), probe_num, position_vector,dir)\n ratio_x = ratio_x + x\n ratio_y = ratio_y + y\n ratio_z = ratio_z + z\n # helm_B = [helm_B[i] + helmB_new[i] for i in len(helmB)]\n divideby = divideby + 1 #averaging over the number of shots\n ratio_Bx = ratio_x/divideby\n ratio_By = ratio_y/divideby\n ratio_Bz = ratio_z/divideby\n # helmB = [helm_B]/divideby\n # print ratio_Bx, ratio_By, ratio_Bz, helmB\n # print(\"ratio_Bx %f, ratio_By %f, ratio_Bz %f, helmB%s\"%(ratio_Bx, ratio_By, ratio_Bz, helmB))\n Bx_sqr =ratio_x**2\n By_sqr =ratio_y**2\n Bz_sqr =ratio_z**2\n B = Bx_sqr + By_sqr+ Bz_sqr\n norm_factor = np.sqrt(B)\n ratio_Bx, ratio_By, ratio_Bz = [ratio_Bx, ratio_By, ratio_Bz]/norm_factor\n\n return (ratio_Bx, ratio_By, ratio_Bz, norm_factor)", "def quick_ratio(self):\n return (\n self.current_assets - self.inventory_net) / self.current_liabilities", "def golden_ratio():\n return 1.61803398875", "def pe_ratio(self):\n try:\n return self.price / self.dividend_yield\n except ZeroDivisionError:\n return 0.0", "def get_scaling_ratio(img):\n\n healthy_img_area = 4872 * 6496\n input_img_area = img.shape[0] * img.shape[1]\n ratio = input_img_area / healthy_img_area\n return ratio", "def get_expected_compression_ratio_pct(self) -> int:\n return 100", "def calc_NPSH(P_suction, P_vapor, rho_liq):\n # Note: NPSH = (P_suction - P_vapor)/(rho_liq*gravity)\n # Taking into account units, NPSH will be equal to return value\n return 0.334438*(P_suction - P_vapor)/rho_liq", "def sharpe_ratio(returns, risk_free=0, period=DAILY):\n\n returns_risk_adj = returns - risk_free\n\n if (len(returns_risk_adj) < 5) or np.all(returns_risk_adj == 0):\n return np.nan\n\n return np.mean(returns_risk_adj) / \\\n np.std(returns_risk_adj) * \\\n np.sqrt(ANNUALIZATION_FACTORS[period])", "def ratio(original, compressed):\n olen = len(original)\n clen = len(compressed)\n return (olen - clen) / olen", "def perfect_ratios(g, goal):\n if goal == 'ORE':\n return 1\n\n return Fraction(sum(perfect_ratios(g, subgoal) * mul for subgoal, mul in\n g[goal][1].items()), g[goal][0])", "def strm_bw_ratio(self):\r\n bw = self.bwstats.mean\r\n if StatsRouter.global_strm_mean == 0.0: return 0\r\n else: return (1.0*bw)/StatsRouter.global_strm_mean", "def sharpe(returns):\n return returns.mean() / returns.std()", "def sharpness_penalty(self):\n # This polynomial function gives the gain for peaking filter which achieves 18 dB / octave max derivative\n # The polynomial estimate is accurate in the vicinity of 18 dB / octave\n gain_limit = -0.09503189270199464 + 20.575128011847003 * (1 / self.q)\n # Scaled sigmoid function as penalty coefficient\n x = self.gain / gain_limit - 1\n sharpness_penalty_coefficient = 1 / (1 + np.e ** (-x * 100))\n return np.mean(np.square(self.fr * sharpness_penalty_coefficient))", "def sharpe_ratio(self,r, riskfree_rate, periods_per_year):\n # Convert the annualized riskfree rate to period \n \n rf_per_period = (1+riskfree_rate) ** (1/periods_per_year) - 1\n \n excess_ret = r - rf_per_period\n \n ann_ex_ret = self.annualize_rets(excess_ret, periods_per_year)\n \n ann_vol = self.annualize_vol(r, periods_per_year)\n\n return ann_ex_ret / ann_vol", "def ratio(self):\n return float(self.max_width) / self.max_height", "def calculate_sh(self):\n if self.data.get('Specific_Humidity') is None:\n if self.data.get('Mixing_Ratio') is None:\n raise KeyError('Calculate mixing ratio first!')\n else:\n w_kg = self.data['Mixing_Ratio'] / 1000\n 
self.data['Specific_Humidity'] = (w_kg / (w_kg + 1)) * 1000", "def prescaler(self) -> int:", "def get_score(p):\n temp = path[round(p[0], 1), round(p[1], 1)] / a_star\n return (clip(1 - temp, a_min=0, a_max=1) + clip(1 - temp, a_min=0, a_max=1) ** 2) / 2", "def shear_est(self, gal_image, psf_image, noise=None, F=False):\n # gal_ps = self.pow_spec(gal_image)\n gal_ps = gal_image\n # gal_ps = hk_tool_box.smooth(gal_ps,self.size)\n if noise is not None:\n nbg = self.pow_spec(noise)\n self.flux2 = numpy.sqrt(gal_ps[int(self.size/2), int(self.size/2)]/numpy.sum(self.rim*gal_ps)*numpy.sum(self.rim))\n # nbg = hk_tool_box.smooth(nbg,self.size)\n # rim = self.border(2, size)\n # n = numpy.sum(rim)\n # gal_pn = numpy.sum(gal_ps*rim)/n # the Possion noise of galaxy image\n # nbg_pn = numpy.sum(nbg*rim)/n # the Possion noise of background noise image\n gal_ps = gal_ps - nbg# + nbg_pn - gal_pn\n\n if F:\n psf_ps = psf_image\n else:\n psf_ps = self.pow_spec(psf_image)\n # self.get_radius_new(psf_ps, 2)\n wb, beta = self.wbeta(self.hlr)\n maxi = numpy.max(psf_ps)\n idx = psf_ps < maxi / 100000.\n wb[idx] = 0\n psf_ps[idx] = 1.\n tk = wb/psf_ps * gal_ps\n\n # ky, kx = self.ky, self.kx\n # #\n # kx2 = kx*kx\n # ky2 = ky*ky\n # kxy = kx*ky\n # k2 = kx2 + ky2\n # k4 = k2*k2\n # mn1 = (-0.5)*(kx2 - ky2) # (-0.5)*(kx**2 - ky**2)\n # mn2 = -kxy # -kx*ky\n # mn3 = k2 - 0.5*beta**2*k4 # kx**2 + ky**2 - 0.5*beta**2*(kx**2 + ky**2)**2\n # mn4 = k4 - 8*kx2*ky2 # kx**4 - 6*kx**2*ky**2 + ky**4\n # mn5 = kxy*(kx2 - ky2) # kx**3*ky - kx*ky**3\n\n # mn1 = self.mn1\n # mn2 = self.mn2\n mn3 = self.k2 - 0.5*beta**2*self.k4\n # mn4 = self.mn4\n # mn5 = self.mn5\n\n mg1 = numpy.sum(self.mn1 * tk)*self.alpha\n mg2 = numpy.sum(self.mn2 * tk)*self.alpha\n mn = numpy.sum(mn3 * tk)*self.alpha\n mu = numpy.sum(self.mn4 * tk)*(-0.5*beta**2)*self.alpha\n mv = numpy.sum(self.mn5 * tk)*(-2.*beta**2)*self.alpha\n\n return mg1, mg2, mn, mu, mv", "def exceeded_ratio(self) -> float:\n return self.amount_spent / self.total_amount", "def _reduce(self) -> None:\n divisor = self._gcd(self._numerator, self._denominator)\n self._numerator = self._numerator // divisor\n self._denominator = self._denominator // divisor", "def test_mixing_ratio():\n p = 998. 
* units.mbar\n e = 73.75 * units.mbar\n assert_almost_equal(mixing_ratio(e, p), 0.04963, 2)", "def pixel_size_ratio(self):\n return 2**(self.levels[-1] - self.levels[0])", "def propensity_to_move(self, animal):\n if isinstance(self, Mountain) or isinstance(self, Ocean):\n return 0\n else:\n return math.exp(animal.parameters['lambda'] *\n self.relative_abundance_fodder(animal))", "def get_sharpe_ratio(df, df_type=None, working_days=None):\n if df_type is None:\n df_type = const.RETURN_DATAFRAME\n\n if working_days is None:\n working_days = 252\n\n if df_type == const.RETURN_DATAFRAME:\n return df.mean() / df.std() * np.sqrt(working_days)\n\n elif df_type == const.WEALTH_DATAFRAME:\n return_df = (df - df.shift(1)) / df.shift(1)\n # return_df.loc[return_df.first_valid_index(), :] = 0.0\n return get_sharpe_ratio(return_df, df_type=const.RETURN_DATAFRAME, working_days=working_days)\n\n else:\n raise ValueError('Unknown dataframe type {}'.format(df_type))", "def calculate_score_pairs(hand_value,*args):\n # ratios=[1,10,100,1000,10000]\n ratios = CONST.RATIOS[:]\n return sum(map(lambda a,b:a/b, args, ratios))+hand_value", "def ratio_calc(first_strandI, second_strandI):\n if first_strandI + second_strandI != 0:\n Ratio = first_strandI / float(first_strandI + second_strandI)\n return Ratio\n else:\n return np.nan", "def approx_shoulders(upper_body_roi):\n height = upper_body_roi.shape[0]; width = upper_body_roi.shape[1]\n return (int(width / 6), int((height / 4) * 3)), (int((width / 6) * 5), int((height / 4) * 3))", "def s_multiplier(self):\n return 4 * np.pi * (self.bins[:, 1]/2)**2", "def MixRatio(e,p):\n\n return Epsilon*e/(p-e)", "def pss(self):\n return (self.table[0, 0] * self.table[1, 1] - self.table[0, 1] * self.table[1, 0]) / \\\n ((self.table[0, 0] + self.table[1, 0]) * (self.table[0, 1] + self.table[1, 1]))", "def hero_healing_per_min(self):\n return self._hero_healing_per_min", "def smdape(self) -> float:\n return float(np.median(2.0 * self._ae() / ((np.abs(self.true) + np.abs(self.predicted)) + EPS)))", "def stretch_factor(self):\n p = self._pants_decomposition\n\n # pick a curve to iterate\n c = PantsLamination.random(p)\n # print(c)\n\n cc = (self**100) * c\n # print(self**100)\n # print(cc)\n return float(sum(abs(x) for x in (self*cc).to_vector())) / \\\n sum(abs(x) for x in cc.to_vector())", "def robbins(counts):\n return float(singles(counts))/counts.sum()", "def normalized_effect_size(self):\n mus = self.mu + self.absolute_effects\n pop_mu = (mus * self.test_splits).sum()\n sigma2_m = (self.test_splits * np.square(mus - pop_mu)).sum()\n f = np.sqrt(sigma2_m) / self.sigma\n return f", "def calculate_shot(self, player_ships: list):\n board = Board(self.__size)\n prob_board = Board(self.__size)\n\n for move in self.__moves:\n x, y = move[1], move[2]\n board.shoot(x, y)\n prob_board.board[x][y] = -1000\n\n if move[0] == ShotResult.HIT:\n if any(ship.sunk and [anything, x, y] in ship.pieces for ship in player_ships):\n # part of a sunken ship; no need to increase neighbours probability\n continue\n\n for (i, j) in [(1, 0), (0, 1), (-1, 0), (0, -1)]:\n try: # easier to ask for forgiveness that permission :d\n if (ShotResult.HIT, x - i, y - j) in self.__moves: # opposite neighbour\n prob_board.board[x + i][y + j] += self.offset\n prob_board.board[x + i][y + j] += self.offset\n except IndexError:\n pass\n\n final_x, final_y = 0, 0\n max_prob = -1\n for s in player_ships:\n if not s.sunk: # the only time we use unsunken ships; we use just their size\n for i in range(self.__size):\n for j 
in range(self.__size):\n for o in range(0, 2): # for every (x, y, orientation) possible\n try:\n board.check(Ship(s.type, o, i, j))\n for offset in range(s.size):\n x, y = i - offset * o, j + offset * (not o)\n prob_board.board[x][y] += 1 # increase the probability of each piece\n except IllegalMove:\n pass\n\n for i in range(self.__size):\n for j in range(self.__size):\n if prob_board.board[i][j] > max_prob:\n final_x, final_y = i, j\n max_prob = prob_board.board[i][j]\n elif prob_board.board[i][j] == max_prob:\n if randint(0, 10) < 5: # random aspect to the ai, harder to predict\n final_x, final_y = i, j\n return final_x, final_y", "def sharpe(returns, freq=30, rfr=0):\n return (np.sqrt(freq) * np.mean(returns - rfr + eps)) / np.std(returns - rfr + eps)", "def _heuristic_weight(self, origin, destination):\r\n\r\n # A naive heuristics : straight line distance\r\n distance = abs(origin.position - destination.position)\r\n speed = ROAD_DEFAULT_MAX_SPEED\r\n \r\n return distance/speed", "def sag_ratio(V):\n\n Vmin = np.amin(V)\n Vend = V[-1]\n sr = (Vmin - Vend) / Vmin\n if sr < 0:\n print(\"Warning: sag ratio being negative indicates there is no sag\")\n return sr", "def rho(self, Ppump):\n\n etaP, EsatL, TR = self.etaP, self.EsatL, self.TR\n return(self.Psteady(Ppump) * etaP / (EsatL * TR * self.w0(Ppump)**2))", "def overall_reduction(self):\n return 84", "def sivina(self):\n return (self.r + self.g + self.b) / 3", "def calc_heuristic(self, state):\n h = 0\n board = state.board.array\n\n for i in range(self._n):\n for j in range(self._n):\n\n if board[i][j] != space_rep:\n tile_as_number = board[i][j]\n correct_x = (tile_as_number - 1) // self._n\n correct_y = (tile_as_number - 1) % self._n\n else:\n continue\n h += calc_diffs(i, j, correct_x, correct_y)\n return h", "def _calculate_score(lsh, minhash, total_num_events):\n neighbours = lsh.query(minhash)\n return float(len(neighbours)) / float(total_num_events)", "def quality(self) -> float:\n if self.get_cover_size() == 0:\n return 0\n else:\n if self.baseline == Baseline.COMPLEMENT:\n return self.__complement_quality()\n else:\n return self.__population_quality()", "def _comput_PSNR(self, input, target):\n shave = 4\n ch, h, w = input.size()\n input_Y = rgb2ycbcrT(input.cpu())\n target_Y = rgb2ycbcrT(target.cpu())\n diff = (input_Y - target_Y).view(1, h, w)\n\n diff = diff[:, shave:(h - shave), shave:(w - shave)]\n mse = diff.pow(2).mean()\n psnr = -10 * np.log10(mse)\n return psnr", "def rmse5 (a, p) :\n s = len(a)\n v = sum(map(sqre_diff, a, p), 0.0)\n return math.sqrt(v / s)", "def ret_vol_ratio(self) -> float:\n return self.geo_ret / self.vol", "def ratio_local_prod(self):\n if self.current_energy_produced == 0.0:\n return 1.0\n else:\n return 1. 
- self.export_grid / self.current_energy_produced", "def heuristic_combined_1_3(game, player) -> float:\n\n center_available_factor = get_center_available_factor(game, player)\n partition_possible_factor = get_partition_possible_factor(game, player)\n\n return float(center_available_factor + partition_possible_factor)", "def depolarization_ratio(self):\r\n if self._depol_ratio is not None:\r\n return round(self._depol_ratio,3)\r\n else:\r\n return self._depol_ratio", "def ratio_func(a, b):\n return a / b", "def shapley(self, R, t):\n\t n = self.nodes\n\n\t # phi contains the shapley values of nodes\n\t phi = [0 for i in range(n)]\n\n\t # MC, i.e., marginal contribution of each node\n\t # which reflects the change in coverage due to\n\t # the addition of node i in the set of initilly\n\t # activated nodes\n\t MC = [0 for i in range(n)]\n\n\t # randomly select t permutations from n! possible\n\t # permutations of nodes\n\t for j in range(t):\n\t temp = [0 for i in range(n)]\n\n\t # repeat the experiment R times (take the average)\n\t for r in range(R):\n\t self.theta = nprnd.random_sample((n,))\n\t self.deactivate_all()\n\t k = nprnd.permutation(n)\n\t for i in k:\n\t temp[i] += self.v(i)\n\n\t # Add the contribution for each permuation\n\t for i in range(n):\n\t MC[i] += temp[i]*1.00/R\n\n\t for i in range(n):\n\t phi[i] = (MC[i]*1.00)/t\n\n\t x = {i: phi[i] for i in range(n)}\n\t self.shapley_rank = sorted(x.items(), key=operator.itemgetter(1), reverse=True)\n\n\t return self.shapley_rank", "def feasible_ratio(self, solutions):\r\n count = np.zeros(len(solutions[0]))\r\n for x in solutions:\r\n count += x.unrepaired == x\r\n return count / float(len(solutions))", "def penalize_corners_heuristic(game, player):\n if game.is_loser(player):\n return float('-inf')\n\n if game.is_winner(player):\n return float('inf')\n\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n\n # Penalize player for moving to corner positions\n corner_weight = 2\n if is_curr_location_corner(game, game.get_player_location(player)):\n own_moves -= corner_weight\n\n return float(own_moves - opp_moves)", "def moving_average_ratio(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"moving_average_ratio\")", "def optimal_shu_osher_form(self):\n m=len(self)\n r = self.absolute_monotonicity_radius()\n v, alpha = self.canonical_shu_osher_form(r)\n beta = alpha / r\n if self.is_explicit():\n for i in range(1,len(self)+1):\n alpha[i,0]=1.-np.sum(alpha[i,1:])\n return alpha, beta", "def _compute_score(img_binary: np.ndarray, s: float) -> float:\n img_sheared = _shear_img(img_binary, s, 0)\n h = img_sheared.shape[0]\n\n img_sheared_mask = img_sheared > 0\n first_fg_px = np.argmax(img_sheared_mask, axis=0)\n last_fg_px = h - np.argmax(img_sheared_mask[::-1], axis=0)\n num_fg_px = np.sum(img_sheared_mask, axis=0)\n\n dist_fg_px = last_fg_px - first_fg_px\n col_mask = np.bitwise_and(num_fg_px > 0, dist_fg_px == num_fg_px)\n masked_dist_fg_px = dist_fg_px[col_mask]\n\n score = sum(masked_dist_fg_px ** 2)\n return score", "def __call__(self, relsSortedByScores, qrelDict):\n result = 0.\n postQty = len(qrelDict)\n\n pos = 0\n for i, rel in enumerate(relsSortedByScores):\n if rel > RELEVANCE_THRESHOLD:\n pos += 1.\n result += pos / (i + 1.)\n\n return result / postQty", "def shanten(self):\n if not self.is_concealed:\n return self.shanten_std\n\n return min(self.shanten_std, self.shanten_7, self.shanten_13)", "def calculateRatio(levelDims):\n highestReso = 
np.asarray(levelDims[0])\n lowestReso = np.asarray(levelDims[-1])\n Xratio, Yratio = highestReso/lowestReso\n return (Xratio, Yratio)", "def hss(self):\n return 2 * (self.table[0, 0] * self.table[1, 1] - self.table[0, 1] * self.table[1, 0]) / (\n (self.table[0, 0] + self.table[0, 1]) * (self.table[0, 1] + self.table[1, 1]) +\n (self.table[0, 0] + self.table[1, 0]) * (self.table[1, 0] + self.table[1, 1]))", "def rmse1 (a, p) :\n s = len(a)\n i = 0\n v = 0.0\n while i != s :\n v += sqre_diff(a[i], p[i])\n i += 1\n return math.sqrt(v / s)", "def _shrink_main(self, amt):\n self.ratio -= amt\n self.ratio = max(self.min_ratio, self.ratio)", "def __reduce(self):\n if self.denominator <0:\n self.denominator *= -1\n self.numerator *= -1\n gcd = math.gcd(int(self.denominator),int(self.numerator))\n if self.denominator != 0 and self.numerator!= 0:\n if gcd > 0:\n self.denominator /= gcd\n self.numerator /= gcd\n self.numerator = int(self.numerator)\n self.denominator = int(self.denominator)", "def ship_collecting_halite_coefficient(ship, gmap):\n ship_cargo_free = constants.MAX_HALITE - ship.halite_amount\n cell = gmap[ship.position].halite_amount or 10 ** -10\n return max((cell - ship_cargo_free) / cell, .1)", "def see(p, y, yHat):\n n = y.shape[0]\n numer = ((y - yHat) ** 2).sum()\n denom = n - p - 1\n if (denom == 0):\n s = 0\n elif ( (numer / denom) < 0 ):\n s = 0.001\n else:\n s = (numer / denom) ** 0.5\n return s", "def P(lag):\n N = len(SP)\n ratios = SP[lag:N]/SP[0:N-lag]\n P = 100.*(ratios-1.)\n return P", "def running_ratio(self) -> np.ndarray:\n result_array = self.result_array\n result = result_array.sum(axis=1) / result_array.sum()\n\n if isinstance(result, np.ndarray):\n result_out = result\n else:\n result_out = np.array(result)\n\n return result_out", "def tunnel1_rekey_fuzz_percentage(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"tunnel1_rekey_fuzz_percentage\")", "def tunnel1_rekey_fuzz_percentage(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"tunnel1_rekey_fuzz_percentage\")", "def get_sharpe(self,df, df_type = \"returns\"):\n if df_type == \"price\":\n df = df.pct_change()\n sharpe = (df.mean() * 252) / (df.std() * np.sqrt(252))\n return sharpe", "def tunnel1_rekey_fuzz_percentage(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"tunnel1_rekey_fuzz_percentage\")", "def taper_ratio(self) -> float:\n return self.xsecs[-1].chord / self.xsecs[0].chord", "def __heuristic1__(game, player):\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n if game.move_count < ((game.height * game.width)/2):\n return float(own_moves - 3 * opp_moves)\n else:\n return float(own_moves - opp_moves)", "def __get_rho__(self) -> float:\n\n train_data_sum = np.sum(self.train_data)\n\n data_length = len(self.train_data)\n\n rho = train_data_sum / (data_length * self.num_neurons)\n\n return rho", "def getStarRating(waveHeight, windDir, avgWind, tideHeight):\n\n starRating = 0\n\n # wave height\n if waveHeight > 2:\n starRating += 4\n elif waveHeight > 1.6:\n starRating += 3\n elif waveHeight > 1.4:\n starRating += 2\n elif waveHeight > 1.2:\n starRating += 1\n\n # wind direction\n if windDir >= 270 or windDir <= 30:\n starRating += 1\n\n # wind strength\n if avgWind < 15:\n starRating += 1\n\n # tide\n if tideHeight < 1.2:\n starRating += 1\n elif tideHeight > 2.2:\n starRating = 1\n\n # check upper bound of 5 stars\n if starRating > 5:\n starRating = 5\n elif waveHeight < 1:\n starRating = 
0\n\n return starRating", "def __calc_s(self, df):\n df.loc[:, \"avg_num_drivers\"] = df.idle + df.incoming\n s = df.total / df.avg_num_drivers # df.total := amount of demand\n s[s > 1] = 1\n s[np.isnan(s)] = 0.0001\n s[np.isinf(s)] = 1\n\n df.loc[:, \"prob_of_s\"] = s\n df = df[[\"zone_id\", \"prob_of_s\"]]\n return df", "def get_chic_ratio(rfile, var, selection):\n h_chic1 = get_histogram(rfile, var, 'chic1', selection)\n h_chic2 = get_histogram(rfile, var, 'chic2', selection)\n\n ## apply same minimum bin content cut as on data\n set_bins_to_zero(h_chic1, thresh=r.TMath.Sqrt(10))\n set_bins_to_zero(h_chic2, thresh=r.TMath.Sqrt(10))\n\n ratio = h_chic2.Clone(h_chic2.GetName().replace('chic2', 'ratio'))\n ratio.Divide(h_chic1)\n\n return ratio", "def heuristic_3_partition(game, player) -> float:\n\n partition_possible_factor = get_partition_possible_factor(game, player)\n\n return float(partition_possible_factor)", "def find_rsh(v, j):\r\n\r\n zp = sp.where(v[:-1] * v[1:] <= 0)[0][0] #make a list of A[x] * A[x -1] without usinf \"for\" loop in original python.\r\n m = np.polyfit(v[(zp - 5):(zp + 5)], j[(zp -5):(zp + 5)], 1)\r\n return 1/abs(m[0]) * 1000 #[Ohm cm^2]\r", "def stump_S(z) :\n\n if z > 0:\n sz = sqrt(z) \n return (sz - sin(sz))/pow(sz,3)\n elif z < 0 :\n s_z = sqrt(-z) \n # According to the equation the denominatori is pow(sqrt(z),3)\n return (sinh(s_z) - s_z)/pow(s_z,3)\n else :\n return 0.1666666666666666" ]
[ "0.6567362", "0.6437101", "0.63409936", "0.6130169", "0.6042319", "0.60273135", "0.5931197", "0.59244883", "0.5785139", "0.5736135", "0.57228225", "0.5692179", "0.5680298", "0.5669893", "0.5615704", "0.5592618", "0.55892605", "0.55826575", "0.55295515", "0.5504721", "0.54996413", "0.5473298", "0.54647964", "0.54578286", "0.54437244", "0.543583", "0.5428085", "0.54129666", "0.5402032", "0.53897303", "0.53697985", "0.536444", "0.53550136", "0.535041", "0.5331226", "0.5291232", "0.5263956", "0.52426195", "0.5240613", "0.52399415", "0.5217957", "0.52147645", "0.5210385", "0.5210333", "0.5209263", "0.52022266", "0.5191306", "0.51909363", "0.5187347", "0.5179323", "0.51624197", "0.51614225", "0.515867", "0.5137342", "0.51318896", "0.5107458", "0.50817204", "0.5081195", "0.5075762", "0.50741524", "0.50739396", "0.506926", "0.50677794", "0.5064654", "0.505979", "0.50545675", "0.50521237", "0.5039855", "0.5031254", "0.50305873", "0.5027631", "0.5024608", "0.5023462", "0.50232977", "0.5018493", "0.5011133", "0.5010861", "0.50092167", "0.5007134", "0.5001754", "0.4998929", "0.4997379", "0.49945337", "0.49928737", "0.4983138", "0.49815807", "0.4979176", "0.49780035", "0.49780035", "0.49762964", "0.49722663", "0.49717197", "0.49631968", "0.4952857", "0.4949538", "0.49485165", "0.49399343", "0.49390876", "0.49380198", "0.4934259" ]
0.67490816
0
Plot stock prices with a custom title and meaningful axis labels.
def plot_normalized_data(df, title="Daily portfolio value and SPY", xlabel="Date", ylabel="Normalized price"):
    plot_data(df/df.iloc[0], title, xlabel, ylabel)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_data(df, title=\"normalized Stock prices\", ylabel=\"Price\", xlabel=\"Date\"):\n plt.clf()\n ax = df.plot(title=title, fontsize=12)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n plt.savefig('files/output/' + title + '.png')", "def plot_data(df, title=\"normalized Stock prices\", ylabel=\"Price\", xlabel=\"Date\" ):\n plt.clf()\n ax = df.plot(title=title, fontsize=12)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n plt.savefig('files/output/'+title+'.png')", "def plot_equity_prices(ticker, prices):\n\n # define x-axis data points\n x = np.linspace(0, prices.shape[0], prices.shape[0])\n\n plt.plot(x, prices[ticker], linewidth=1, color='b', label=ticker)\n plt.legend(loc='upper left')\n plt.xlabel('Time (days)')\n plt.ylabel('Price')\n plt.title('Price vs Time: ' + ticker)\n plt.show()", "def plot_data(self, df, title=\"Bitcoin Data\", xlabel=\"Date\", ylabel=\"Price\"):\n title = title.strip('.csv')\n ax = df.plot(title=title, fontsize=12)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n plt.show()", "def set_plot_labels():\n plt.xlabel(\"Time (s)\")\n plt.ylabel(\"Temperature (F)\")\n plt.title('Temperature Time Series')", "def plot_equity_prices(ticker, prices):\n\n # define x-axis data points\n x = np.linspace(0, prices.shape[0], prices.shape[0])\n\n figure = plt.figure()\n axis = figure.add_subplot(111)\n\n axis.plot(x, prices[ticker], linewidth=1, color='b', label=ticker)\n axis.legend(loc='upper left')\n axis.set_xlabel('Time (days)')\n axis.set_ylabel('Price')\n axis.set_title('Price vs Time: ' + ticker)\n\n return figure", "def plot_rsi(stocks_data, stock):\n stock = stocks_data[stock]\n closes = stock['Close'].values\n plt.subplot(211)\n plt.plot(closes)\n\n labels, ifr = find_rsi(stock)\n plt.subplot(212)\n plt.plot(ifr)\n # plt.xlabel(labels)\n\n plt.show()", "def stock_value_history(stock_values, value_name='Close'):\n ticker = stock_values.name\n dates = stock_values.index\n \n # hover tool\n phover = HoverTool(tooltips=[(\"price\", \"$y\"),])\n\n # plot\n p = figure(x_axis_type = \"datetime\", tools=[\"pan,wheel_zoom,box_zoom,reset,resize\", phover])\n\n p.title = \"{} Closing Prices\".format(ticker)\n p.title_text_font_size = '12'\n p.title_text_font_style = 'bold'\n\n # x axis\n p.xaxis.axis_label = 'Date'\n p.xaxis.axis_label_text_font_size = '9'\n\n # y axis\n p.yaxis.axis_label = 'Price (US$)'\n p.yaxis.axis_label_text_font_size = '9'\n\n line1_name = value_name\n p.line(np.array(dates, 'M64'), stock_values[value_name], legend=value_name,\n color='#182b8b', **line_style)\n\n line1_name = 'SMA 30'\n p.line(np.array(stock_values.index, 'M64'), stock_values[line1_name], legend=line1_name,\n color='#5477a0', **line_style)\n\n line2_name = 'SMA 100'\n p.line(np.array(stock_values.index, 'M64'), stock_values[line2_name], legend=line2_name,\n color='#dfbd4d', **line_style)\n\n line3_name = 'SMA 300'\n p.line(np.array(stock_values.index, 'M64'), stock_values[line3_name], legend=line3_name,\n color='#df1b06', **line_style)\n\n # set plot style\n p.plot_width = 800\n p.plot_height = 300\n p.grid.grid_line_alpha=0.3\n\n # set grid\n # change just some things about the x-grid\n p.xgrid.grid_line_color = None\n\n # change just some things about the y-grid\n p.ygrid.grid_line_alpha = 0.5\n p.ygrid.grid_line_dash = [6, 4]\n\n # legend\n p.legend.orientation = \"bottom_left\"\n p.legend.label_text_font_size = '3'\n \n return p", "def set_plot_title_labels(title, x_label, y_label):\n plt.title(title)\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n plt.legend(loc='best')", 
"def graph(stock):\n output=stock_price(stock)\n return plt.plot(output)", "def volatility_factor_plot(prices: list, dates: list, vf_data: VFStopsResultType,\n green_zone_x_values: List[list], red_zone_x_values: List[list],\n yellow_zone_x_values: List[list], y_range: float, minimum: float,\n text_str: str = \"\", str_color: str = \"\", **kwargs):\n # pylint: disable=too-many-locals,too-many-branches,too-many-statements\n register_matplotlib_converters()\n\n title = kwargs.get('title', '')\n save_fig = kwargs.get('save_fig', False)\n filename = kwargs.get('filename', 'temp_candlestick.png')\n\n stop_loss_objects = vf_data.data_sets\n\n shown_stop_loss = f\"VF: {np.round(vf_data.vf.curated, 3)}\\n\"\n if vf_data.current_status.status.value != 'stopped_out':\n shown_stop_loss += f\"Stop Loss: ${np.round(vf_data.stop_loss.curated, 2)}\"\n else:\n shown_stop_loss += \"Stop Loss: n/a\"\n\n fig, ax_handle = plt.subplots()\n\n date_indexes = [datetime.strptime(date, '%Y-%m-%d').date() for date in dates]\n ax_handle.plot(date_indexes, prices, color='black')\n\n # Set the tick spacing (this is because dates crowd easily)\n mid_tick_size = int(len(date_indexes) / 4)\n ax_handle.xaxis.set_ticks([\n date_indexes[0], date_indexes[mid_tick_size], date_indexes[mid_tick_size * 2],\n date_indexes[mid_tick_size * 3], date_indexes[-1]\n ])\n\n y_start = minimum - (y_range * 0.05)\n height = y_range * 0.02\n\n for stop in stop_loss_objects:\n sub_dates = [date_indexes[index] for index in stop.time_index_list]\n ax_handle.plot(sub_dates, stop.caution_line, color='gold')\n ax_handle.plot(sub_dates, stop.stop_loss_line, color='red')\n\n for green_zone in green_zone_x_values:\n start = mdates.date2num(date_indexes[green_zone[0]])\n end = mdates.date2num(date_indexes[green_zone[-1]])\n width = end - start\n ax_handle.add_patch(\n Rectangle(\n (start, y_start),\n width,\n height,\n edgecolor='green',\n facecolor='green',\n fill=True\n )\n )\n\n for red_zone in red_zone_x_values:\n start = mdates.date2num(date_indexes[red_zone[0]])\n end = mdates.date2num(date_indexes[red_zone[-1]])\n width = end - start\n ax_handle.add_patch(\n Rectangle(\n (start, y_start),\n width,\n height,\n edgecolor='red',\n facecolor='red',\n fill=True\n )\n )\n\n for yellow_zone in yellow_zone_x_values:\n start = mdates.date2num(date_indexes[yellow_zone[0]])\n end = mdates.date2num(date_indexes[yellow_zone[-1]])\n width = end - start\n ax_handle.add_patch(\n Rectangle(\n (start, y_start),\n width,\n height,\n edgecolor='yellow',\n facecolor='yellow',\n fill=True\n )\n )\n\n ax_handle.set_title(title)\n\n if len(text_str) > 0 and len(str_color) > 0:\n new_start = minimum - (y_range * 0.2)\n new_end = minimum + (y_range * 1.02)\n ax_handle.set_ylim(new_start, new_end)\n props = dict(boxstyle='round', facecolor='white', alpha=0.25)\n ax_handle.text(\n 0.02,\n 0.02,\n text_str,\n color=str_color,\n transform=ax_handle.transAxes,\n bbox=props\n )\n\n if len(shown_stop_loss) > 0:\n props = dict(boxstyle='round', facecolor='white', alpha=0.25)\n ax_handle.text(\n 0.02,\n 0.90,\n shown_stop_loss,\n transform=ax_handle.transAxes,\n bbox=props\n )\n\n try:\n if save_fig:\n temp_path = os.path.join(\"output\", \"temp\")\n if not os.path.exists(temp_path):\n # For functions, this directory may not exist.\n plt.close(fig)\n plt.clf()\n return\n\n filename = os.path.join(temp_path, filename)\n if os.path.exists(filename):\n os.remove(filename)\n plt.savefig(filename)\n\n else:\n plt.show()\n\n except: # pylint: disable=bare-except\n print(\n 
f\"{utils.WARNING}Warning: plot failed to render in 'volatility factor plot' of \" +\n f\"title: {title}{utils.NORMAL}\")\n\n plt.close('all')\n plt.clf()", "def peek(self, title=\"GOES Xray Flux\", **kwargs):\n # Check we have a timeseries valid for plotting\n self._validate_data_for_plotting()\n\n figure = plt.figure()\n axes = plt.gca()\n\n dates = matplotlib.dates.date2num(parse_time(self.to_dataframe().index).datetime)\n\n axes.plot_date(dates, self.to_dataframe()['xrsa'], '-',\n label=r'0.5--4.0 $\\AA$', color='blue', lw=2, **kwargs)\n axes.plot_date(dates, self.to_dataframe()['xrsb'], '-',\n label=r'1.0--8.0 $\\AA$', color='red', lw=2, **kwargs)\n\n axes.set_yscale(\"log\")\n axes.set_ylim(1e-9, 1e-2)\n axes.set_title(title)\n axes.set_ylabel('Watts m$^{-2}$')\n axes.set_xlabel(datetime.datetime.isoformat(self.to_dataframe().index[0])[0:10])\n\n ax2 = axes.twinx()\n ax2.set_yscale(\"log\")\n ax2.set_ylim(1e-9, 1e-2)\n labels = ['A', 'B', 'C', 'M', 'X']\n centers = np.logspace(-7.5, -3.5, len(labels))\n ax2.yaxis.set_minor_locator(mticker.FixedLocator(centers))\n ax2.set_yticklabels(labels, minor=True)\n ax2.set_yticklabels([])\n\n axes.yaxis.grid(True, 'major')\n axes.xaxis.grid(False, 'major')\n axes.legend()\n\n # TODO: display better tick labels for date range (e.g. 06/01 - 06/05)\n formatter = matplotlib.dates.DateFormatter('%H:%M')\n axes.xaxis.set_major_formatter(formatter)\n\n axes.fmt_xdata = matplotlib.dates.DateFormatter('%H:%M')\n figure.autofmt_xdate()\n\n return figure", "def plotstocksdata(datadict,formats):\n\t#plot data\n\tf = plt.figure()\n\tax1 = plt.subplot(111)\n\tdata = datadict[\"yahoo\"]\n\tyahoo = ax1.plot(data['date'],data['val'],formats[\"yahoo\"], label = 'Yahoo Stock Value',linewidth = 1.5)\n\tdata = datadict[\"google\"]\n\tgoogle = ax1.plot(data['date'],data['val'],formats[\"google\"], label = 'Google Stock Value',linewidth = 1.5)\n\tax2 = ax1.twinx()\n\tdata = datadict[\"nytmp\"]\n\tnytmp = ax2.plot(data['date'],data['val'],formats[\"nytmp\"],label = 'NY Mon. 
High Temp',linewidth=1.5)\n\tax1.set_xlabel('Date (MJD)')\n\tax1.set_ylabel('Value (Dollars')\n\tax1.set_ylim((-20,765))\n\tax1.yaxis.set_minor_locator(plt.MultipleLocator(20))\n\tax1.set_xlim((48800, 55600))\n\tax1.xaxis.set_minor_locator(plt.MultipleLocator(200))\n\t#plt.show() #ISAAC EDIT\n\tax2.set_ylim((-150, 100))\n\tax2.set_ylim((-150, 100))\n\tax2.set_ylabel('Temperature ($^\\circ$F)')\n\tax2.yaxis.set_minor_locator(plt.MultipleLocator(10))\n\tplt.title('New York Temperature, Google, and Yahoo!', fontname = 'serif',fontsize = 18)\n\tplts = yahoo+google+nytmp\n\tlabels = [l.get_label() for l in plts]\n\tax1.legend(plts, labels, loc=(0.025,0.5) ,frameon=False, prop={'size':11}, markerscale = 2)\n\tplt.show()", "def plot_data(stock, data):\n data['Date'] = pd.to_datetime(data['Date']).dt.date\n x = np.arange(0,len(data))\n fig, ax = plt.subplots()\n ax.plot(x,data['High_Norm'])\n ax.plot(x, data['Mentions_Norm'])\n ax.legend([f\"{stock} Daily High Price\", f\"{stock} Mentions\"])\n plt.ylabel('Normalized Value')\n plt.xlabel('Date')\n ax.set_xticklabels(data['Date'])\n plt.xticks(np.arange(0,len(x), 10), data['Date'][::10])\n plt.title(f\"Mentions and Stock price for {stock} normalized\\n between {data['Date'][0]} and {data['Date'][len(data)-1]}\")\n fig.tight_layout()\n plt.show(block=False)\n plt.pause(2)\n plt.close()\n # Save figure to file\n figname = f\"figures/{stock}_Mentions_Price\"\n fig.savefig(figname)", "def set_axis_title_labels(ax, title, x_label, y_label):\n ax.set_title(title)\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)", "def _plot(self, df, head, title, lines, verbose: bool = False):\n fig = go.Figure(layout=set_layout())\n\n if isinstance(lines, str):\n lines = [lines]\n elif not isinstance(lines, list):\n raise ValueError(\"Only string or list is valid type for lines.\")\n\n for n in lines:\n fig.add_trace(self._plot_line(df, head=head, y=n, line_name=n.upper()))\n\n if verbose:\n fig.add_trace(self._plot_stock_data(self._df, head))\n\n fig.update_layout(\n title_text=f\"{title} Chart ({self.stock_code})\",\n xaxis_rangeslider_visible=False,\n )\n fig.show()", "def plot_stock_price(mu,sigma):\n plt.figure(figsize=(9,4))\n for i in range(8):\n plt.plot(b.stock_price(mu=mu,\n sigma=sigma,\n dt=0.1))\n plt.legend(['Scenario-'+str(i) for i in range(1,6)],\n loc='upper left')\n plt.hlines(y=100,xmin=0,xmax=1460,\n linestyle='--',color='k')\n plt.show()", "def stock_volume_history(stock_values):\n ticker = stock_values.name\n dates = stock_values.index\n \n # stock volume plot \n p2hover = HoverTool(tooltips=[(\"volume\", \"$y\"),])\n\n p = figure(x_axis_type = \"datetime\")\n\n p.title = \"{} Daily Volume\".format(ticker)\n p.title_text_font_size = '12'\n p.title_text_font_style = 'bold'\n\n # x axis\n p.xaxis.axis_label = 'Date'\n p.xaxis.axis_label_text_font_size = '9'\n\n # y axis\n p.yaxis.axis_label = 'Kilo Transactions'\n p.yaxis.axis_label_text_font_size = '9'\n p.yaxis[0].formatter = PrintfTickFormatter(format=\"%3d\")\n\n p.quad(top=stock_values['Volume'], bottom=0, left=dates, right=dates,\n fill_color=\"#036564\", line_color=\"#033649\")\n\n p.line(np.array(dates, 'M64'), stock_values['Volume 30'],\n color='#dfbd4d', **line_style)\n\n p.line(np.array(dates, 'M64'), stock_values['Volume 300'],\n color='#df1b06', **line_style)\n\n # set plot style\n p.plot_width = 800\n p.plot_height = 200\n p.grid.grid_line_alpha=0.3\n\n # set grid\n # change just some things about the x-grid\n p.xgrid.grid_line_color = None\n\n # change just some things about the 
y-grid\n p.ygrid.grid_line_alpha = 0.5\n p.ygrid.grid_line_dash = [6, 4]\n\n return p", "def plot_equity_returns(ticker, returns):\n\n # define x-axis data points\n x = np.linspace(0, returns.shape[0], returns.shape[0])\n\n figure = plt.figure()\n axis = figure.add_subplot(111)\n\n axis.plot(x, returns[ticker], linewidth=1, color='b', label=ticker)\n axis.legend(loc='upper left')\n axis.set_xlabel('Time (days)')\n axis.set_ylabel('Daily Return')\n axis.set_title('Return vs Time for: ' + ticker)\n\n return figure", "def plotTicker(ticker, startdate, enddate, fillcolor):\n\n fh = finance.fetch_historical_yahoo(ticker, startdate, enddate) \n r = mlab.csv2rec(fh); \n fh.close()\n r.sort()\n\n ### plot the relative strength indicator\n prices = r.adj_close\n\n ### plot the price and volume data\n \n ax.plot(r.date, prices, color=fillcolor, lw=2, label=ticker)\n ax.legend(loc='top right', shadow=True, fancybox=True)\n\n # set the labels rotation and alignment \n for label in ax.get_xticklabels():\n # To display date label slanting at 30 degrees\n label.set_rotation(30)\n label.set_horizontalalignment('right')\n\n ax.fmt_xdata = mdates.DateFormatter('%Y-%m-%d')", "def plot_equity_price_analytics(ticker, e_ma, s_ma, t_wap, v_wap, close_prices, interval):\n\n # define x-axis data points\n x = np.linspace(0, close_prices.shape[0], close_prices.shape[0])\n\n figure = plt.figure()\n axis = figure.add_subplot(111)\n\n axis.plot(x, close_prices, linewidth=1, color='k', label='Close Price')\n axis.plot(x, e_ma, linewidth=1, color='r', label='EMA Price')\n axis.plot(x[interval:], s_ma, linewidth=1, color='b', label='SMA Price')\n axis.plot(x[interval:], t_wap, linewidth=1, color='g', label='TWAP Price')\n axis.plot(x[interval:], v_wap, linewidth=1, color='m', label='VWAP Price')\n axis.legend(loc='upper left')\n axis.set_xlabel('Time (days)')\n axis.set_ylabel('Price')\n axis.set_title('Price vs Time: ' + ticker)\n\n return figure", "def plot_equity_price_analytics(ticker, e_ma, s_ma, t_wap, v_wap, mean_prices):\n\n # define x-axis data points\n x = np.linspace(0, mean_prices.shape[0], mean_prices.shape[0])\n\n plt.plot(x, mean_prices, linewidth=1, color='k', label='Mean Price')\n plt.plot(x, e_ma, linewidth=1, color='r', label='EMA Price')\n plt.plot(x[interval:], s_ma, linewidth=1, color='b', label='SMA Price')\n plt.plot(x[interval:], t_wap, linewidth=1, color='g', label='TWAP Price')\n plt.plot(x[interval:], v_wap, linewidth=1, color='m', label='VWAP Price')\n plt.legend(loc='upper left')\n plt.xlabel('Time (days)')\n plt.ylabel('Price')\n plt.title('Price vs Time: ' + ticker)\n plt.show()", "def plot_equity_returns(ticker, returns):\n\n # define x-axis data points\n x = np.linspace(0, returns.shape[0], returns.shape[0])\n\n plt.plot(x, returns[ticker], linewidth=1, color='b', label=ticker)\n plt.legend(loc='upper left')\n plt.xlabel('Time (days)')\n plt.ylabel('Daily Return')\n plt.title('Return vs Time for: ' + ticker)\n plt.show()", "def make_plot(self):\n self.ax[0].set_ylabel(r'$C_{{\\ell}}^{{\\kappa\\kappa}}$')\n self.ax[1].set_ylabel('$\\mathrm{rel. dev. 
[\\%]$}')\n self.ax[1].set_xlabel(r'$\\ell$')", "def mplot(m, xlabel='', ylabel='', title='', custom=(7,7)):\n lag = ps.lag_spatial(m.w, m.z)\n fit = ps.spreg.OLS(m.z[:, None], lag[:,None])\n\n # Customize plot\n fig = plt.figure(figsize=custom)\n ax = fig.add_subplot(111)\n\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n fig.suptitle(title)\n\n ax.scatter(m.z, lag, s=60, color='k', alpha=.6)\n ax.plot(lag, fit.predy, color='r')\n\n ax.axvline(0, alpha=0.5)\n ax.axhline(0, alpha=0.5)\n\n return fig", "def plot_coefs(data, x_label, y_label, title, kind = 'barh', style = 'seaborn-darkgrid',\n figsize = (10, 8)):\n\n with plt.style.context(style):\n \n ax = data.plot(kind=kind, figsize = figsize, rot=45)\n \n if kind == 'barh':\n \n ax.xaxis.set_major_formatter(mpl.ticker.StrMethodFormatter('${x:,.0f}'))\n ax.set_yticklabels(ax.get_yticklabels(), ha='right')\n ax.axvline(color='k')\n ax.set(xlabel = x_label, ylabel = y_label, title = title)\n \n else:\n ax.yaxis.set_major_formatter(mpl.ticker.StrMethodFormatter('${x:,.0f}'))\n ax.set_xticklabels(ax.get_xticklabels(), ha='right')\n ax.axhline(color='k')\n ax.set(xlabel = x_label, ylabel = y_label, title = title)\n\n return ax", "def setlabels(self, title=None, Xaxis=None, Yaxis=None):\n self.data['title'] = title\n self.data['xlabel'] = Xaxis\n self.data['ylabel'] = Yaxis", "def plotEquivalenceClass(self, inputData, title, xAxisTitle, yAxisTitle, chartName):\n\n labels = [\"label%d\" % i for i in range(len(inputData))]\n x = range(len(inputData))\n y = inputData\n\n tmpX =[]\n for i in x:\n if i < 10:\n tmpX.append(\"0\" + str(i))\n else:\n tmpX.append(str(i))\n #print tmpX\n #TODO: DISPLAY XTICKS IN HOUR FORMAT\n plt.xticks(x, fontsize=7)\n #plt.yticks(y, fontsize=2)\n plt.bar(x , y , align='center' , color='blue', alpha=0.8)\n\n plt.xlabel(xAxisTitle, fontsize=11)\n plt.ylabel(yAxisTitle, fontsize=11)\n plt.title(title)\n\n fileName = os.getcwd() + self.figPath + chartName + \".png\"\n\n if (platform.system() == 'Darwin' or platform.system() == 'Linux'):\n if fileName.count(\"\\latex_report\\latex_template\") > 1:\n fileName = fileName.replace('/latex_report/latex_template', '', 1)\n else:\n if fileName.count(\"\\latex_report\\latex_template\") > 1:\n fileName = fileName.replace('\\\\latex_report\\\\latex_template', '', 1)\n\n\n\n plt.savefig(fileName, format='png', dpi=600)\n plt.close()", "def plot(self):\n fig, ax = plt.subplots()\n ticklabels = [item.strftime('%b %d') for item in self.series.index]\n ax.xaxis.set_major_formatter(ticker.FixedFormatter(ticklabels))\n\n plt.ylabel('#Cases')\n i = 0\n for y in self.countries:\n plt.plot(ticklabels, self.series[y], GRAPH_FORMATS[i], label=y)\n i += 1\n ax.set_xticklabels(ticklabels, rotation='vertical', fontsize=10)\n plt.legend()\n plt.grid()\n if self.log:\n plt.yscale(\"log\")\n plt.show()", "def plot_stock_prices(self, ins_id):\n # creating api-object\n # using api-object to get stock prices from API\n stock_prices = self._borsdata_api.get_instrument_stock_prices(ins_id)\n # calculating/creating a new column named 'sma50' in the table and\n # assigning the 50 day rolling mean to it\n stock_prices['sma50'] = stock_prices['close'].rolling(window=50).mean()\n # filtering out data after 2015 for plot\n filtered_data = stock_prices[stock_prices.index > dt.datetime(2015, 1, 1)]\n # plotting 'close' (with 'date' as index)\n plt.plot(filtered_data['close'], color='blue', label='close')\n # plotting 'sma50' (with 'date' as index)\n plt.plot(filtered_data['sma50'], color='black', label='sma50')\n 
# show legend\n plt.legend()\n # show plot\n plt.show()", "def plot_selected(df, title='title', columns=[], shouldNormalize=True, symbol='any stock'):\n # df = df[columns][start_index:end_index]\n # df = df.loc[start_index:end_index, columns]\n df = df.loc[:, columns]\n ylabel = \"Price\"\n normal = \"un normalized\"\n if shouldNormalize:\n df = normalize(df.loc[:, ['Close', 'sma200']])\n ylabel = \"%\"\n normal = \"normalized\"\n # print('df.shape in plot=',df.shape)\n plot_data(df, title=title, ylabel=ylabel)", "def update_price_figure(tickers, price):\n\n return {\n \"data\": [\n {\n \"x\": [date for date in prices.loc[(prices.ticker == stock)][\"date\"]],\n \"y\": [p for p in prices.loc[(prices.ticker == stock)][price]],\n \"type\": \"scatter\",\n \"mode\": \"lines\",\n \"name\": stock,\n }\n for stock in tickers\n ],\n \"layout\": {\n \"title\": \"Stock Price - (%s)\" % \" & \".join(tickers),\n \"xaxis\": {\"title\": \"Date\"},\n \"yaxis\": {\"title\": \"Price\"},\n },\n }", "def plot_selected(df, title='title', columns=[], shouldNormalize = True, symbol='any stock'):\n #df = df[columns][start_index:end_index]\n #df = df.loc[start_index:end_index, columns]\n df = df.loc[:, columns]\n ylabel=\"Price\"\n normal = \"un normalized\"\n if shouldNormalize:\n df = normalize(df.loc[:,['Close', 'sma200']])\n ylabel = \"%\"\n normal = \"normalized\"\n #print('df.shape in plot=',df.shape)\n plot_data(df, title=title, ylabel=ylabel)", "def plot_high_low_changes(stock, data):\n data['Date'] = pd.to_datetime(data['Date']).dt.date\n x = np.arange(0,len(data))\n fig, ax = plt.subplots()\n ax.plot(x,data['Change_High_Low_Norm'])\n ax.plot(x, data['Mentions_Norm'])\n ax.legend([F\"{stock} Daily High, Low Change\", f\"{stock} Mentions\"])\n plt.ylabel('Normalized Value')\n plt.xlabel('Date')\n ax.set_xticklabels(data['Date'])\n plt.xticks(np.arange(0,len(x), 10), data['Date'][::10])\n plt.title(f\"Mentions and Daily High Low Stock Change for {stock} normalized\\n between {data['Date'][0]} and {data['Date'][len(data)-1]}\")\n fig.tight_layout()\n plt.show(block=False)\n plt.pause(2)\n plt.close()\n # Save figure to file\n figname = f\"figures/{stock}_High_Low_Changes_Mentions\"\n fig.savefig(figname)", "def plot_data(data):\n\n print(\"Plotting.\")\n\n # Changing dates to floating point, also moves Date out of index\n df_ohlc = data.reset_index()\n df_ohlc['index'] = df_ohlc['index'].map(mdates.date2num)\n df_ohlc.rename(columns={'index': 'Date'}, inplace=True)\n\n # Rearrange columns for ohlc\n columns = ['Date', 'Open', 'High', 'Low', 'Close']\n df_ohlc = df_ohlc[columns]\n\n fig = plt.figure()\n ax1 = plt.subplot()\n ax1.xaxis_date()\n\n candlestick_ohlc(ax1, df_ohlc.values, width=.5, colorup='g', colordown='r')\n\n plt.ylabel(\"Price\")\n plt.xlabel(\"Date\")", "def set_labels(x, y=''):\n plt.xlabel(x)\n plt.ylabel(y)", "def plot_rel_data_v2(multiple_states_data, \n x_label, \n y_label, \n title_str, \n x_tick=1, \n y_tick=0.2, \n fig_length=16,\n fig_breadth=9,\n x_label_font_size=18,\n y_label_font_size=16):\n fig, ax = plt.subplots(1,1,figsize=(fig_length, fig_breadth)) \n multiple_states_data.plot(ax=ax)\n ax.xaxis.label.set_size(x_label_font_size)\n ax.yaxis.label.set_size(y_label_font_size)\n ax.set(xlabel=x_label, ylabel=y_label)\n ax.set_title(title_str)\n ax.set_xticks(np.arange(multiple_states_data.index.min(), multiple_states_data.index.max(), x_tick))\n ax.set_yticks(np.arange(multiple_states_data.min().min(), multiple_states_data.max().max(), y_tick))\n 
print(multiple_states_data.describe())\n return ax", "def plot_data(data, title):\n plt.title(title)\n plt.plot(range(len(data)), data[:, 0], 'r-', label='x')", "def set_title(self, title):\n self.axplot.set_title(title)", "def PlotTimeSeries(ticker, years_ago=5, verbose_mode=False):#, months_ago=0): \n \n # There are two Yahoo Modules we can use to pull our data (closeHist)\n # We'll pull from one and if we get an error will use the alternate\n try:\n closeHist = pd.DataFrame(yf.download(ticker,\n period='max', \n progress=False)['Close']).rename({'Close':'Price'}, axis=1)\n #closeHist = pd.DataFrame(yf.Ticker(ticker).history(period='max')['Close']).rename({'Close':'Price'}, axis=1)\n closeHist.index = closeHist.index.to_pydatetime()\n closeHist.index.name = 'Date'\n except json.JSONDecodeError:\n closeHist = pd.DataFrame(y_fin.get_data(ticker)['close']).rename({'close':'Price'}, axis=1)\n closeHist.index = closeHist.index.to_pydatetime()\n closeHist.index.name = 'Date'\n # Trim our data to years_ago\n closeHist = closeHist[closeHist.index > dt.datetime.now() + relativedelta(years=-years_ago)]\n closeHist.reset_index(inplace=True)\n #Calculate monthly avg. Price\n closeHist['Month'] = closeHist.Date.apply(lambda x: dt.date(x.year, x.month, 1))\n closeHist = closeHist.groupby('Month').last().rename({'Price':'Price(Monthly avg.)'}, axis=1)\n closeHist['x_index'] = pd.Series(range(len(closeHist.index)), closeHist.index)\n\n # Find Peaks and Troughs (Local Maximums and Minimums)\n MinSeries = closeHist['Price(Monthly avg.)'][(closeHist['Price(Monthly avg.)'].shift(1) > closeHist['Price(Monthly avg.)']) & \n (closeHist['Price(Monthly avg.)'].shift(-1) > closeHist['Price(Monthly avg.)'])]\n MaxSeries = closeHist['Price(Monthly avg.)'][(closeHist['Price(Monthly avg.)'].shift(1) < closeHist['Price(Monthly avg.)']) & \n (closeHist['Price(Monthly avg.)'].shift(-1) < closeHist['Price(Monthly avg.)'])]\n \n \n MinSeries = pd.concat([MinSeries, \n closeHist['Price(Monthly avg.)'][(closeHist.index <= MaxSeries.index[0])&\n (closeHist['Price(Monthly avg.)'] < MaxSeries.iloc[0])].head(1)]).sort_index()\n\n \n #BothSeries = pd.concat([MinSeries, MaxSeries]).sort_index()\n #MaxMaxSeries = BothSeries[(BothSeries.shift(1) < BothSeries) & (BothSeries.shift(-1) < BothSeries)]\n #MinMinSeries = BothSeries[(BothSeries.shift(1) > BothSeries) & (BothSeries.shift(-1) > BothSeries)]\n \n \n\n #3PTL Buy Line\n X = list()\n Y = list()\n x_1_date = MaxSeries.idxmax()\n x_1 = closeHist[closeHist.index==x_1_date].x_index.iloc[0]\n X.append(x_1)\n Y.append(MaxSeries.max())\n try:\n x_2_date = MaxSeries[MaxSeries.index > x_1_date].idxmax()\n x_2 = closeHist[closeHist.index==x_2_date].x_index.iloc[0]\n X.append(x_2)\n Y.append(MaxSeries[MaxSeries.index > x_1_date].max())\n except ValueError:\n pass\n #3PTL Sell Line\n X2 = list()\n Y2 = list()\n x2_1_date = MinSeries.idxmin()\n x2_1 = closeHist[closeHist.index==x2_1_date].x_index.iloc[0]\n X2.append(x2_1)\n Y2.append(MinSeries.min())\n try:\n x2_2_date = MinSeries[MinSeries.index > x2_1_date].idxmin()\n x2_2 = closeHist[closeHist.index==x2_2_date].x_index.iloc[0]\n X2.append(x2_2)\n Y2.append(MinSeries[MinSeries.index > x2_1_date].min())\n except ValueError:\n pass\n\n print('Current Price for', ticker, 'is', str(round(closeHist['Price(Monthly avg.)'].iloc[-1], 2)))\n\n sellLine_list = list()\n buyLine_list = list()\n\n #Calculate and plot Sell line:\n if len(X2) < 2:\n # IF WE CANNOT BUILD A SELL LINE USING MAX, START WITH FIRST TWO TROUGHS\n X2 = 
list(closeHist.loc[MinSeries.index]['x_index'].iloc[:2])\n Y2 = list(closeHist.loc[MinSeries.index]['Price(Monthly avg.)'].iloc[:2])\n ThreePtS = drawLine2P(x=X2,y=Y2,xlims=[closeHist['x_index'].values.min(),\n closeHist['x_index'].values.max()+1])\n sellLine_list.append(ThreePtS[1])\n else: \n ThreePtS = drawLine2P(x=X2,y=Y2,xlims=[closeHist['x_index'].values.min(),\n closeHist['x_index'].values.max()+1])\n sellLine_list.append(ThreePtS[1])\n\n #Calculate and plot Buy line:\n if len(X) < 2:\n pass\n else: \n ThreePtB = drawLine2P(x=X,y=Y,xlims=[closeHist['x_index'].values.min(),\n closeHist['x_index'].values.max()+1])\n buyLine_list.append(ThreePtB[1])\n\n\n Buy_Breach = max(closeHist[closeHist.x_index.isin(X2)].index)\n if verbose_mode:\n n = 1 #TESTING\n while Buy_Breach:\n # FIRST BUY ITERATION\n latestHist = closeHist.loc[Buy_Breach:]\n subSell = latestHist.index[latestHist['Price(Monthly avg.)'] < pd.Series(ThreePtS[1], closeHist.index).loc[Buy_Breach:]]\n if len(subSell) > 0:\n Sell_Breach = subSell[0] \n preBreach = MaxSeries[MaxSeries.index < Sell_Breach].index\n postBreach = MaxSeries[MaxSeries.index > Sell_Breach].index\n if verbose_mode:\n print(\"{} Sell Breach at {}, this is Breach #{}\".format(ticker, Sell_Breach, n)) #TESTING\n n+=1\n if len(postBreach) > 0:\n pt_1 = closeHist.loc[closeHist.loc[preBreach]['Price(Monthly avg.)'].idxmax()]\n pt_2 = closeHist.loc[postBreach[0]]\n Y2 = [pt_1['Price(Monthly avg.)'], pt_2['Price(Monthly avg.)']]\n X2 = [pt_1['x_index'], pt_2['x_index']]\n ThreePtB = drawLine2P(x=X2,y=Y2,xlims=[closeHist['x_index'].values.min(),\n closeHist['x_index'].values.max()+1])\n # plt.plot(closeHist.index, ThreePtB[1],\n # c='g', linestyle='dashed', \n # alpha=buyAlpha)\n buyLine_list.append(ThreePtB[1])\n else:\n Sell_Breach = None\n break \n else:\n Sell_Breach = None\n break\n while Sell_Breach:\n # FIRST SELL ITERATION\n latestHist = closeHist.loc[Sell_Breach:]\n superBuy = latestHist.index[latestHist['Price(Monthly avg.)'] > pd.Series(ThreePtB[1], closeHist.index).loc[Sell_Breach:]]\n if len(superBuy) > 0:\n Buy_Breach = superBuy[0]\n preBreach = MinSeries[MinSeries.index < Buy_Breach].index\n postBreach = MinSeries[MinSeries.index > Buy_Breach].index\n if verbose_mode:\n print(\"{} Buy Breach at {}, this is Breach #{}\".format(ticker, Buy_Breach, n)) #TESTING\n n+=1\n if len(postBreach) > 0:\n pt_1 = closeHist.loc[closeHist.loc[preBreach]['Price(Monthly avg.)'].idxmin()]\n pt_2 = closeHist.loc[postBreach[0]]\n Y2 = [pt_1['Price(Monthly avg.)'], pt_2['Price(Monthly avg.)']]\n X2 = [pt_1['x_index'], pt_2['x_index']]\n ThreePtS = drawLine2P(x=X2,y=Y2,xlims=[closeHist['x_index'].values.min(),\n closeHist['x_index'].values.max()+1])\n # plt.plot(closeHist.index, ThreePtS[1],\n # c='r', linestyle='dashed', \n # alpha=sellAlpha)\n sellLine_list.append(ThreePtS[1])\n\n break\n else:\n Buy_Breach = None\n break\n else:\n Buy_Breach = None\n break\n #sellLine_alpha = np.linspace(0.1, 1, len(sellLine_list))\n #buyLine_alpha = np.linspace(0.1, 1, len(buyLine_list))\n sellLine_alpha = np.flipud(np.linspace(1, 0.1, len(sellLine_list)+1)[:-1])\n buyLine_alpha = np.flipud(np.linspace(1, 0.1, len(buyLine_list)+1)[:-1])\n\n\n\n if len(sellLine_list) > 0:\n sellPrice = round(sellLine_list[-1][-1], 2)\n if sellPrice < 0:\n sellPrice = round(0.00, 2) \n print('Sell Price for', ticker, 'is', sellPrice)\n if len(buyLine_list) > 0:\n buyPrice = round(buyLine_list[-1][-1], 2)\n if buyPrice < 0:\n buyPrice = round(0.00, 2)\n print('Buy Price for', ticker, 'is', buyPrice)\n\n 
plt.figure(figsize=[20,9])\n with plt.style.context('fivethirtyeight'):\n plt.plot(closeHist['Price(Monthly avg.)'], zorder=0)\n \n if verbose_mode:\n for i in np.arange(len(sellLine_list)):\n plt.plot(closeHist.index, sellLine_list[i],\n c='r', linestyle='dashed', \n alpha=sellLine_alpha[i])\n\n for i in np.arange(len(buyLine_list)):\n plt.plot(closeHist.index, buyLine_list[i],\n c='g', linestyle='dashed', \n alpha=buyLine_alpha[i])\n\n if len(sellLine_list) > 0:\n plt.plot(closeHist.index, sellLine_list[-1],\n c='r',\n alpha=1)\n \n if len(buyLine_list) > 0:\n plt.plot(closeHist.index, buyLine_list[-1],\n c='g', \n alpha=1) \n\n plt.scatter(MinSeries.index, \n MinSeries,\n c='r', s=50, zorder=10)\n plt.scatter(MaxSeries.index, \n MaxSeries,\n c='g', s=50, zorder=10)\n # plt.scatter(MaxMaxSeries.index, \n # MaxMaxSeries,\n # c='y', s=100, zorder=5)\n # plt.scatter(MinMinSeries.index, \n # MinMinSeries,\n # c='y', s=100, zorder=5)\n plt.title(\"Buy and Sell Lines for \"+ ticker, {'fontsize':20})\n plt.autoscale()\n num = closeHist['Price(Monthly avg.)'].min()\n Y_lim_min = math.floor(num / 10 ** math.floor(math.log10(num))) * 10 ** math.floor(math.log10(num))\n num = closeHist['Price(Monthly avg.)'].max()\n Y_lim_max = math.ceil(num / 10 ** math.floor(math.log10(num))) * 10 ** math.floor(math.log10(num))\n plt.ylim(0, Y_lim_max)#,Y_lim_max)\n plt.show()", "def single_plot(x_data, y_data, title, x_label, y_label):\n plt.figure(1, (18, 8)) # something, plot size\n plt.subplot(111)\n plt.plot(x_data, y_data)\n plt.title(title)\n plt.xlabel(x_label, fontsize=12)\n plt.ylabel(y_label, fontsize=12)\n # plt.legend(['Train', 'Test'], loc='upper left')\n plt.show()", "def plot_series(self, dates, mean_series, year_series, std_series, savename):\n year = dates[0].year\n year_label = 'Discharge for '+str(year)\n dates2 = np.concatenate([dates,dates[::-1]])\n std2 = np.concatenate([std_series+mean_series,\\\n (mean_series-std_series)[::-1]])\n fig = plt.figure(figsize=(10,5))\n ax = fig.add_subplot(111)\n p1 = ax.plot(dates,mean_series,'-k', label = 'Mean Discharge')\n p3 = ax.fill(dates2,std2,facecolor = 'gray',label='Mean Variance')\n p2 = ax.plot(dates,year_series,'-r', label = year_label)\n ax.set_ylabel('$m^3/s$')\n ax.set_title('Brazos River Discharge Near Rosharon, TX')\n plt.ylim([0,max(year_series)+500])\n plt.legend(fontsize='x-small')\n idx = [i for i in range(dates.shape[0]) if (dates[i].day == 1)]\n dt_form = '%b'\n plt.xticks(dates[idx],[datetime.datetime.strftime(dates[i],dt_form) for i in idx])\n plt.savefig(savename)", "def plot(\n self,\n xscale=\"symlog\",\n xscale_linthresh=20,\n zoom=\"auto\",\n hlines=(),\n ):\n import matplotlib.pyplot as plt\n from matplotlib.colors import hsv_to_rgb\n\n ys = np.array(self.losses)\n xs = np.arange(ys.size)\n\n fig, ax = plt.subplots()\n ax.plot(xs, ys, \".-\")\n if xscale == \"symlog\":\n ax.set_xscale(xscale, linthresh=xscale_linthresh)\n else:\n ax.set_xscale(xscale)\n ax.set_xlabel(\"Iteration\")\n ax.set_ylabel(\"Loss\")\n\n if hlines:\n hlines = dict(hlines)\n for i, (label, value) in enumerate(hlines.items()):\n color = hsv_to_rgb([(0.1 * i) % 1.0, 0.9, 0.9])\n ax.axhline(value, color=color, ls=\"--\", label=label)\n ax.text(1, value, label, color=color, va=\"bottom\", ha=\"left\")\n\n if zoom is not None:\n if zoom == \"auto\":\n zoom = min(50, ys.size // 2)\n\n iax = ax.inset_axes([0.5, 0.5, 0.5, 0.5])\n iax.plot(xs[-zoom:], ys[-zoom:], \".-\")\n\n return fig, ax", "def plot(self):\r\n plt.title(\"Iris setosa (blue) vs iris versicolor 
(red)\")\r\n plt.xlabel(\"Sepal length\")\r\n plt.ylabel(\"Petal length\")\r\n plt.legend(loc='best')\r\n plt.show()", "def xaxis(self,label,units):\n if units != \"\": label = label + \" (\" + units + \")\"\n self.subplot.set_xlabel(label)\n pass", "def plot_store_item(df_sub):\n \n ax1 = df_sub['sales'].plot()\n ax1.set_xlabel('Date')\n ax1.set_ylabel('Sales')\n ax1.set_title('Sales of Store {} Item {}'\n .format(df_sub['store'][0], df_sub['item'][0]))\n plt.show()", "def plot(self, *args, **kwargs):\n\n n = len(args)\n\n self.fig, ax = plt.subplots(n,1)\n if 'title' in kwargs:\n self.fig.canvas.set_window_title(kwargs['title'])\n self.fig.suptitle(kwargs['title'], fontsize=11, fontweight='bold')\n if n == 1:\n ax.plot(self.vecs['time'], self.vecs[args[0]])\n ax.set_title('Time vs. ' + args[0].title())\n\n ax.set_ylabel(args[0].title())\n ax.set_xlim([self.vecs['time'][0], self.vecs['time'][-1]])\n\n else:\n for i in range(n):\n ax[i].plot(self.vecs['time'], self.vecs[args[i]])\n ax[i].set_title('Time vs. ' + args[i].title())\n ax[i].set_ylabel(args[i].title())\n ax[i].set_xlim([self.vecs['time'][0], self.vecs['time'][-1]])\n if i != (n-1):\n plt.setp(ax[i].get_xticklabels(), visible=False)\n else:\n ax[i].set_xlabel('Time')\n\n plt.tight_layout(h_pad=0.2)\n plt.subplots_adjust(top=0.85)\n plt.show()", "def show_line(dict, xlabel=\"x\", ylabel=\"y\", title=\"title\"):\n plt.clf()\n plt.cla()\n plt.plot(list(dict.keys()), list(dict.values()), alpha=0.4, color = 'g')\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.title(title)\n plt.show()", "def plot(self, title, series, x, y, setup=None, xlabel='Epochs', ylabel=None):\n hr_min = datetime.datetime.now().strftime(\"%I:%M\")\n timestamp = datetime.datetime.now().strftime(\"%A, %B %d, %Y %I:%M%p\")\n self.vis.text(\n f'<b>LAST UPDATED</b><br>{time}', env=self.env, win='last_updated')\n\n # if setup.expname != 'NoName':\n # title += f\" ({setup.expname})\"\n # if setup.has_suggestion:\n # title += f\" ({setup.sugg_id})\"\n #title += f\" (Phase {setup.phaser.idx}) \"\n\n # if setup.config.sigopt:\n # display_title = f\"{display_title}:{setup.sugg_id}\"\n # if setup.config.mode is not None:\n # display_title += f\" ({setup.config.mode})\"\n\n display_title = f\"{title} ({hr_min})\"\n\n if title in self.plots: # update existing plot\n self.vis.line(\n X=np.array([x]),\n Y=np.array([y]),\n env=self.env,\n win=self.plots[title],\n name=series,\n update='append'\n )\n else: # new plot\n self.plots[title] = self.vis.line(\n X=np.array([x, x]),\n Y=np.array([y, y]),\n env=self.env,\n opts={\n 'legend': [series],\n 'title': display_title,\n 'xlabel': xlabel,\n 'ylabel': ylabel,\n })\n #mlb.gray(\"[plotted to visdom]\")", "def visualize_time_series(fig_ax, data, inp_color, missing_data, lag_color, first_date,\n x_label=\"Number of Days\", y_label=\"Log of Aluminium Price\", title=\"Prices over time\"):\n fig, ax = fig_ax\n ((x_train_raw, y_train_raw), y_pred_list) = data\n\n missing_x, missing_y = missing_data\n is_missing = len(missing_x) != 0\n\n first_date = datetime.strptime(first_date, '%Y-%m-%d')\n\n convert_date = lambda x: [\n np.datetime64((first_date + timedelta(days=d)).strftime('%Y-%m-%d'))\n for d in x\n ]\n convert_price = lambda x: x[\"Output\"].to_list()\n\n x_train = convert_date(x_train_raw[\"Date\"].to_list())\n y_train = convert_price(y_train_raw)\n \n cut_point = x_train[-1]\n ax.plot(x_train, y_train, color=color[inp_color])\n\n for i, y_pred in enumerate(y_pred_list):\n data, plot_name, color_code, is_bridge = y_pred\n mean_pred, 
x_test_raw = data[\"mean\"], data[\"x\"]\n x_test = convert_date(x_test_raw)\n\n if i == 0 and is_missing:\n missing_x = convert_date(missing_x)\n ax.axvline(x_test[0], color=color[lag_color], linestyle='--', linewidth=0.5, dashes=(5, 0), alpha=0.2)\n ax.plot([missing_x[-1], x_test[0]], [missing_y[-1], mean_pred[0]], color[lag_color], linestyle=\"dashed\")\n ax.axvspan(cut_point, x_test[0], color=color[lag_color], alpha=0.1)\n\n plot_bound(ax, data, x_test, color[color_code], plot_name)\n\n if is_bridge and (not is_missing): \n ax.plot([x_train[-1], x_test[0]], [y_train[-1], mean_pred[0]], color[color_code], linewidth=1.5)\n\n if is_missing:\n ax.plot(missing_x, missing_y, color=color[lag_color], linestyle=\"dashed\")\n ax.plot([x_train[-1], missing_x[0]], [y_train[-1], missing_y[0]], color[lag_color], linestyle=\"dashed\")\n ax.axvline(cut_point, color=color[lag_color], linestyle='--', linewidth=0.5, dashes=(5, 0), alpha=0.2)\n else:\n ax.axvline(cut_point, color=color[\"k\"], linestyle='--')\n\n ax.xaxis.set_minor_locator(AutoMinorLocator())\n ax.legend()\n\n # ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n ax.set_title(title)\n\n # ax.set_xlim(left=cut_point-np.timedelta64(1, 'm'))\n plot_axis_date(ax, x_train + missing_x + x_test)\n ax.grid()\n return fig, ax", "def ploter(self):\n if len(self.dataset[self.first_title]) != 2:\n print('plot is only avilable for two features')\n return\n x_axis = []\n y_axis = []\n for title in self.dataset:\n x_axis.append(self.dataset[title][0])\n y_axis.append(self.dataset[title][1])\n plt.plot(x_axis, y_axis, 'o')\n plt.show()", "def generate_history_plot(data, labels_dict, file_title, plot_title):\n fig = plt.figure()\n ax = sns.histplot(data)\n\n if labels_dict:\n ax.set_xlabel(labels_dict[\"x\"])\n if plot_title:\n ax.set_title(plot_title)\n\n plt.savefig(file_title)", "def raw_time_series():\n symbol, company, stock_df = get_csv_data()\n price = input(\"\"\"\n Please pick your pricing measure (Open, High, Low, Close)\n Please enter a valid price: \"\"\") \n #Close = stock_df.Close\n #Date = stock_df.Date\n \n # print(type(stock_df))\n # print(stock_df)\n \n #plots the user specified price measure against date\n plot = stock_df.plot(x='Date', y=price)\n\n #renaming the details of the graph\n plot.set_xlabel(\"Date\")\n plot.set_ylabel('Price at ' + price)\n plot.set_title('Raw time series for ' + company)\n plot.ticklabel_format(axis = 'y', style = 'plain')\n plt.show()\n\n #code for following graphs was adapted from https://towardsdatascience.com/an-end-to-end-project-on-time-series-analysis-and-forecasting-with-python-4835e6bf050b\n matplotlib.rcParams['axes.labelsize'] = 14\n matplotlib.rcParams['xtick.labelsize'] = 12\n matplotlib.rcParams['ytick.labelsize'] = 12\n matplotlib.rcParams['text.color'] = 'k'\n \n #Converting Date to datetime so that it works for the operations below\n stock_df['Date'] = pd.to_datetime(stock_df.Date)\n #stock_df.index = stock_df['Date'] \n \n #Removes the other columns from the dataframe\n data = stock_df.filter(['Date',price])\n\n #setting the data as the index so that it can be plotted against 'price'\n data = data.set_index('Date')\n \n #returns the monthly average 'price'\n y = data[price].resample('MS').mean()\n \n #renaming details of the graph\n plot2 = y.plot(figsize=(15, 6))\n plot2.set_xlabel(\"Date\")\n plot2.set_ylabel('Price at ' + price)\n plot2.set_title('Average monthly ' + price + ' price for ' + company)\n plt.show()\n \n #decomposition graphs from the link above\n rcParams['figure.figsize'] = 
18, 8\n decomposition = sm.tsa.seasonal_decompose(y, model='additive')\n fig = decomposition.plot()\n #fig.set_title('Time series decompisition for ' + company)\n plt.show()", "def plot_one_axes(self, fig_num: int, title: str, y_label: str, raw: np.ndarray, smoothed: np.ndarray,\n legend_entries: Sequence[str]) -> matplotlib.figure.Figure:\n fig = plt.figure(fig_num)\n ax = fig.subplots(1, 1)\n raw_lines = kine_graph_init(ax, raw, y_label, self.frame_nums, [{'ls': ':', 'lw': 2}] * 3)\n ax.set_prop_cycle(None)\n smoothed_lines = kine_graph_add(ax, smoothed, self.frame_nums, [{'ls': '-'}] * 3)\n plt.tight_layout()\n fig.suptitle(title, x=0.7)\n legend_text = ('Raw (' + legend_entries[0] + ')', 'Smoothed (' + legend_entries[1] + ')',\n 'Smoothed (' + legend_entries[2] + ')')\n fig.legend((raw_lines[0], smoothed_lines[1], smoothed_lines[2]), legend_text, ncol=3, handlelength=0.75,\n handletextpad=0.25, columnspacing=0.5, loc='lower left')\n make_interactive()\n return fig", "def plot_changes(stock, data):\n data['Date'] = pd.to_datetime(data['Date']).dt.date\n x = np.arange(0,len(data))\n fig, ax = plt.subplots()\n ax.plot(x,data['Change_Norm'])\n ax.plot(x, data['Mentions_Norm'])\n ax.legend([F\"{stock} Daily Change\", f\"{stock} Mentions\"])\n plt.ylabel('Normalized Value')\n plt.xlabel('Date')\n ax.set_xticklabels(data['Date'])\n plt.xticks(np.arange(0,len(x), 10), data['Date'][::10])\n plt.title(f\"Mentions and Daily Stock Change for {stock} normalized\\n between {data['Date'][0]} and {data['Date'][len(data)-1]}\")\n fig.tight_layout()\n plt.show(block=False)\n plt.pause(2)\n plt.close()\n # Save figure to file\n figname = f\"figures/{stock}_Mentions_Changes\"\n fig.savefig(figname)", "def plot_full_values(stock, data):\n data['Date'] = pd.to_datetime(data['Date']).dt.date\n x = np.arange(0,len(data))\n fig, ax = plt.subplots()\n ax.plot(x, data['Mentions'])\n ax.legend([f\"{stock} Mentions\"])\n plt.ylabel('Number of Mentions')\n plt.xlabel('Date')\n ax.set_xticklabels(data['Date'])\n plt.xticks(np.arange(0,len(x), 10), data['Date'][::10])\n plt.title(f\"Mentions for {stock} \\n between {data['Date'][0]} and {data['Date'][len(data)-1]}\")\n fig.tight_layout()\n plt.show(block=False)\n plt.pause(2)\n plt.close()\n # Save figure to file\n figname = f\"figures/{stock}_Mentions\"\n fig.savefig(figname)", "def _plot(self, step, rewards, losses):\n plt.figure(figsize=(20, 5))\n plt.subplot(131)\n plt.title('Total Episode Reward')\n plt.plot(rewards)\n plt.subplot(132)\n plt.title('MSE Loss')\n plt.plot(losses)\n plt.show()", "def plotData(data, xLabel, yLabel, plotTitle, save=False, saveName=None):\n fig, ax = plt.subplots()\n ax.plot(data)\n ax.set(xlabel=xLabel, ylabel=yLabel,\n title=plotTitle)\n ax.grid()\n if save:\n if saveName is not None:\n fig.savefig(saveName)\n else:\n fig.savefig(\"figure\")\n plt.show()", "def xyplot(x_vals,y_vals,name):\n # set the figure's size\n set_figsize(figsize=(5, 2.5))\n # detach() is used to get a variable from the current calculation graph\n # in which this variable is the not gradient tracking version\n plt.plot(x_vals.detach().numpy(), y_vals.detach().numpy())\n # set the constant x axis label\n plt.xlabel('x')\n # combine and set the y axis label\n plt.ylabel(name+'(x)')\n plt.show()", "def plot1D(x, y, title=\"Title\", xlab=\"x-axis\", ylab=\"y-axis\"):\n plt.plot(x, y, linewidth=2)\n plt.title(title)\n plt.xlabel(xlab)\n plt.ylabel(ylab)", "def scatter_error_plot(y_true, y_predict, datelist,\n xlab='Dates', ylab='Units sold', title='Error 
analysis',\n ticks_separation='weeks'):\n plt.style.use('seaborn')\n\n #create plot\n fig=plt.figure(figsize=(15,10))\n \n #plot things\n plt.plot(datelist,y_true, label=r'True Values' ,\n linestyle='--', linewidth=2)\n plt.plot(datelist,y_predict, label=r'Predicted Values',\n linestyle='--', linewidth=2)\n plt.scatter(datelist,y_true)\n plt.scatter(datelist,y_predict)\n \n #labels\n plt.xlabel(xlab)\n plt.ylabel(ylab)\n plt.title(title)\n \n #set ticks every week\n if ticks_separation == 'days':\n plt.gca().xaxis.set_major_locator(matplotlib.dates.DayLocator())\n \n elif ticks_separation == 'weeks':\n plt.gca().xaxis.set_major_locator(matplotlib.dates.WeekdayLocator())\n \n elif ticks_separation == 'months':\n plt.gca().xaxis.set_major_locator(matplotlib.dates.MonthLocator())\n \n elif ticks_separation == 'days':\n plt.gca().xaxis.set_major_locator(matplotlib.dates.YearLocator())\n\n \n #set week format\n plt.gca().xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%d %b'))\n \n \n plt.legend(loc='best')\n \n #increase all text\n ax=plt.gca()\n for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +\n ax.get_xticklabels() + ax.get_yticklabels() + ax.legend().get_texts()):\n item.set_fontsize(18)\n \n \n return fig", "def graph(self):\n seq_obj = MultiSequence(self.symbol, self.__best_model.window_size,1)\n test_predict = self.__best_model.model.predict(seq_obj.X)\n\n #our data is scaled between -1 and 1 so lets scale it back up\n scaler = MinMaxScaler(feature_range=(self.__min_price ,self.__max_price))\n orig_data = seq_obj.original_data.reshape(-1,1)\n orig_prices = scaler.fit_transform(orig_data).flatten()\n \n # plot actual prices\n plt.plot(orig_prices, color='k')\n \n # plot test set prediction after scaling back up\n length = len(seq_obj.X) + self.__best_model.window_size \n test_in = np.arange(self.__best_model.window_size,length,1)\n pred_prices = scaler.fit_transform(test_predict.reshape(-1,1)).flatten()\n plt.plot(test_in,pred_prices,color = 'b')\n \n # pretty up graph\n plt.xlabel('day')\n plt.ylabel('Closing price of stock')\n plt.title(\"Price prediction for {}\".format(self.symbol))\n plt.legend(['Actual','Prediction'],loc='center left', bbox_to_anchor=(1, 0.5))\n plt.show()", "def plotprice(self):\n plt.figure()\n plt.hist( self.pricetree[-1,:] )\n plt.title(\"price Distribution\") \n plt.show()", "def plot(self, num_levels=10,**kwargs):\n Schrodinger1D.plot(self, num_levels,**kwargs)\n xlabel('$\\delta/2\\pi$')\n ylabel('E/h (GHz)')\n ylim(min(self.fluxonium_potential()),min(2*self.energies(num_levels)[-1],max(self.fluxonium_potential())))\n title('(Ej=%.2f , El=%.2f , Ec=%.2f)GHz , ($\\Phi_J=%.2f $,$\\Phi_L=%.2f \\,) \\Phi_0$ ' % (self.Ej, self.El, self.Ec, self.phi,self.phiL))", "def add_title(self, title, x_title = \"\", y_title = \"\"):\n self._fig.update_layout(title_text = title,\n xaxis_title = x_title,\n yaxis_title = y_title)", "def _plot(self, rewards, losses, epsilons):\n plt.figure(figsize=(20,5))\n plt.subplot(131)\n plt.title('Episodic Reward')\n plt.plot(rewards)\n plt.subplot(132)\n plt.title('TD Loss')\n plt.plot(losses)\n plt.subplot(133)\n plt.title('Epsilon')\n plt.plot(epsilons)\n plt.tight_layout()\n plt.show()", "def plot_series_and_differences(series, ax, num_diff, title=''):\n plt.xticks(rotation=40)\n ax[0].plot(series.index, series)\n ax[0].set_title('Raw series: {}'.format(title))\n ax[0].set_xticklabels(labels=series.index.date, rotation=45)\n for i in range(1, num_diff+1):\n diff = series.diff(i)\n ax[i].plot(series.index, diff)\n 
ax[i].set_title('Difference # {}'.format(str(i)))\n ax[i].set_xticklabels(labels=series.index.date, rotation=45)", "def plot_linear_trend(ax, series, title='', xlabel='', ylabel=''):\n linear_trend = fit_linear_trend(series)\n plot_trend_data(ax, title, series)\n ax.plot(series.index, linear_trend)\n ax.set_title(title)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)", "def set_title(self):\n plt.title(label=self.title, fontsize=self.titlesize)", "def plot(self, num_levels=10,**kwargs):\n Schrodinger1D.plot(self, num_levels,**kwargs)\n xlabel('$\\delta/2\\pi$')\n ylabel('E/h (GHz)')\n ylim(min(self.flux_qubit_potential()),min(2*self.energies(num_levels)[-1],max(self.flux_qubit_potential())))\n title('Ej=%.2f GHz, El=%.2f GHz, Ec=%.2f GHz, $\\Phi=%.2f \\, \\Phi_0$' % (self.Ej, self.El, self.Ec, self.phi))", "def plot(self, ax=..., *, name=..., **kwargs):\n ...", "def simple_plot(self):\n for i in np.arange(len(self.e2)):\n self.ax.plot(self.e2[i], 'o', label=self.labels[i])", "def plot_kine_var(self, fig_num: int, title: str, y_labels: Sequence[str], prev_filled: np.ndarray,\n smoothed: np.ndarray, filled: np.ndarray, sfs: np.ndarray) -> matplotlib.figure.Figure:\n fig = plt.figure(fig_num)\n ax = fig.subplots(3, 1)\n prev_filled_lines = marker_graph_init(ax, prev_filled, '', self.frame_nums, color='red')\n smoothed_lines = marker_graph_add(ax, smoothed, self.frame_nums, color='blue')\n smoothed_filled_lines = marker_graph_add(ax, filled, self.frame_nums, ls=':', lw=2, color='green')\n sfs_lines = marker_graph_add(ax, sfs, self.frame_nums, color='green')\n for idx, sub_ax in enumerate(ax):\n plot_utils.update_ylabel(sub_ax, y_labels[idx], font_size=10)\n sub_ax.axvline(self.vicon_endpts[0])\n sub_ax.axvline(self.vicon_endpts[1])\n sub_ax.set_xlim(left=1)\n plt.tight_layout()\n fig.suptitle(title)\n fig.legend((prev_filled_lines[0], smoothed_lines[0], smoothed_filled_lines[0], sfs_lines[0]),\n ('Prev Filled', 'Smoothed', 'Smoothed/Filled', 'SFS'),\n ncol=4, handlelength=0.75, handletextpad=0.25, columnspacing=0.5, loc='lower left')\n make_interactive()\n return fig", "def setFigureTitle(self, title: str):\n self.fig.text(0.5, 0.99, title,\n horizontalalignment='center',\n verticalalignment='top',\n fontsize='small')\n self.draw()", "def liveplot(x, y, xlim, ylim, title):\n plt.plot(x,y,'b.')\n plt.xlim(xlim)\n plt.ylim(ylim)\n plt.xlabel('North-South Axis')\n plt.ylabel('East-West Axis')\n plt.title(title)\n plt.show()", "def plot_xy(x, y, ax=None, xlabel='Energy [keV]', **kwargs):\n\n if not ax:\n new_plot = True\n plt.figure()\n ax = plt.axes()\n else:\n new_plot = False\n\n plt.semilogy(x, y, axes=ax, drawstyle='steps-mid', **kwargs)\n\n if new_plot:\n plt.xlabel(xlabel)\n plt.ylabel('Counts')\n\n if 'label' in kwargs:\n plt.legend()\n plt.show()\n\n return ax", "def plot(self, **kwargs):\n if self.order != None:\n name = str(_constructModelName(self.teff, self.logg, \n self.metal, self.en, self.order, self.path))\n output = kwargs.get('output', str(name) + '.pdf')\n ylim = kwargs.get('yrange', [min(self.flux)-.2, max(self.flux)+.2])\n title = kwargs.get('title')\n save = kwargs.get('save', False)\n \n plt.figure(figsize=(16,6))\n plt.plot(self.wave, self.flux, color='k', \n alpha=.8, linewidth=1, label=name)\n plt.legend(loc='upper right', fontsize=12)\n plt.ylim(ylim) \n \n minor_locator = AutoMinorLocator(5)\n #ax.xaxis.set_minor_locator(minor_locator)\n # plt.grid(which='minor') \n \n plt.xlabel(r'$\\lambda$ [$\\mathring{A}$]', fontsize=18)\n plt.ylabel(r'$Flux$', fontsize=18)\n 
#plt.ylabel(r'$F_{\\lambda}$ [$erg/s \\cdot cm^{2}$]', fontsize=18)\n if title != None:\n plt.title(title, fontsize=20)\n plt.tight_layout()\n\n if save == True:\n plt.savefig(output)\n plt.show()\n plt.close()\n\n else:\n output = kwargs.get('output'+ '.pdf')\n ylim = kwargs.get('yrange', [min(self.flux)-.2, max(self.flux)+.2])\n title = kwargs.get('title')\n save = kwargs.get('save', False)\n \n plt.figure(figsize=(16,6))\n plt.plot(self.wave, self.flux, color='k', alpha=.8, linewidth=1)\n plt.legend(loc='upper right', fontsize=12)\n plt.ylim(ylim)\n \n minor_locator = AutoMinorLocator(5)\n #ax.xaxis.set_minor_locator(minor_locator)\n # plt.grid(which='minor') \n \n plt.xlabel(r'$\\lambda$ [$\\mathring{A}$]', fontsize=18)\n plt.ylabel(r'$Flux$', fontsize=18)\n #plt.ylabel(r'$F_{\\lambda}$ [$erg/s \\cdot cm^{2}$]', fontsize=18)\n if title != None:\n plt.title(title, fontsize=20)\n plt.tight_layout()\n\n if save == True:\n plt.savefig(output)\n plt.show()\n plt.close()", "def __plot( dict1, title):\n plt.title(title)\n plt.xlabel(\"Run Time (microsecondsS)\")\n plt.ylabel(\"Number of Items Retreived\")\n plt.ylim(0, max(dict1.values()))\n plt.xlim(min(dict1.keys()), max(dict1.keys()) )\n x1, y1 = zip(*dict1.items())\n\n plt.scatter(x1, y1)\n plt.show()", "def hogg_lim_and_label():\n plt.xlim(-20., 1020.)\n plt.xlabel(\"time (d)\")\n plt.ylim(-20., 20.)\n plt.ylabel(\"radial velocity (m\\,s$^{-1}$)\")\n return None", "def plot(axes, axis, values, c='chartreuse'):\n a = axes[axis]\n a.set_xlabel('time (s)')\n x = np.array(range(len(values))) / 1000\n dim = 'x' if axis == 0 else 'y' if axis == 1 else 'z'\n a.set_title('-'.join([dim, 'acceleration']))\n a.plot(x, values / 1000, c=c)", "def comparison_plot(X, Y, title):\n linreg = scipy.stats.linregress(X, Y)\n\n x_txt = rel_pos(X, 0.95)\n y_txt = rel_pos(Y, 0)\n\n plt.scatter(X, Y)\n plt.xlabel('B-S')\n plt.ylabel('S-B')\n if title == 'RouteScore':\n plt.xscale('log')\n plt.yscale('log')\n if len(set(X)) > 1 or len(set(Y)) > 1:\n plt.text(x_txt, y_txt,\n f'slope = {round(linreg.slope, 2)}\\n$R^{2}$ = {round(linreg.rvalue,2)}',\n horizontalalignment='right',\n verticalalignment='bottom')\n plt.xlim(0.95 * min(X), 1.05 * max(X))\n plt.ylim(0.95 * min(Y), 1.05 * max(Y))\n plt.savefig(os.path.join(FIG_DIR, f'BSvsSB {title}.png'))\n plt.show()", "def plot_data(self, symbols, columns, start_date=None, end_date=None):\n # TODO plot stock data for given symbols, maybe on a single graph\n pass", "def plot(self, data_frame):\n self.axes.plot(data_frame, 'o-')\n self.axes.set_ylim(0.0, 200.0)\n self.fig.autofmt_xdate()\n self.draw()", "def _plotDisplay(self):\n self.gc.tick_labels.set_xformat('ddd')\n self.gc.tick_labels.set_yformat('ddd')\n if self.csys == 'GAL':\n if self.xlabel is None: self.xlabel = r'Galactic longitude $l$ $(^{\\circ})$'\n if self.ylabel is None: self.ylabel = r'Galactic latitude $b$ $(^{\\circ})$'\n else:\n if self.xlabel is None: self.xlabel = r'RA (J2000)'\n if self.ylabel is None: self.ylabel = r'Dec (J2000)'\n self.gc.axis_labels.set_xtext(self.xlabel)\n self.gc.axis_labels.set_ytext(self.ylabel)\n self.gc.set_axis_labels_font(size=self.ftsize1)\n self.gc.tick_labels.set_font(size=self.ftsize2) # <====== perhaps a string here?\n self.gc.ticks.set_color('black')", "def plot_figure(title: str, plot_data_list: List[PlotData], **fig_kw) -> Figure:\n fig = plt.figure(**fig_kw)\n for plot_data in plot_data_list:\n plt.plot(plot_data.timestamps, plot_data.feerates, label=plot_data.label)\n \n min_timestamp = 
min(min(plot_data.timestamps) for plot_data in plot_data_list)\n max_timestamp = max(max(plot_data.timestamps) for plot_data in plot_data_list)\n min_feerate = min(min(plot_data.feerates) for plot_data in plot_data_list)\n max_feerate = max(max(plot_data.feerates) for plot_data in plot_data_list)\n # graph config\n plt.legend(loc=\"best\")\n plt.title(title)\n \n xticks = np.linspace(start=min_timestamp, stop=max_timestamp, num=5)\n yticks = np.linspace(start=min_feerate, stop=max_feerate, num=5)\n \n timestamp_to_date_str = lambda t: datetime.utcfromtimestamp(t).strftime('%Y-%m-%d')\n plt.xticks(\n ticks=xticks,\n labels=[timestamp_to_date_str(t) for t in xticks]\n )\n \n plt.yticks(ticks=yticks)\n \n return fig", "def plot(self, plotaxis=None, xlabel='', ylabel='', description='', **kwargs):\n if not ylabel:\n ylabel = 'Quantum EFficiency'\n if not description:\n description = \"detector %s\" % self.meta.instrument.detector\n super(MiriQuantumEfficiency, self).plot(plotaxis=plotaxis, xlabel=xlabel,\n ylabel=ylabel, description=description,\n **kwargs)", "def plot(data,fig,figure_number = 1,xlabel='',ylabel='',title=''):\n fig.add_subplot(1,2,figure_number)\n plt.title(title)\n plt.xlabel(xlabel)\n # only plot the y label if ipython is True, else the plot looks to cramped\n if is_ipython:\n plt.ylabel(ylabel)\n plt.plot(np.array(data),'orange')\n plt.ylim((0,1))\n if not is_ipython:\n plt.pause(0.000001) # pause a bit so that plots are updated", "def plot(nSamples, expectations, variances, exactE, exactVar, title=\"\"):\n fig, axes = plt.subplots(2, 1)\n\n axes[0].semilogx(nSamples, expectations, color=\"black\", label=\"Empirical E(X)\")\n axes[0].axhline(exactE, color=\"darkgray\", label=\"Exact E(X)\")\n\n axes[1].semilogx(nSamples, variances, color=\"black\", label=\"Empirical Var(X)\")\n axes[1].axhline(exactVar, color=\"darkgray\", label=\"Exact Var(X)\")\n\n for ax in axes:\n ax.set_xlabel(\"N draws in the simulation\")\n ax.legend()\n axes[0].set_ylabel(\"E(X)\")\n axes[0].set_title(title)\n axes[1].set_ylabel(\"Var(X)\")\n plt.show()", "def plot_time_series(\n data,\n y_keys,\n x_name,\n title,\n name=None,\n y_names=None,\n plot_width=PLOT_WIDTH,\n):\n if y_names is None:\n y_names = [str(key) for key in y_keys]\n\n plot = create_styled_figure(title=title, name=name, plot_width=plot_width)\n # this ensures that the y range spans at least 0.1\n plot.y_range.range_padding = Y_RANGE_PADDING\n plot.y_range.range_padding_units = Y_RANGE_PADDING_UNITS\n\n colors = get_colors(\"categorical\", len(y_keys))\n\n legend_items = []\n for color, y_key, y_name in zip(colors, y_keys, y_names):\n if len(y_name) <= 35:\n label = y_name\n else:\n label = \"...\" + y_name[-32:]\n line_glyph = plot.line(\n source=data,\n x=x_name,\n y=y_key,\n line_width=2,\n color=color,\n muted_color=color,\n muted_alpha=0.2,\n )\n legend_items.append((label, [line_glyph]))\n legend_items.append((\" \" * 60, []))\n\n tooltips = [(x_name, \"@\" + x_name)]\n tooltips += [(\"param_name\", y_name), (\"param_value\", \"@\" + y_key)]\n hover = HoverTool(renderers=[line_glyph], tooltips=tooltips)\n plot.tools.append(hover)\n\n legend = Legend(\n items=legend_items,\n border_line_color=None,\n label_width=100,\n label_text_font_size=LEGEND_LABEL_TEXT_FONT_SIZE,\n spacing=LEGEND_SPACING,\n )\n legend.click_policy = \"mute\"\n plot.add_layout(legend, \"right\")\n\n return plot", "def plot2dTimeSeries(values, title='series', xLabel='time', yLabel='values', savePath='.'):\n plt.plot(values)\n plt.ylabel(yLabel)\n 
plt.xlabel(xLabel)\n plt.xticks(np.linspace(0, len(values), 11))\n plt.title(title)\n plt.savefig(f'{savePath}/{title}.png')\n plt.show(block=False)\n plt.pause(2)\n plt.close()", "def render_axis_labels(self, axes=None):\n axes = plt if axes is None else axes\n if self.x_label is not None:\n axes.set_xlabel(self.x_label)\n if self.y_label is not None:\n axes.set_ylabel(self.y_label)", "def xaxis(self,label,units):\r\n if units != \"\": label = label + \" (\" + units + \")\"\r\n self.xbox.set_text(r\"$%s$\" % (label))\r\n pass", "def plot(df, saveName=None, extrem=None,\r\n axeslabel_fontsize=10., title_fontsize=20., axesvalues_fontsize=10., annotation_fontsize=10., legend_fontsize=8.):\r\n\r\n print \"plotting timeseries data...\"\r\n fig = plt.figure(tight_layout=True)\r\n \r\n ax = fig.add_subplot(111)\r\n df.plot(colormap=\"jet_r\", ax=ax, marker='x', title=\"Farge: Measured water level in observation wells and river Weser\")\r\n\r\n\r\n if extrem:\r\n print \"plotting low-high tide scatter data...\"\r\n # if we have extrem.... we want to plot them with same color\r\n handles, labels = ax.get_legend_handles_labels()\r\n colors = list()\r\n for h in handles:\r\n colors.append(h.get_color())\r\n if len(colors) != len(extrem):\r\n raise IndexError(\"Number of hydrographs do not correspond to number of passed extrem. Cannot get proper colors. Do hardcode quickly\")\r\n i = 0\r\n for a, c in zip(extrem, colors):\r\n i += 1\r\n print \"\\t>>> {0}/{1}\".format(i, len(extrem))\r\n for item, marker in zip(a, ['o', 's']): # a = list( hightide, lowtide)\r\n item.plot(x='datetime', y='y', ax=ax, marker=marker, lw=2., style='.', markeredgecolor='black', markeredgewidth=0.4, color=c, legend=False)\r\n\r\n #ax.set_xlim([datetime.date(2015, 1, 26), datetime.date(2015, 1, 30)])\r\n handles, labels = ax.get_legend_handles_labels()\r\n ax.legend(handles[0:7], labels[0:7], fontsize=legend_fontsize)\r\n ax.grid(True, which='major')\r\n ax.set_title(\"Measured water level in observation wells and river Weser\", fontsize=title_fontsize)\r\n ax.set_ylabel(\"m AMSL\", fontsize=axeslabel_fontsize)\r\n ax.set_xlabel(\"\", fontsize=axeslabel_fontsize)\r\n ax.tick_params(axis='both', which=\"both\", labelsize=axesvalues_fontsize)\r\n\r\n\r\n #figManager = plt.get_current_fig_manager()\r\n #figManager.window.showMaximized()\r\n\r\n if saveName:\r\n fig.savefig(saveName, dpi=300, tight_layout=True, format='pdf')\r\n print 'saving figure... 
:', saveName\r\n plt.show()", "def universal_plot_labels(fig, xlabel, ylabel):\n fig.add_subplot(111, frameon=False)\n plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.tight_layout()", "def labelling():\n title = input('Please enter the title of output figure: ')\n plt.title(title)\n plt.xlabel(\"Time (s)\")\n plt.ylabel(\"Voltage (V)\")\n plt.legend()\n plt.savefig(\"Scope_%s\"%str(round(time.time()))) # Time stamp on file names\n plt.show()", "def SPXVIXPlot(self, startDate, endDate):\n\t\tstartDate = pd.Timestamp(startDate)\n\t\tendDate = pd.Timestamp(endDate)\n\t\tSPXIndex = (startDate < self.SPXHist.index) & (self.SPXHist.index < endDate)\n\t\tVIXIndex = (startDate < self.VIXHist.index) & (self.VIXHist.index < endDate)\n\t\tSPXVIXHist = pd.DataFrame({\n\t\t\t'SPX': self.SPXHist['Close'][SPXIndex],\n\t\t\t'VIX': self.VIXHist['Close'][VIXIndex]})\n\t\tSPXVIXHist.fillna(method = 'ffill')\n\t\t\n\t\t\n\t\tplt.figure()\n\t\tSPXVIXHist.plot(subplots = True, grid = True, \n\t\t\tstyle = 'b', figsize = (8, 8), colormap=plt.cm.jet)\n\t\tplt.xlabel('date')\n\t\tplt.tight_layout()\n\t\tif os.path.exists('../data_graphs/spx_vix_time_series.pdf'):\n\t\t\tos.remove('../data_graphs/spx_vix_time_series.pdf')\n\t\tplt.savefig('../data_graphs/spx_vix_time_series.pdf')\n\n\t\tplt.figure()\n\t\tSPXVIXReturn = np.log(SPXVIXHist / SPXVIXHist.shift(1))\n\t\tSPXVIXReturn = SPXVIXReturn.ix[1:]\n\t\txDat = SPXVIXReturn['SPX']\n\t\tyDat = SPXVIXReturn['VIX'] \n\t\tmodel = sm.OLS(yDat, xDat)\n\t\t#model = sm.OLS(yDat, add_constant(xDat))\n\t\t#xAjdDat = xDat[(xDat < 0.05)][(xDat > -0.05)]\n\t\t#yAdjDat = yDat[(xDat < 0.05)][(xDat > -0.05)]\n\t\t#model = sm.OLS(yAdjDat, xAjdDat)\n\t\tres = model.fit()\n\t\t#print res.summary()\n\n\t\tplt.plot(xDat, yDat, 'r.')\n\t\tax = plt.axis()\n\t\tx = np.linspace(ax[0], ax[1] - 0.00001)\n\t\tyPred = res.predict(x)\n\t\tplt.plot(x, yPred, 'b', lw = 2)\n\t\tplt.grid(True)\n\t\tplt.xlabel('SPX returns')\n\t\tplt.ylabel('VIX returns')\n\t\tif os.path.exists('../data_graphs/spx_vix_returns.pdf'):\n\t\t\tos.remove('../data_graphs/spx_vix_returns.pdf')\n\t\tplt.savefig('../data_graphs/spx_vix_returns.pdf')", "def plot_strategy(ticker, start, end, df):\r\n\r\n fig = plt.figure(figsize=(20, 10))\r\n ax1 = plt.plot(df)\r\n ax1 = plt.title(\"Comparing simple investment strategies for \" +\r\n ticker + \" between \" + start + \" and \" + end, fontsize=22)\r\n ax1 = plt.xlabel(\"Date\", fontsize=18)\r\n ax1 = plt.ylabel(\"Price\", fontsize=18)\r\n ax1 = plt.legend(list(df_return_of_strategy.columns), prop={\"size\": 22}, loc=\"upper left\")\r\n plt.grid(True)\r\n plt.show()", "def plot_strategy(ticker, start, end, df):\r\n\r\n fig = plt.figure(figsize=(20, 10))\r\n ax1 = plt.plot(df)\r\n ax1 = plt.title(\"Comparing simple investment strategies for \" +\r\n ticker + \" between \" + start + \" and \" + end, fontsize=22)\r\n ax1 = plt.xlabel(\"Date\", fontsize=18)\r\n ax1 = plt.ylabel(\"Price\", fontsize=18)\r\n ax1 = plt.legend(list(df_return_of_strategy.columns), prop={\"size\": 22}, loc=\"upper left\")\r\n plt.grid(True)\r\n plt.show()", "def plot(ns, ts, label, color='blue', exp=1.0):\n tfit = fit(ns, ts, exp)\n pyplot.plot(ns, tfit, color='0.7', linewidth=2, linestyle='dashed')\n pyplot.plot(ns, ts, label=label, color=color, linewidth=3)", "def set_up(self):\n self.h, = self.ax.plot(self.x, lw=2)\n self.ax.set_ylim(0,100)\n self.ax.set_xlim(0,100)\n self.ax.title.set_text(self.config[\"title\"])\n 
self.ax.set_xlabel(self.config[\"x_label\"])\n self.ax.set_ylabel(self.config[\"y_label\"])" ]
[ "0.731029", "0.7286386", "0.68272114", "0.6824816", "0.6797167", "0.67936105", "0.6715498", "0.6569525", "0.64421463", "0.6384674", "0.63568515", "0.634274", "0.63376296", "0.6326378", "0.62356347", "0.62333536", "0.6199969", "0.6193358", "0.6185668", "0.618083", "0.61708236", "0.6158279", "0.6117646", "0.6098429", "0.6090432", "0.6079716", "0.6077552", "0.6062381", "0.60202324", "0.6009401", "0.60009545", "0.5994758", "0.59714776", "0.5944389", "0.59338385", "0.59336185", "0.5896496", "0.58746064", "0.5854995", "0.5815292", "0.581492", "0.58081657", "0.5784671", "0.57806665", "0.57805955", "0.57751006", "0.57676566", "0.5764686", "0.5764449", "0.5757302", "0.5755269", "0.5751235", "0.5747838", "0.5745023", "0.5744973", "0.5742863", "0.5737169", "0.5732379", "0.5726774", "0.5725067", "0.5724546", "0.57146764", "0.5708125", "0.5708099", "0.57027966", "0.5697726", "0.5686398", "0.5684591", "0.56625766", "0.5651384", "0.5638491", "0.5631748", "0.5625324", "0.5614471", "0.5611791", "0.5607248", "0.56026274", "0.5599349", "0.55902493", "0.55848736", "0.5582856", "0.55778617", "0.5573098", "0.55645585", "0.55622715", "0.55605376", "0.55554825", "0.55530286", "0.55511326", "0.5548941", "0.5547871", "0.5537343", "0.5531925", "0.55280703", "0.5526828", "0.55202353", "0.5516091", "0.5516091", "0.55136573", "0.55122566" ]
0.59642863
33
Creates a SnowflakeSource object.
def __init__( self, name: Optional[str] = None, database: Optional[str] = None, schema: Optional[str] = None, table: Optional[str] = None, query: Optional[str] = None, event_timestamp_column: Optional[str] = "", created_timestamp_column: Optional[str] = "", field_mapping: Optional[Dict[str, str]] = None, date_partition_column: Optional[str] = "", ): if table is None and query is None: raise ValueError('No "table" argument provided.') # If no name, use the table as the default name _name = name if not _name: if table: _name = table else: raise DataSourceNoNameException() super().__init__( _name, event_timestamp_column, created_timestamp_column, field_mapping, date_partition_column, ) # The default Snowflake schema is named "PUBLIC". _schema = "PUBLIC" if (database and table and not schema) else schema self.snowflake_options = SnowflakeOptions( database=database, schema=_schema, table=table, query=query )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_proto(data_source: DataSourceProto):\n return SnowflakeSource(\n field_mapping=dict(data_source.field_mapping),\n database=data_source.snowflake_options.database,\n schema=data_source.snowflake_options.schema,\n table=data_source.snowflake_options.table,\n event_timestamp_column=data_source.event_timestamp_column,\n created_timestamp_column=data_source.created_timestamp_column,\n date_partition_column=data_source.date_partition_column,\n query=data_source.snowflake_options.query,\n )", "def create_from_source(self):\n create_statement = self.source.create_statement\n self.create_from_statement(create_statement)\n # Add constraints\n constraints = self.source.constraints\n self.add_constraints(constraints)\n\n # Add indexes\n indexes = self.source.indexes\n self.add_indexes(indexes)\n\n # Add the non-referenced foreign keys\n non_referenced_fks = [x for x in self.source.foreign_keys if not x.referenced]\n self.add_foreign_keys(non_referenced_fks, override_table=self.name)", "def create(self, saved_source_id):\n raw_saved_source_data = self.es.get(index='.kibana', doc_type='doc', id=saved_source_id)\n\n saved_source_type = raw_saved_source_data['_source']['type']\n\n if saved_source_type == 'search':\n return SavedSearch(saved_source_id, self.conf)\n elif saved_source_type == 'visualization':\n return SavedVisualization(saved_source_id, self.conf)\n else:\n # TODO: Raise some exception.\n pass", "def source():\n\n source = models.Source(name=u\"Joe's Funerals.com\", url=u\"http://www.joesfunerals.com\")\n return source", "def new():\n source = os.environ.get('EVENT_SOURCE', 'commandline')\n if source not in ['filesystem', 'kinesis', 'environment', 'commandline']:\n eprint('docker-lambda.source.unknown source=%s' % source)\n sys.exit(1)\n else:\n log('docker-lambda.source.selected source=%s' % source)\n if source == 'filesystem':\n source = FilesystemEventSource()\n elif source == 'kinesis':\n source = KinesisEventSource()\n elif source == 'environment':\n source = EnvironmentEventSource()\n elif source == 'commandline':\n source = CommandLineEventSource()\n return source", "def create_project_funding_source(cls, name, description):\n project_funding_source = ProjectFundingSource.objects.create(\n name=name,\n description=description,\n )\n return project_funding_source", "def new_source(self, name):\n params = {\"name\": name}\n return JSONRPCRequest(self, \"newSource\", params)", "def new(name, source):", "def SphinxDummySourceClass(source: Any, *args: Any, **kwargs: Any) -> Any:\n return source", "def create_source(cls, data):\n class SourceOutput(OutputPort):\n\n \"\"\"A port attached to a source task.\"\"\"\n\n name = '0'\n description = str(data)\n\n def emits(self):\n \"\"\"Return the type of the provided datum.\"\"\"\n return type(data)\n\n class Source(Task):\n\n \"\"\"Generated source task.\"\"\"\n\n output_ports = {'0': SourceOutput}\n\n def get_input_data(self, name='0'):\n \"\"\"Return the datum associated with this source.\"\"\"\n return data\n\n def run(self, *arg, **kw):\n \"\"\"Do nothing.\"\"\"\n super(Source, self).run(*arg, **kw)\n self._output_data['0'] = data\n\n return Source()", "def test_project_funding_source_creation(self):\n name = 'A project function source name'\n description = 'A project funding source description'\n project_funding_source = self.create_project_funding_source(\n name=name,\n description=description,\n )\n self.assertTrue(isinstance(project_funding_source, ProjectFundingSource))\n self.assertEqual(project_funding_source.__str__(), 
project_funding_source.name)\n self.assertEqual(project_funding_source.name, name)\n self.assertEqual(project_funding_source.description, description)", "def _set_source(source, context):\n if isinstance(source, (str, list, dict, Dataset)):\n return Source(source, context)\n elif isinstance(source, Source):\n return source\n else:\n raise ValueError('Wrong source')", "def alert_source_create(context, values):\n alert_source_ref = models.AlertSource()\n alert_source_ref.update(values)\n\n session = get_session()\n with session.begin():\n session.add(alert_source_ref)\n\n return _alert_source_get(context,\n alert_source_ref['storage_id'],\n session=session)", "def create_source(self, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.create_source_with_http_info(**kwargs)\n else:\n (data) = self.create_source_with_http_info(**kwargs)\n return data", "def init_celeste_source_from_df(self, celestedf_row, is_star=None):\n if is_star in [True, False]:\n celestedf_row.is_star = is_star\n params = du.celestedf_row_to_params(celestedf_row)\n src = self._source_type(params, model=self, imgs=self.images)\n # add on some more info for tracking\n src.objid = celestedf_row.objid\n src.run = celestedf_row.run\n src.camcol = celestedf_row.camcol\n src.field = celestedf_row.field\n return src", "def give_source(self):\n has_src, src_sobj = self.get_sobj().ReferencedObject()\n if has_src:\n return self.__class__(self._std, self._bld, src_sobj.GetID())", "def mutate(self, info, input):\n # Convert input to dictionary\n data = api_utils.input_to_dictionary(input)\n data_source_type = Operation('ModelDataSourceType').create(**data)\n return CreateDataSourceType(data_source_type=data_source_type)", "def add_source(self, name, position):#)*args, **kwargs):\n return self._add_object(name, Source, position)#*args, **kwargs)", "def mutate(self, info, input):\n # Convert input to dictionary\n data = api_utils.input_to_dictionary(input)\n data_source = Operation('ModelDataSource').create(**data)\n return CreateDataSource(data_source=data_source)", "def to_proto(self) -> DataSourceProto:\n data_source_proto = DataSourceProto(\n type=DataSourceProto.BATCH_SNOWFLAKE,\n field_mapping=self.field_mapping,\n snowflake_options=self.snowflake_options.to_proto(),\n )\n\n data_source_proto.event_timestamp_column = self.event_timestamp_column\n data_source_proto.created_timestamp_column = self.created_timestamp_column\n data_source_proto.date_partition_column = self.date_partition_column\n\n return data_source_proto", "def __init__(self, source):\n self._source = source", "def New(*args, **kargs):\n obj = itkMeshSourcePSD3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def __init__(self, source_name, config, cache, gateway=None):\n\t\tamass.features.FeatureSource.__init__(\n\t\t\tself, source_name, config, cache, gateway)\n\t\tself.cache_table_name = self.cache.source_to_table_name(self.name)\n\t\tself.resources = {}\n\t\tfor i, resource in enumerate(config.get_resources(self.name)):\n\t\t\tself.resources[resource] = i\n\t\tself.history_path = config.get(\"inca\", \"dir\")\n\n\t\tself.columns = {\n\t\t\t\"SOURCE_RESOURCE\": \"VARCHAR(100)\",\n\t\t\t\"TARGET_RESOURCE\": \"VARCHAR(100)\",\n\t\t\t\"TEST_NAME\": \"VARCHAR(100)\",\n\t\t\t\"COLLECTED_DATE\": \"DATETIME\",\n\t\t\t\"RESULT\": \"BOOLEAN\",\n\t\t\t\"ERROR_MSG\": \"LONGTEXT NULL\"\n\t\t}\n\t\tself.column_names = sorted(self.columns.keys())\n\t\tself.primary_keys = 
[\n\t\t\t\"SOURCE_RESOURCE\", \"TARGET_RESOURCE\", \"TEST_NAME\", \"COLLECTED_DATE\"]\n\t\tself.test_names = sorted(test.test_names)", "def _create_source_stage(\n self, stage_name: str, output: codepipeline.Artifact):\n secret_token = ''\n repo = ''\n owner = ''\n github_action = codepipeline_actions.GitHubSourceAction(\n action_name='Github_Source',\n owner=owner,\n repo=repo,\n oauth_token=secret_token,\n output=output\n )\n return {\n 'stageName': stage_name,\n 'actions': [github_action]\n }", "def New(*args, **kargs):\n obj = itkMeshSourcePSSS2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def gen_from_source(source_id, *source_args, **source_kwargs):\n pass", "def __init__(self, source):\n self.source = source", "def __init__(self, source):\n self.source = source", "def New(*args, **kargs):\n obj = itkMeshSourcePSF3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def eqsrcFactory(name):\n from pyre.inventory import facility\n from EqKinSrc import EqKinSrc\n return facility(name, family=\"eq_kinematic_src\", factory=EqKinSrc)", "def get_source(source_name):\n if source_name == \"SCHOLAR_CENSUS\":\n from mec_data.source.scholar import ScholarSource\n\n return ScholarSource()\n elif source_name == \"UNIVERSITY_CENSUS\":\n from mec_data.source.university import UniversitySource\n\n return UniversitySource()", "def New(*args, **kargs):\n obj = itkMeshSourcePSD2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def from_config(config: Dict[str, Any]):\n source_name = config[\"source\"]\n host = config.get(\"host\", \"localhost\")\n port = config.get(\"port\", 8081)\n api_key = (config.get(\"api_key_name\", \"\"), config.get(\"api_key\", \"\"))\n return KukurSource(source_name, host, port, api_key)", "def New(*args, **kargs):\n obj = itkMeshSourcePSSS3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkMeshSourcePSUS2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def open_source_datastore(self, connection_string, *args, **kwargs):\n return self.open_datastore(connection_string, self.source_inspectors, *args, **kwargs)", "def New(*args, **kargs):\n obj = itkMeshSourcePSUS3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkMeshSourceMSS3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkMeshSourcePSF2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkMeshSourceMSS2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def from_proto(cls, snowflake_options_proto: DataSourceProto.SnowflakeOptions):\n snowflake_options = cls(\n database=snowflake_options_proto.database,\n schema=snowflake_options_proto.schema,\n table=snowflake_options_proto.table,\n query=snowflake_options_proto.query,\n )\n\n return snowflake_options", "def __init__(self, source):\r\n self.source = source", "def get_source(self, format: str) -> Source:\n if format in SOURCE_MAP:\n s = SOURCE_MAP[format]\n return s(self)\n else:\n raise TypeError(f\"{format} in an unrecognized format\")", "def setSourceName(self, instance, value):\n mapping = IAnnotations(instance).setdefault(\n 'collective.table',\n 
PersistentMapping()\n )\n mapping['source_name'] = value", "def __init__(self, data_source_identifier, verbose=True):\n pass", "def create_connection(connection_name: str, **kwargs) -> SnowflakeConnection:\n ret = get_db_parameters(connection_name)\n ret.update(kwargs)\n connection = snowflake.connector.connect(**ret)\n return connection", "def __init__(\n self,\n server_name,\n schema,\n database,\n staging_bucket_name,\n storage_integration_name,\n create_disposition,\n write_disposition,\n table_schema,\n user_data_mapper,\n username=None,\n password=None,\n private_key_path=None,\n raw_private_key=None,\n private_key_passphrase=None,\n o_auth_token=None,\n table=None,\n query=None,\n role=None,\n warehouse=None,\n expansion_service=None,\n ):\n # pylint: disable=line-too-long\n verify_credentials(\n username=username,\n password=password,\n private_key_path=private_key_path,\n raw_private_key=raw_private_key,\n o_auth_token=o_auth_token,\n )\n WriteDisposition.VerifyParam(write_disposition)\n CreateDisposition.VerifyParam(create_disposition)\n\n self.params = WriteToSnowflakeSchema(\n server_name=server_name,\n schema=schema,\n database=database,\n staging_bucket_name=staging_bucket_name,\n storage_integration_name=storage_integration_name,\n create_disposition=create_disposition,\n write_disposition=write_disposition,\n table_schema=table_schema,\n username=username,\n password=password,\n private_key_path=private_key_path,\n raw_private_key=raw_private_key,\n private_key_passphrase=private_key_passphrase,\n o_auth_token=o_auth_token,\n table=table,\n query=query,\n role=role,\n warehouse=warehouse,\n )\n self.user_data_mapper = user_data_mapper\n self.expansion_service = expansion_service or default_io_expansion_service()", "def set_source(self, source_name):\n self.source = source_name", "def make(self, *args, **kwargs):\n return _uhd_swig.amsg_source_sptr_make(self, *args, **kwargs)", "def add_source_file(self, filename):\n self.sources.add(Source.create(filename))", "def _init_extractor_from_source(self, source_name):\n try:\n source = [s for s in self.sources if s['id'] == source_name][0]\n except IndexError:\n source = None\n\n if source is None:\n return\n\n extractor_klass = load_object(source['extractor'])\n return extractor_klass(source)", "def mesh_source(*args, **kwargs):\n import itk\n instance = itk.MeshSource.New(*args, **kwargs)\n return instance.__internal_call__()", "def mesh_source(*args, **kwargs):\n import itk\n instance = itk.MeshSource.New(*args, **kwargs)\n return instance.__internal_call__()", "def _create_dataset(source=''):\n return ExperimentalDataset()", "def make(*args, **kwargs):\n return _uhd_swig.amsg_source_make(*args, **kwargs)", "def config_source(tbl, source):\r\n \r\n # Stupidly using source as a variable name twice\r\n source_ra = np.rad2deg(source._ra)\r\n source_dec = np.rad2deg(source._dec)\r\n source_name = source.name\r\n \r\n print('Source is: %s'%source.name)\r\n \r\n source = tbl.data[0]\r\n \r\n source['SOURCE_ID'] = 1\r\n source['SOURCE'] = source_name\r\n source['VELDEF'] = 'RADIO'\r\n source['VELTYP'] = 'GEOCENTR'\r\n source['FREQID'] = 1\r\n source['RAEPO'] = source_ra\r\n source['DECEPO'] = source_dec\r\n source['EQUINOX'] = 'J2000'\r\n \r\n # Things I'm just making up\r\n source['IFLUX'] = 0\r\n source['QFLUX'] = 0\r\n source['UFLUX'] = 0\r\n source['VFLUX'] = 0\r\n source['ALPHA'] = 0\r\n source['FREQOFF'] = 0\r\n \r\n tbl.data[0] = source\r\n \r\n return tbl", "def setSource(self, *args):\n return 
_libsbml.ExternalModelDefinition_setSource(self, *args)", "def __init__(\n self,\n sources: List[str],\n source_parameters: Sequence[SourceParameterFactory] = [],\n hints: Sequence[HintRowFactory] = [],\n ) -> None:\n\n self.sources = sources\n self.source_parameters = list(source_parameters)\n self.hints = list(hints)\n return", "def snowflake(self) -> Optional[pulumi.Input['FlowDestinationFlowConfigDestinationConnectorPropertiesSnowflakeArgs']]:\n return pulumi.get(self, \"snowflake\")", "def makeSourceCat(self, distortedWcs):\n loadRes = self.refObjLoader.loadPixelBox(bbox=self.bbox, wcs=distortedWcs, filterName=\"r\")\n refCat = loadRes.refCat\n refCentroidKey = afwTable.Point2DKey(refCat.schema[\"centroid\"])\n refFluxRKey = refCat.schema[\"r_flux\"].asKey()\n\n sourceSchema = afwTable.SourceTable.makeMinimalSchema()\n measBase.SingleFrameMeasurementTask(schema=sourceSchema) # expand the schema\n sourceCat = afwTable.SourceCatalog(sourceSchema)\n sourceCentroidKey = afwTable.Point2DKey(sourceSchema[\"slot_Centroid\"])\n sourceInstFluxKey = sourceSchema[\"slot_ApFlux_instFlux\"].asKey()\n sourceInstFluxErrKey = sourceSchema[\"slot_ApFlux_instFluxErr\"].asKey()\n\n sourceCat.reserve(len(refCat))\n for refObj in refCat:\n src = sourceCat.addNew()\n src.set(sourceCentroidKey, refObj.get(refCentroidKey))\n src.set(sourceInstFluxKey, refObj.get(refFluxRKey))\n src.set(sourceInstFluxErrKey, refObj.get(refFluxRKey)/100)\n return sourceCat", "def __init__(self, sourcedata=None, metadata=None):\n SourceHook.__init__(self, sourcedata=sourcedata, metadata=metadata)", "def set_source(self, source):\n self.data['source'] = source", "def makeSource(self, name):\n source = mock.Mock(spec=\"title description\".split())\n source.title = '%s title' % name\n source.description = '%s description' % name\n source.configurationView = '@%s_configuration' % name\n return source", "def source_instance(self, source_instance):\n self._source_instance = source_instance", "def _StageSource(self, source, gcs_staging_dir_bucket,\n gcs_staging_dir_object):\n\n suffix = '.tgz'\n if source.startswith('gs://') or os.path.isfile(source):\n _, suffix = os.path.splitext(source)\n\n source_object = 'source/{stamp}-{uuid}{suffix}'.format(\n stamp=times.GetTimeStampFromDateTime(times.Now()),\n uuid=uuid.uuid4().hex,\n suffix=suffix,\n )\n\n if gcs_staging_dir_object:\n source_object = gcs_staging_dir_object + '/' + source_object\n\n gcs_source_staging = resources.REGISTRY.Create(\n collection='storage.objects',\n bucket=gcs_staging_dir_bucket,\n object=source_object)\n\n gcs_client = storage_api.StorageClient()\n if source.startswith('gs://'):\n gcs_source = resources.REGISTRY.Parse(\n source, collection='storage.objects')\n staged_source = gcs_client.Rewrite(gcs_source, gcs_source_staging)\n else:\n if not os.path.exists(source):\n raise c_exceptions.BadFileException(\n 'could not find source [{src}]'.format(src=source))\n elif os.path.isdir(source):\n source_snapshot = snapshot.Snapshot(source)\n size_str = resource_transform.TransformSize(\n source_snapshot.uncompressed_size)\n log.status.Print(\n 'Creating temporary tarball archive of {num_files} file(s)'\n ' totalling {size} before compression.'.format(\n num_files=len(source_snapshot.files), size=size_str))\n staged_source = source_snapshot.CopyTarballToGCS(\n gcs_client, gcs_source_staging)\n elif os.path.isfile(source):\n unused_root, ext = os.path.splitext(source)\n if ext not in _ALLOWED_SOURCE_EXT:\n raise c_exceptions.BadFileException(\n 'Local file [{src}] is 
none of '.format(src=source) +\n ', '.join(_ALLOWED_SOURCE_EXT))\n log.status.Print('Uploading local file [{src}] to '\n '[gs://{bucket}/{object}].'.format(\n src=source,\n bucket=gcs_source_staging.bucket,\n object=gcs_source_staging.object,\n ))\n staged_source = gcs_client.CopyFileToGCS(source,\n gcs_source_staging)\n\n return staged_source", "def setUp(self):\n self.new_source = News_Source(\"CBSN\",\"CBSN NEWS\",\"CBSN is the leading free news platform\",\"cbsn.com\",\"business\",\"us\", \"en\")", "def get_source(cls, *args, **kwargs): # real signature unknown\n pass", "def get_source(cls, *args, **kwargs): # real signature unknown\n pass", "def get_source(cls, *args, **kwargs): # real signature unknown\n pass", "def make(self, *args):\n return _uhd_swig.usrp_source_sptr_make(self, *args)", "def create_source_power(\n self,\n face_id,\n input_power=\"0W\",\n thermal_condtion=\"Total Power\",\n surface_heat=\"0irrad_W_per_m2\",\n temperature=\"AmbientTemp\",\n radiate=False,\n source_name=None,\n ):\n if not source_name:\n source_name = generate_unique_name(\"Source\")\n props = {}\n props[\"Faces\"] = [face_id]\n props[\"Thermal Condition\"] = thermal_condtion\n props[\"Total Power\"] = input_power\n props[\"Surface Heat\"] = surface_heat\n props[\"Temperature\"] = temperature\n props[\"Radiation\"] = OrderedDict({\"Radiate\": radiate})\n bound = BoundaryObject(self, source_name, props, \"SourceIcepak\")\n if bound.create():\n self.boundaries.append(bound)\n return bound", "def test_simple_source_constructor():\n TESTPATH = \"/usr/local/share/testfile.mp3\"\n test01 = Source(path=TESTPATH)\n debug(test01)\n assert(test01.path == TESTPATH)\n assert(test01.fname == \"testfile.mp3\")\n assert(test01.root == \"testfile\")\n assert(test01.ext == \".mp3\")\n assert(test01.isValidExtension(\".mp3\") is True)", "def from_tle(cls, sate_id, source, date=None):\n # Get latest TLE, or the one corresponding to a specified date\n if date is None:\n date = dt.datetime.utcnow()\n\n tle = source.get_tle(sate_id, date)\n\n # Retrieve TLE epoch and corresponding position\n epoch = twoline2rv(tle.lines[0], tle.lines[1], wgs84).epoch\n pos = TLEPredictor(sate_id, source).get_position(epoch)\n\n # Convert position from ECEF to ECI\n gmst = gstime_from_datetime(epoch)\n position_eci = coordinate_systems.ecef_to_eci(pos.position_ecef, gmst)\n velocity_eci = coordinate_systems.ecef_to_eci(pos.velocity_ecef, gmst)\n\n # Convert position to Keplerian osculating elements\n p, ecc, inc, raan, argp, ta = rv2coe(\n wgs84.mu, np.array(position_eci), np.array(velocity_eci))\n sma = p / (1 - ecc ** 2)\n\n return cls(sma, ecc, degrees(inc), degrees(raan), degrees(argp), degrees(ta), epoch)", "def source(self, source):\n allowed_values = [\"FUGUE\", \"CUSTOM\"] # noqa: E501\n if source not in allowed_values:\n raise ValueError(\n \"Invalid value for `source` ({0}), must be one of {1}\" # noqa: E501\n .format(source, allowed_values)\n )\n\n self._source = source", "def add_source_file(name, folder, srcPath, submission):\n f = srcPath\n # check if the submission if for the help system\n if isinstance(submission, Post):\n return SourceFile.objects.get_or_create(name=name, folder=folder, file=f, submission=None)\n \n return SourceFile.objects.get_or_create(name=name, folder=folder, file=f, submission=submission)", "def __init__(self, source, factory, key = None):\n # TODO: This class current has more methods than ICachableSource. 
We either \n # need to update the interface, or create a new one for the extra methods\n self._key = key\n self.source = source\n self.factory = factory\n self._files = list()\n self._csv_dictreader_list = list()\n \n if isinstance(source, str):\n if os.path.isfile(source):\n _file = open(source,'rb')\n self._files.append(_file)\n self._csv_dictreader_list.append(DictReader(_file))\n elif os.path.isdir(source):\n for _entry in os.listdir(source):\n _file = open(_entry,'rb')\n self._files.append(_file)\n self._csv_dictreader_list.append(DictReader(_file))\n else:\n raise ValueError(\"expected string source parameter to reference a valid file or directory: \" + str(source))\n elif isinstance(source, DictReader):\n self._csv_dictreader_list.append(source)\n else:\n self._csv_dictreader_list.append(DictReader(source))", "def __init__(self, source, schema, show_all=False):\n self.source = source\n self.schema = schema\n self.show_all = show_all\n self._saved_columns = None", "def testCreateWmsSource(self):\n\n path = '/minerva_source_wms'\n name = 'testWMS'\n username = ''\n password = ''\n baseURL = 'http://demo.boundlessgeo.com/geoserver/ows'\n params = {\n 'name': name,\n 'username': username,\n 'password': password,\n 'baseURL': baseURL\n }\n response = self.request(path=path, method='POST', params=params, user=self._user)\n self.assertStatusOk(response)\n wmsSource = response.json\n minerva_metadata = wmsSource['meta']['minerva']\n self.assertEquals(wmsSource['name'], name, 'incorrect wms source name')\n self.assertEquals(minerva_metadata['source_type'], 'wms', 'incorrect wms source type')\n self.assertEquals(minerva_metadata['wms_params']['base_url'], baseURL, 'incorrect wms source baseURL')", "def __init__(self, source, *args, **kwargs):\n super(self.__class__, self).__init__()\n self._source = source\n self.provides = source.provides", "def __init__(__self__, *,\n id: str,\n source: str):\n pulumi.set(__self__, \"id\", id)\n pulumi.set(__self__, \"source\", 'Azure')", "def source_instance(self):\n return self._source_instance", "def New(*args, **kargs):\n obj = itkMeshSourcePSUC3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkMeshSourcePSUC2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def makeSourceCat(self, wcs, sourceSchema=None, doScatterCentroids=False):\n loadRes = self.refObjLoader.loadPixelBox(bbox=self.bbox, wcs=wcs, filterName=\"r\")\n refCat = loadRes.refCat\n\n if sourceSchema is None:\n sourceSchema = afwTable.SourceTable.makeMinimalSchema()\n measBase.SingleFrameMeasurementTask(schema=sourceSchema) # expand the schema\n sourceCat = afwTable.SourceCatalog(sourceSchema)\n\n sourceCat.resize(len(refCat))\n scatterFactor = 1.0\n if doScatterCentroids:\n np.random.seed(12345)\n scatterFactor = np.random.uniform(0.999, 1.001, len(sourceCat))\n sourceCat[\"slot_Centroid_x\"] = scatterFactor*refCat[\"centroid_x\"]\n sourceCat[\"slot_Centroid_y\"] = scatterFactor*refCat[\"centroid_y\"]\n sourceCat[\"slot_ApFlux_instFlux\"] = refCat[\"r_flux\"]\n sourceCat[\"slot_ApFlux_instFluxErr\"] = refCat[\"r_flux\"]/100\n\n # Deliberately add some outliers to check that the magnitude\n # outlier rejection code is being run.\n sourceCat[\"slot_ApFlux_instFlux\"][0: 4] *= 1000.0\n\n return sourceCat", "def create_source(self, source):\n if not os.path.isdir(source):\n os.makedirs(source)\n # Create a text file in the source directory.\n text_file = os.path.join(source, 
'notes.txt')\n with open(text_file, 'w') as handle:\n handle.write(\"This file should be included in the backup.\\n\")\n # Create a subdirectory in the source directory.\n subdirectory = os.path.join(source, 'subdirectory')\n os.mkdir(subdirectory)\n # Create a symbolic link in the subdirectory.\n symlink = os.path.join(subdirectory, 'symbolic-link')\n os.symlink('../include-me.txt', symlink)", "def from_dict(cls, dikt) -> 'SourceSchema':\n return util.deserialize_model(dikt, cls)", "def source(self, source):\n\n self._source = source", "def source(self, source):\n\n self._source = source", "def source(self, source):\n\n self._source = source", "def source(self, source):\n\n self._source = source", "def source(self, source):\n\n self._source = source", "def source(self, source):\n\n self._source = source", "def source(self, source):\n\n self._source = source", "def init_transform(source_path=None, template_path=None, **kwargs):\r\n kwargs.setdefault('adjust_for_shapefile', False)\r\n kwargs.setdefault('clean_whitespace_field_names', ())\r\n kwargs.setdefault('dissolve_field_names')\r\n kwargs.setdefault('extract_where_sql')\r\n kwargs.setdefault('field_name_change_map', {})\r\n kwargs.setdefault('insert_dataset_paths', ())\r\n kwargs.setdefault('insert_dicts_kwargs', ())\r\n kwargs.setdefault('insert_iters_kwargs', ())\r\n kwargs.setdefault('unique_id_field_names', ())\r\n kwargs.setdefault('xy_tolerance')\r\n import arcetl\r\n # Init.\r\n try:\r\n if source_path:\r\n etl = arcetl.ArcETL('Extract from ' + os.path.basename(source_path))\r\n etl.extract(source_path, extract_where_sql=kwargs['extract_where_sql'])\r\n else:\r\n etl = arcetl.ArcETL('Init from ' + os.path.basename(template_path))\r\n etl.init_schema(template_path)\r\n rename_fields(etl, kwargs['field_name_change_map'])\r\n # Insert features.\r\n for func, arg in ((insert_features_from_paths, 'insert_dataset_paths'),\r\n (insert_features_from_dicts, 'insert_dicts_kwargs'),\r\n (insert_features_from_iters, 'insert_iters_kwargs')):\r\n func(etl, kwargs[arg])\r\n # Alter attributes.\r\n clean_whitespace(etl, kwargs['clean_whitespace_field_names'])\r\n # Combine features.\r\n if kwargs['dissolve_field_names'] is not None:\r\n etl.transform(arcetl.features.dissolve,\r\n dissolve_field_names=kwargs['dissolve_field_names'],\r\n tolerance=kwargs['xy_tolerance'])\r\n # Finalize attributes.\r\n update_attributes_by_unique_ids(etl, kwargs['unique_id_field_names'])\r\n if kwargs['adjust_for_shapefile']:\r\n etl.transform(arcetl.combo.adjust_for_shapefile)\r\n except:\r\n etl.close()\r\n raise\r\n return etl", "def setup_target_snowflake(self):\n\n if self.env['TARGET_SNOWFLAKE']['is_configured']:\n self.run_query_target_snowflake(\n f'DROP SCHEMA IF EXISTS ppw_e2e_tap_postgres{self.sf_schema_postfix} CASCADE'\n )\n self.run_query_target_snowflake(\n f'DROP SCHEMA IF EXISTS ppw_e2e_tap_postgres_public2{self.sf_schema_postfix} CASCADE'\n )\n self.run_query_target_snowflake(\n f'DROP SCHEMA IF EXISTS ppw_e2e_tap_postgres_logical1{self.sf_schema_postfix} CASCADE'\n )\n self.run_query_target_snowflake(\n f'DROP SCHEMA IF EXISTS ppw_e2e_tap_postgres_logical2{self.sf_schema_postfix} CASCADE'\n )\n self.run_query_target_snowflake(\n f'DROP SCHEMA IF EXISTS ppw_e2e_tap_mysql{self.sf_schema_postfix} CASCADE'\n )\n self.run_query_target_postgres(\n f'DROP SCHEMA IF EXISTS ppw_e2e_tap_mysql_2{self.sf_schema_postfix} CASCADE'\n )\n self.run_query_target_snowflake(\n f'DROP SCHEMA IF EXISTS ppw_e2e_tap_s3_csv{self.sf_schema_postfix} CASCADE'\n )\n 
self.run_query_target_snowflake(\n f'DROP SCHEMA IF EXISTS ppw_e2e_tap_mongodb{self.sf_schema_postfix} CASCADE'\n )\n\n # Clean config directory\n shutil.rmtree(os.path.join(CONFIG_DIR, 'snowflake'), ignore_errors=True)", "def source(self, source: Source):\n self._source = source", "def create_new_feed_source(link):\n try:\n response = parse_new_feeds(link)\n if response[\"status\"]:\n if \"logo\" in response[\"details\"]:\n logo_link = response[\"details\"][\"logo\"]\n elif \"image\" in response[\"details\"]:\n logo_link = response[\"details\"][\"image\"][\"href\"]\n else:\n logo_link = ''\n FeedSource.objects.create(\n name=response[\"details\"][\"title\"],\n link=link,\n logo_link=logo_link,\n details=json.dumps(response[\"details\"]),\n )\n else:\n return feeds_pb2.OperationStatus(\n op_status=feeds_pb2.Status.Value('FAILURE'),\n details={'errors': feeds_pb2.RepeatedString(data=['Could not parse given link'])},\n )\n except ValidationError as e:\n exc = e\n logger(__name__, \"Could not add Feed Source due to {}\".format(str(exc)))\n errors = _get_errors(exc)\n return feeds_pb2.OperationStatus(\n op_status=feeds_pb2.Status.Value('FAILURE'),\n details={'errors': feeds_pb2.RepeatedString(data=errors)},\n )\n return feeds_pb2.OperationStatus(\n op_status=feeds_pb2.Status.Value('SUCCESS'),\n )", "def get_source(self, subset: str) -> Source:\n key, version = self._subsets[subset]\n return self._catalog[key][version]", "def create_om_sources(self): #TODO: Prob. should make file names specifiable\n if isinstance(self.sources, connectivity_module.Connectivity):\n sources_file = self._tvb_connectivity_to_txt(\"sources.txt\")\n om_sources = om.Matrix()\n elif isinstance(self.sources, surfaces_module.Cortex):\n sources_file = self._tvb_surface_to_tri(\"sources.tri\")\n om_sources = om.Mesh()\n else:\n LOG.error(\"sources must be either a Connectivity or Cortex.\")\n\n om_sources.load(sources_file)\n return om_sources", "def initialise_source(self, c, key):\n return 0" ]
[ "0.70331", "0.5826544", "0.571125", "0.56064165", "0.5491815", "0.5480783", "0.5440601", "0.54300314", "0.54277056", "0.5396629", "0.5375585", "0.5310123", "0.5300051", "0.5264698", "0.52542096", "0.5248735", "0.5194064", "0.5170257", "0.51294655", "0.5129411", "0.5090866", "0.50895363", "0.508571", "0.5058894", "0.5058546", "0.5053834", "0.50248283", "0.50248283", "0.50161153", "0.50151044", "0.50128245", "0.49964648", "0.49633336", "0.49552205", "0.49522233", "0.49521637", "0.49489933", "0.4947859", "0.49458188", "0.49359745", "0.49285012", "0.4906009", "0.49030674", "0.48982596", "0.4889142", "0.48538193", "0.48334184", "0.4831959", "0.48314956", "0.4816809", "0.4798767", "0.47861785", "0.47861785", "0.47600335", "0.47537816", "0.47357103", "0.4730384", "0.47221282", "0.47115597", "0.47036603", "0.47008798", "0.46916488", "0.46771336", "0.46747494", "0.46704233", "0.466331", "0.46606478", "0.46606478", "0.46606478", "0.46378607", "0.46259385", "0.46244043", "0.4617722", "0.46137652", "0.45977184", "0.4594658", "0.45893025", "0.45860693", "0.45856747", "0.4577831", "0.4573921", "0.45727068", "0.4560945", "0.45554987", "0.45485225", "0.45352557", "0.45239475", "0.45239475", "0.45239475", "0.45239475", "0.45239475", "0.45239475", "0.45239475", "0.4523453", "0.45230827", "0.45206052", "0.45130128", "0.4505313", "0.45032117", "0.45000914" ]
0.5692184
3
Creates a SnowflakeSource from a protobuf representation of a SnowflakeSource.
def from_proto(data_source: DataSourceProto): return SnowflakeSource( field_mapping=dict(data_source.field_mapping), database=data_source.snowflake_options.database, schema=data_source.snowflake_options.schema, table=data_source.snowflake_options.table, event_timestamp_column=data_source.event_timestamp_column, created_timestamp_column=data_source.created_timestamp_column, date_partition_column=data_source.date_partition_column, query=data_source.snowflake_options.query, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def FromProto(cls, proto_obj):\n source = GameSource()\n source.type = proto_obj.type\n if proto_obj.update_time_utc_str:\n source.update_date_time = datetime.strptime(\n proto_obj.update_time_utc_str, tweets.DATE_PARSE_FMT_STR)\n else:\n source.update_date_time = datetime.now()\n if proto_obj.twitter_account:\n source.account_id = long(proto_obj.twitter_account.id_str)\n source.tweet_text = proto_obj.tweet_text\n if proto_obj.score_reporter_url:\n source.score_reporter_url = proto_obj.score_reporter_url\n if not (source.account_id or source.score_reporter_url):\n raise GameModelError('Converting GameSource from malformed proto')\n return source", "def to_proto(self) -> DataSourceProto:\n data_source_proto = DataSourceProto(\n type=DataSourceProto.BATCH_SNOWFLAKE,\n field_mapping=self.field_mapping,\n snowflake_options=self.snowflake_options.to_proto(),\n )\n\n data_source_proto.event_timestamp_column = self.event_timestamp_column\n data_source_proto.created_timestamp_column = self.created_timestamp_column\n data_source_proto.date_partition_column = self.date_partition_column\n\n return data_source_proto", "def from_proto(cls, snowflake_options_proto: DataSourceProto.SnowflakeOptions):\n snowflake_options = cls(\n database=snowflake_options_proto.database,\n schema=snowflake_options_proto.schema,\n table=snowflake_options_proto.table,\n query=snowflake_options_proto.query,\n )\n\n return snowflake_options", "def from_dict(cls, dikt) -> 'SourceSchema':\n return util.deserialize_model(dikt, cls)", "def make(self, source):\n if isinstance(source, str):\n return copy(self.get(source))\n elif self.PB_CLASS and isinstance(source, self.PB_CLASS):\n item = copy(self.get(source.name))\n item._pb = source\n return item\n else:\n return copy(source)", "def to_proto(self) -> DataSourceProto.SnowflakeOptions:\n snowflake_options_proto = DataSourceProto.SnowflakeOptions(\n database=self.database,\n schema=self.schema,\n table=self.table,\n query=self.query,\n )\n\n return snowflake_options_proto", "def FromWireFormat(cls, value):\n return _GetFactory(cls).FromWireFormat(value)", "def FromProto(cls, proto_obj):\n if not proto_obj.last_update_source:\n raise GameModelError('No update source specified in Game creation.')\n # TODO(P2): refactor all constructors into one base function like in tweets.\n return Game(id_str=proto_obj.id_str,\n teams=[Team.FromProto(tm) for tm in proto_obj.teams],\n scores=proto_obj.scores,\n name=proto_obj.name,\n tournament_id=proto_obj.tournament_id_str,\n tournament_name=proto_obj.tournament_name,\n game_status=proto_obj.game_status,\n division=proto_obj.division,\n league=proto_obj.league,\n age_bracket=proto_obj.age_bracket,\n sources=[GameSource.FromProto(proto_obj.last_update_source)],\n key=game_key(proto_obj))", "def from_dict(cls, dikt) -> 'SourceAudit':\n return util.deserialize_model(dikt, cls)", "def create_from_source(self):\n create_statement = self.source.create_statement\n self.create_from_statement(create_statement)\n # Add constraints\n constraints = self.source.constraints\n self.add_constraints(constraints)\n\n # Add indexes\n indexes = self.source.indexes\n self.add_indexes(indexes)\n\n # Add the non-referenced foreign keys\n non_referenced_fks = [x for x in self.source.foreign_keys if not x.referenced]\n self.add_foreign_keys(non_referenced_fks, override_table=self.name)", "def FromBytes(cls, value: bytes):\n precondition.AssertType(value, bytes)\n return _GetFactory(cls).FromBytes(value)", "def fromSerpent(\n cls, source, sourcename=None, 
postcheck=True, strict=True, names=None,\n ):\n return super().fromSerpent(\n source,\n sourcename=sourcename,\n postcheck=postcheck,\n strict=strict,\n names=names,\n )", "def from_proto(cls, hive_options_proto: Any):\n\n pass", "def from_serialized_proto(cls, proto_string: bytes) -> 'TableInfo':\n proto = schema_pb2.TableInfo.FromString(proto_string)\n if proto.HasField('signature'):\n signature = nested_structure_coder.decode_proto(proto.signature)\n else:\n signature = None\n return cls(\n name=proto.name,\n sampler_options=proto.sampler_options,\n remover_options=proto.remover_options,\n max_size=proto.max_size,\n max_times_sampled=proto.max_times_sampled,\n rate_limiter_info=proto.rate_limiter_info,\n signature=signature,\n current_size=proto.current_size,\n num_episodes=proto.num_episodes,\n num_deleted_episodes=proto.num_deleted_episodes,\n num_unique_samples=proto.num_unique_samples,\n table_worker_time=proto.table_worker_time,\n )", "def from_struct(cls, struct, source):\n try:\n if struct['pubDate'] != 'None':\n date = datetime.datetime.strptime(struct['pubDate'], \"%a, %d %b %Y %H:%M\")\n else:\n date = datetime.datetime.now()\n\n return cls.create(\n title=struct['title'],\n description=struct['description'],\n dec_description=struct['dec_description'],\n link=struct['link'],\n pubDate=date,\n media=json.dumps(struct['media']),\n source=source,\n links=json.dumps(struct['links']),\n dec_links=json.dumps(struct['dec_links'])\n )\n except peewee.IntegrityError:\n return None", "def from_proto(cls, feature_set_proto: FeatureSetProto):\n\n feature_set = cls(\n name=feature_set_proto.spec.name,\n features=[\n Feature.from_proto(feature)\n for feature in feature_set_proto.spec.features\n ],\n entities=[\n Entity.from_proto(entity) for entity in feature_set_proto.spec.entities\n ],\n max_age=(\n None\n if feature_set_proto.spec.max_age.seconds == 0\n and feature_set_proto.spec.max_age.nanos == 0\n else feature_set_proto.spec.max_age\n ),\n labels=feature_set_proto.spec.labels,\n source=(\n None\n if feature_set_proto.spec.source.type == 0\n else Source.from_proto(feature_set_proto.spec.source)\n ),\n project=None\n if len(feature_set_proto.spec.project) == 0\n else feature_set_proto.spec.project,\n )\n feature_set._status = feature_set_proto.meta.status # type: ignore\n feature_set._created_timestamp = feature_set_proto.meta.created_timestamp\n return feature_set", "def from_bytes(buf: bytes) -> 'ProposalInfo':\n proposal_info_in_dict: dict = json_loads(buf.decode())\n proposal_info_in_dict[\"id\"] = bytes.fromhex(proposal_info_in_dict[\"id\"])\n proposal_info_in_dict[\"proposer\"] = Address.from_string(proposal_info_in_dict[\"proposer\"])\n return ProposalInfo(**proposal_info_in_dict)", "def parse_pbobject(source, pb_class):\n if isinstance(source, str):\n return open_pbobject(source, pb_class)\n elif isinstance(source, bytes):\n pb_object = pb_class()\n pb_object.ParseFromString(source)\n return pb_object\n else:\n logging.error(f'cannot parse type {type(source)}')", "def create(self, saved_source_id):\n raw_saved_source_data = self.es.get(index='.kibana', doc_type='doc', id=saved_source_id)\n\n saved_source_type = raw_saved_source_data['_source']['type']\n\n if saved_source_type == 'search':\n return SavedSearch(saved_source_id, self.conf)\n elif saved_source_type == 'visualization':\n return SavedVisualization(saved_source_id, self.conf)\n else:\n # TODO: Raise some exception.\n pass", "def from_dict(cls: T, source: dict[str, Any], connection: Connection) -> T:\n return 
super(Entity, cls).from_dict(source=source, connection=connection)", "def from_caffe_solver_protoxt(cls, caffe_solver_prototxt_file: Path):\n solver_param = caffe_pb2.SolverParameter()\n with open(caffe_solver_prototxt_file, 'rt') as f:\n pb2.text_format.Merge(f.read(), solver_param)\n dictionary = {'lr_policy': solver_param.lr_policy,\n 'base_lr': solver_param.base_lr,\n 'gamma': solver_param.gamma,\n 'momentum': solver_param.momentum,\n 'max_iter': solver_param.max_iter,\n 'stepsize': solver_param.stepsize,\n 'stepvalues': solver_param.stepvalue,\n 'weight_decay': solver_param.weight_decay,\n 'iter_size': solver_param.iter_size,\n 'from_prototxt': caffe_solver_prototxt_file}\n return cls(**dictionary)", "def FromProto(cls, proto_obj):\n key=None\n if proto_obj.twitter_account:\n twitter_id = long(proto_obj.twitter_account.id_str)\n key = team_twitter_key(twitter_id)\n else:\n twitter_id = 0\n if proto_obj.score_reporter_account:\n score_reporter_id = proto_obj.score_reporter_account.id\n key = team_score_reporter_key(score_reporter_id)\n else:\n score_reporter_id = ''\n return Team(twitter_id=twitter_id, score_reporter_id=score_reporter_id,\n parent=key)", "def from_config(config: Dict[str, Any]):\n source_name = config[\"source\"]\n host = config.get(\"host\", \"localhost\")\n port = config.get(\"port\", 8081)\n api_key = (config.get(\"api_key_name\", \"\"), config.get(\"api_key\", \"\"))\n return KukurSource(source_name, host, port, api_key)", "def from_json(cls, point_source, json_text):\n data = json.loads(json_text)\n point = point_source.getPoint(data['index'])\n # Fairly limited amount of verification we can do\n assert point.label == data['label']\n return point", "def _set_source(source, context):\n if isinstance(source, (str, list, dict, Dataset)):\n return Source(source, context)\n elif isinstance(source, Source):\n return source\n else:\n raise ValueError('Wrong source')", "def fromBytes(cls, inBytes):\n return cls.fromJson(inBytes.decode())", "def fromBytes(cls, inBytes):\n return cls.fromJson(inBytes.decode())", "def fromBytes(cls, inBytes):\n return cls.fromJson(inBytes.decode())", "def parse(filename_or_obj):\n if isinstance(filename_or_obj, basestring):\n # Anything ObsPy can read.\n try:\n src = obspy.readEvents(filename_or_obj)\n except:\n pass\n else:\n return Source.parse(src)\n # CMT solution file.\n try:\n return Source.from_CMTSOLUTION_file(filename_or_obj)\n except:\n pass\n raise SourceParseError(\"Could not parse the given source.\")\n elif isinstance(filename_or_obj, obspy.Catalog):\n if len(filename_or_obj) == 0:\n raise SourceParseError(\"Event catalog contains zero events.\")\n elif len(filename_or_obj) > 1:\n raise SourceParseError(\n \"Event catalog contains %i events. Only one is allowed. 
\"\n \"Please parse seperately.\" % len(filename_or_obj))\n return Source.parse(filename_or_obj[0])\n elif isinstance(filename_or_obj, obspy.core.event.Event):\n ev = filename_or_obj\n if not ev.origins:\n raise SourceParseError(\"Event must contain an origin.\")\n if not ev.focal_mechanisms:\n raise SourceParseError(\"Event must contain a focal mechanism.\")\n org = ev.preferred_origin() or ev.origins[0]\n fm = ev.preferred_focal_mechanism() or ev.focal_mechansisms[0]\n if not fm.moment_tensor:\n raise SourceParseError(\"Event must contain a moment tensor.\")\n t = fm.moment_tensor.tensor\n return Source(\n latitude=org.latitude,\n longitude=org.longitude,\n depth_in_m=org.depth,\n m_rr=t.m_rr,\n m_tt=t.m_tt,\n m_pp=t.m_pp,\n m_rt=t.m_rt,\n m_rp=t.m_rp,\n m_tp=t.m_tp)\n else:\n raise NotImplementedError", "def _from_cpp(self, str_msg, cls):\n msg = cls()\n result = msg.deserialize(str_msg)\n return result", "def __init__(\n self,\n gcs_source: Union[str, Sequence[str]],\n import_schema_uri: str,\n data_item_labels: Optional[Dict] = None,\n ):\n super().__init__()\n self._gcs_source = [gcs_source] if isinstance(gcs_source, str) else gcs_source\n self._import_schema_uri = import_schema_uri\n self._data_item_labels = data_item_labels", "def from_pb(cls, instance_pb, client):\n match = _INSTANCE_NAME_RE.match(instance_pb.name)\n if match is None:\n raise ValueError('Instance protobuf name was not in the '\n 'expected format.', instance_pb.name)\n if match.group('project') != client.project:\n raise ValueError('Project ID on instance does not match the '\n 'project ID on the client')\n instance_id = match.group('instance_id')\n\n result = cls(instance_id, client, _EXISTING_INSTANCE_LOCATION_ID)\n result._update_from_pb(instance_pb)\n return result", "def from_protobuf(cls, msg):\n if not isinstance(msg, cls._protobuf_cls):\n raise TypeError(\"Expected message of type \"\n \"%r\" % cls._protobuf_cls.__name__)\n kwargs = {k: getattr(msg, k) for k in cls._get_params()}\n return cls(**kwargs)", "def create_source(cls, data):\n class SourceOutput(OutputPort):\n\n \"\"\"A port attached to a source task.\"\"\"\n\n name = '0'\n description = str(data)\n\n def emits(self):\n \"\"\"Return the type of the provided datum.\"\"\"\n return type(data)\n\n class Source(Task):\n\n \"\"\"Generated source task.\"\"\"\n\n output_ports = {'0': SourceOutput}\n\n def get_input_data(self, name='0'):\n \"\"\"Return the datum associated with this source.\"\"\"\n return data\n\n def run(self, *arg, **kw):\n \"\"\"Do nothing.\"\"\"\n super(Source, self).run(*arg, **kw)\n self._output_data['0'] = data\n\n return Source()", "def deserialize(cls, record):\n return cls(\n source=record.get(\"source\", \"\"),\n category=record.get(\"category\", \"\"),\n name=record.get(\"name\", \"\"),\n message=record.get(\"message\", \"\"),\n timestamp=record.get(\"timestamp\", \"\"),\n **record[\"data\"],\n )", "def SphinxDummySourceClass(source: Any, *args: Any, **kwargs: Any) -> Any:\n return source", "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def from_bytes(self, *args, 
**kwargs): # real signature unknown\n pass", "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "def gen_from_source(source_id, *source_args, **source_kwargs):\n pass", "def __init__(self, source_id: str=None, source: List[StateSchema]=None): # noqa: E501\n self.swagger_types = {\n 'source_id': str,\n 'source': List[StateSchema]\n }\n\n self.attribute_map = {\n 'source_id': 'source_id',\n 'source': 'source'\n }\n\n self._source_id = source_id\n self._source = source", "def from_pb(cls, cell_pb):\n timestamp = _datetime_from_microseconds(cell_pb.timestamp_micros)\n if cell_pb.labels:\n return cls(cell_pb.value, timestamp, labels=cell_pb.labels)\n else:\n return cls(cell_pb.value, timestamp)", "def FromPb(pb):\n if isinstance(pb, str):\n real_pb = entity_pb.EntityProto()\n real_pb.ParseFromString(pb)\n pb = real_pb\n\n return Entity._FromPb(pb, require_valid_key=False)", "def __init__(\n self,\n name: Optional[str] = None,\n database: Optional[str] = None,\n schema: Optional[str] = None,\n table: Optional[str] = None,\n query: Optional[str] = None,\n event_timestamp_column: Optional[str] = \"\",\n created_timestamp_column: Optional[str] = \"\",\n field_mapping: Optional[Dict[str, str]] = None,\n date_partition_column: Optional[str] = \"\",\n ):\n if table is None and query is None:\n raise ValueError('No \"table\" argument provided.')\n\n # If no name, use the table as the default name\n _name = name\n if not _name:\n if table:\n _name = table\n else:\n raise DataSourceNoNameException()\n\n super().__init__(\n _name,\n event_timestamp_column,\n created_timestamp_column,\n field_mapping,\n date_partition_column,\n )\n\n # The default Snowflake schema is named \"PUBLIC\".\n _schema = \"PUBLIC\" if (database and table and not schema) else schema\n\n self.snowflake_options = SnowflakeOptions(\n database=database, schema=_schema, table=table, query=query\n )", "def from_bytes(self, ???):", "def _from_protobuf(cls, aux_data):\n data = AuxData.serializer.decode(\n BytesIO(aux_data.data), aux_data.type_name\n )\n return cls(data=data, type_name=aux_data.type_name)", "def _create_input_data(self):\n SCHEMA = parse_table_schema_from_json(\n '{\"fields\": [{\"name\": \"data\", \"type\": \"BYTES\"}]}')\n\n def format_record(record):\n # Since Synthetic Source returns data as a dictionary, we should skip one\n # of the part\n import base64\n return {'data': base64.b64encode(record[1])}\n\n with TestPipeline() as p:\n ( # pylint: disable=expression-not-assigned\n p\n | 'Produce rows' >> Read(\n SyntheticSource(self.parse_synthetic_source_options()))\n | 'Format' >> Map(format_record)\n | 'Write to BigQuery' >> WriteToBigQuery(\n dataset=self.input_dataset,\n table=self.input_table,\n schema=SCHEMA,\n create_disposition=BigQueryDisposition.CREATE_IF_NEEDED,\n write_disposition=BigQueryDisposition.WRITE_EMPTY))", "def load_stream(source):\n raise NotImplementedError(\"not implemented yet\")", "def FromDocument(cls, source):\n if not hasattr(source, 'read'):\n source = 
StringIO(source)\n start = source.read(1)\n source.seek(0)\n if start == \"<\":\n return cls.FromXML(source)\n elif start == \"!\" or start == \"#\":\n return cls.FromYAML(source)\n else:\n raise ParseError(\"unknown initial character %r\" % start)", "def from_dict(cls, dict_, archive_reader):\n\n obj = Source.__new__(cls)\n super().__init__(obj)\n obj.to_natives(dict_, archive_reader)\n return obj", "def create_from_kafka_message(\n kafka_message,\n envelope=None,\n force_payload_decoding=True,\n reader_schema_id=None\n):\n kafka_position_info = KafkaPositionInfo(\n offset=kafka_message.offset,\n partition=kafka_message.partition,\n key=kafka_message.key,\n )\n return _create_message_from_packed_message(\n packed_message=kafka_message,\n envelope=envelope or Envelope(),\n force_payload_decoding=force_payload_decoding,\n kafka_position_info=kafka_position_info,\n reader_schema_id=reader_schema_id\n )", "def fromString(line: Union[bytes, str]) -> SBSMessage:\n if isinstance(line, bytes):\n line = line.decode()\n\n values = line.rstrip(DELIMITER).split(\",\")\n\n if len(FieldNames) != len(values):\n raise Exception(\n \"Incorrect number of msg fields. \"\n f\"Expected {len(FieldNames)}, got {len(values)}. \"\n f\"values={values}, line={line}\"\n )\n\n attrs = {}\n for k, v in zip(FieldNames, values):\n v = v.strip() # remove any surrounding spaces\n if v:\n # perform type conversion if necessary\n if k in IntegerFields:\n v = int(v)\n elif k in FloatFields:\n v = float(v)\n elif k in BooleanFields:\n v = True if v == \"1\" else False\n elif k in DateFields:\n Y, M, D = [int(i) for i in v.split(\"/\")]\n v = datetime.date(Y, M, D)\n elif k in TimeFields:\n H, M, S = v.split(\":\")\n S, F = S.split(\".\")\n microsecond = int(int(F) * 1e3)\n v = datetime.time(\n hour=int(H), minute=int(M), second=int(S), microsecond=microsecond\n )\n # elif k in StringFields:\n # v = v.strip()\n # else:\n # # field is expected to be a string field\n # logger.warning(\n # 'Unexpected field name: {}'.format(k))\n else:\n v = None\n\n attrs[k] = v\n\n return SBSMessage(**attrs)", "def get_source(self, format: str) -> Source:\n if format in SOURCE_MAP:\n s = SOURCE_MAP[format]\n return s(self)\n else:\n raise TypeError(f\"{format} in an unrecognized format\")", "def __init__(\n self,\n gcs_source: Optional[Union[str, Sequence[str]]] = None,\n bq_source: Optional[str] = None,\n ):\n\n dataset_metadata = None\n\n if gcs_source and isinstance(gcs_source, str):\n gcs_source = [gcs_source]\n\n if gcs_source and bq_source:\n raise ValueError(\"Only one of gcs_source or bq_source can be set.\")\n\n if not any([gcs_source, bq_source]):\n raise ValueError(\"One of gcs_source or bq_source must be set.\")\n\n if gcs_source:\n dataset_metadata = {\"inputConfig\": {\"gcsSource\": {\"uri\": gcs_source}}}\n elif bq_source:\n dataset_metadata = {\"inputConfig\": {\"bigquerySource\": {\"uri\": bq_source}}}\n\n self._dataset_metadata = dataset_metadata", "def from_sdf(self, **kwargs):\n return self.__from_file(kwargs, _sdf)", "def __init__(self, source):\n self._source = source", "def from_flow(cls, flow: SequenceFlow, lane, backtrack_to, indent):\n instance = cls(\n spec_id=flow.id,\n name=flow.name,\n description=flow.name,\n lane=lane,\n backtrack_to=backtrack_to,\n indent=indent\n )\n instance.set_spec_type(flow)\n return instance", "def from_wire(cls, header, payload_bytes, *, sender_address=None,\n validate=False):\n return cls.from_components(header, b'', payload_bytes,\n sender_address=sender_address,\n validate=validate)", 
"def loads(cls, raw: bytes) -> 'Tag':\n meta = json.loads(raw.decode('utf-8'))\n return cls(\n training=cls.Training(\n timestamp=cls._strptime(meta['training']['timestamp']), ordinal=meta['training']['ordinal']\n ),\n tuning=cls.Tuning(timestamp=cls._strptime(meta['tuning']['timestamp']), score=meta['tuning']['score']),\n states=(uuid.UUID(s) for s in meta['states']),\n )", "def from_timestamp_pb(cls, stamp):\n microseconds = int(stamp.seconds * 1e6)\n bare = _from_microseconds(microseconds)\n return cls(\n bare.year,\n bare.month,\n bare.day,\n bare.hour,\n bare.minute,\n bare.second,\n nanosecond=stamp.nanos,\n tzinfo=datetime.timezone.utc,\n )", "def from_crasher(cls, s: Text) -> 'Sample':\n options = None\n args_batch = []\n input_lines = []\n for line in s.splitlines():\n m = re.match(r'\\s*//\\s*options:(.*)', line)\n if m:\n assert options is None\n options = SampleOptions.from_json(m.group(1))\n continue\n m = re.match(r'\\s*//\\s*args:(.*)', line)\n if m:\n args_batch.append(parse_args(m.group(1)))\n continue\n input_lines.append(line)\n input_text = '\\n'.join(input_lines)\n\n assert options is not None\n return Sample(input_text, options, tuple(args_batch))", "def from_file(cls, path_src):\n cp_cond = [os.path.exists(path_src), os.path.isfile(path_src),\n len(path_new) != 0]\n content = \"\"\n\n # read input from file\n if cp_cond[0] and cp_cond[1]:\n with open(path_src) as f:\n content = f.read()\n\n # connect object with file content\n return cls(path_src, inp_string=content, to_file=False)", "async def json_protocol_source(tmp_path: Path) -> ProtocolSource:\n simple_protocol = (\n get_shared_data_root() / \"protocol\" / \"fixtures\" / \"6\" / \"simpleV6.json\"\n )\n return await ProtocolReader().read_saved(files=[simple_protocol], directory=None)", "def from_dict(cls, data: Dict[str, Any]) -> \"ClassifierDocumentTypeDetails\":\n return cls(\n azure_blob_source=AzureBlobContentSource.from_dict(data.get(\"azure_blob_source\")) # type: ignore\n if data.get(\"azure_blob_source\") else None,\n azure_blob_file_list_source=AzureBlobFileListSource.from_dict(data.get(\"azure_blob_file_list_source\")) # type: ignore\n if data.get(\"azure_blob_file_list_source\") else None\n )", "def from_wire(cls, header, payload_bytes, *, sender_address=None,\n validate=False):\n\n return cls.from_components(header, b'', payload_bytes,\n sender_address=sender_address,\n validate=validate)", "def FromYAML(cls, source):\n\n # Late import to avoid a circular dependency.\n try:\n import bulletml.bulletyaml\n import yaml\n except ImportError:\n raise ParseError(\"PyYAML is not available\")\n else:\n try:\n return yaml.load(source)\n except Exception as exc:\n raise ParseError(str(exc))", "def from_text(cls, text):\n raw = json.loads(text)\n event_msg = EventMessage.from_text(raw[\"event_message\"])\n msg = cls(\n uri_name=raw[\"uri_name\"],\n event_message=event_msg,\n publisher_connection_id=raw[\"publisher_connection_id\"]\n )\n msg.publisher_node_id = raw[\"publisher_node_id\"]\n return msg", "def from_stanza(cls, doc, sentence_index, s):\n return Sentence(\n doc = doc,\n sentence_index = sentence_index,\n words = s.words,\n lemmas = s.lemmas,\n pos_tags = s.pos_tags,\n ner_tags = s.ner_tags,\n doc_char_begin = [t.character_span[0] for t in s],\n doc_char_end = [t.character_span[1] for t in s],\n dependencies = json.dumps(s.depparse().to_json()),\n gloss = s.text)", "def from_precomputed(kls, skelbuf, segid=None, vertex_attributes=None):\n if len(skelbuf) < 8:\n raise SkeletonDecodeError(\"{} bytes is 
fewer than needed to specify the number of verices and edges.\".format(len(skelbuf)))\n\n num_vertices, num_edges = struct.unpack('<II', skelbuf[:8])\n min_format_length = 8 + 12 * num_vertices + 8 * num_edges\n\n if len(skelbuf) < min_format_length:\n raise SkeletonDecodeError(\"The input skeleton was {} bytes but the format requires {} bytes.\".format(\n len(skelbuf), min_format_length\n ))\n\n vstart = 2 * 4 # two uint32s in\n vend = vstart + num_vertices * 3 * 4 # float32s\n vertbuf = skelbuf[ vstart : vend ]\n\n estart = vend\n eend = estart + num_edges * 4 * 2 # 2x uint32s\n\n edgebuf = skelbuf[ estart : eend ]\n\n vertices = np.frombuffer(vertbuf, dtype='<f4').reshape( (num_vertices, 3) )\n edges = np.frombuffer(edgebuf, dtype='<u4').reshape( (num_edges, 2) )\n\n skeleton = Skeleton(vertices, edges, segid=segid)\n\n if len(skelbuf) == min_format_length:\n return skeleton\n\n if vertex_attributes is None:\n vertex_attributes = kls._default_attributes()\n\n start = eend\n end = -1\n for attr in vertex_attributes:\n num_components = int(attr['num_components'])\n data_type = np.dtype(attr['data_type'])\n end = start + num_vertices * num_components * data_type.itemsize\n attrbuf = np.frombuffer(skelbuf[start : end], dtype=data_type)\n\n if num_components > 1:\n attrbuf = attrbuf.reshape( (num_vertices, num_components) )\n\n setattr(skeleton, attr['id'], attrbuf)\n start = end\n\n skeleton.extra_attributes = vertex_attributes\n\n return skeleton", "def from_wire(cls, header, payload_bytes, *, sender_address=None,\n validate=False):\n if not cls.HAS_PAYLOAD:\n return cls.from_components(header)\n payload = from_buffer(header.data_type, header.data_count,\n payload_bytes)\n return cls.from_components(header, *payload,\n sender_address=sender_address,\n validate=validate)", "def test_client_copy_from_protobuf_destination(self):\n destination = ProtoPlusFixture()\n destination = type(destination).pb(destination)\n origin = ProtoPlusFixture()\n origin.name = \"Test\"\n\n util.proto_copy_from(destination, origin)\n\n self.assertEqual(destination.name, \"Test\")\n self.assertIsNot(destination, origin)", "def test_prepare_source(source):\n assert isinstance(PseudoPotentialData.prepare_source(source), io.BytesIO)\n\n if isinstance(source, io.BytesIO):\n # If we pass a bytestream, we should get the exact same back\n assert PseudoPotentialData.prepare_source(source) is source", "def from_obj(cls, d: Mapping[Any, Any]) -> \"HintConfig\":\n\n return cls(\n d[\"sources\"],\n [SourceParameter.from_obj(o) for o in d[\"source_parameters\"]],\n [HintRow.from_obj(o) for o in d[\"hints\"]]\n )", "def from_dict(cls, dikt) -> 'Sentence':\n return util.deserialize_model(dikt, cls)", "def build(\n cls,\n file_descriptor: descriptor_pb2.FileDescriptorProto,\n file_to_generate: bool,\n naming: api_naming.Naming,\n opts: Options = Options(),\n prior_protos: Optional[Mapping[str, 'Proto']] = None,\n load_services: bool = True,\n all_resources: Optional[Mapping[str, wrappers.MessageType]] = None,\n ) -> 'Proto':\n return _ProtoBuilder(\n file_descriptor,\n file_to_generate=file_to_generate,\n naming=naming,\n opts=opts,\n prior_protos=prior_protos or {},\n load_services=load_services,\n all_resources=all_resources or {},\n ).proto", "def __init__(self, source_data: Dict[str, dict], verbose: bool = True):\n self.verbose = verbose\n self._validate_source_data(source_data=source_data, verbose=self.verbose)\n self.data_interface_objects = {\n name: data_interface(**source_data[name])\n for name, data_interface in 
self.data_interface_classes.items()\n if name in source_data\n }", "def from_buffer(cls, data, codec):\n new = cls()\n new.codec = codec\n new.data = codec.parse(data)\n return new", "def __init__(self, source):\n self.source = source", "def __init__(self, source):\n self.source = source", "def to_protobuf(self):\n self._validate()\n kwargs = {k: _convert(getattr(self, k), 'to_protobuf')\n for k in self._get_params()}\n return self._protobuf_cls(**kwargs)", "def from_string(cls, dlstr):\n\n try:\n\n key, x0, kf, n_upd = dlstr.lower().split()\n if key != UmbrellaSampling.key:\n raise ValueError()\n\n x0 = float(x0)\n kf = float(kf)\n n_upd = int(n_upd)\n\n except ValueError:\n raise ValueError(\"Require 'us x0 kf n_upd' not {!r}\".format(dlstr))\n\n return UmbrellaSampling(x0, kf, n_upd)", "def __init__(self, bytes = None, timestamp = None):\n from socket import IPPROTO_TCP\n src = pcs.Field(\"src\", 32)\n dst = pcs.Field(\"dst\", 32)\n reserved = pcs.Field(\"reserved\", 8, default = 0)\n protocol = pcs.Field(\"protocol\", 8, default = IPPROTO_TCP)\n length = pcs.Field(\"length\", 16)\n pcs.Packet.__init__(self, [src, dst, reserved, protocol, length],\n bytes = bytes)\n # Description MUST be set after the PCS layer init\n self.description = inspect.getdoc(self)\n if timestamp == None:\n self.timestamp = time.time()\n else:\n self.timestamp = timestamp\n\n self.data = None", "def from_bytes(cls, data: bytes, mtime: str = \"\") -> \"TextDocument\":\n srcbuf = io.BytesIO(data)\n encoding, lines = tokenize.detect_encoding(srcbuf.readline)\n if not lines:\n return cls(lines=[], encoding=encoding, mtime=mtime)\n return cls.from_str(data.decode(encoding), encoding=encoding, mtime=mtime)", "def load_prescribing_data_from_file(\n dataset_name, table_name, source_file_name):\n return load_data_from_file(\n dataset_name, table_name,\n source_file_name, PRESCRIBING_SCHEMA, _transform=prescribing_transform)", "def _from_java(cls, java_stage):\n # Load information from java_stage to the instance.\n\n logger = cls._sc._jvm.org.apache.log4j.Logger.getLogger(cls.__name__)\n _logSrcLang = \"Py:\"\n\n methodname = str(inspect.stack()[0][3])\n logMsg = _logSrcLang + cls.__name__ + \":\" + methodname + \": [Params: \" + \" java_stage => \" + str(\n java_stage) + \"]\"\n logger.info(logMsg)\n\n py_stages = [PythonJavaConversions._from_java_stage(s) for s in java_stage.stages()]\n # Create a new instance of this stage.\n py_stage = IBMSparkPipelineModel(py_stages)\n\n py_stage = PythonJavaConversions._resetUid(py_stage, java_stage.uid())\n return py_stage", "def to_proto(self):\n filename_tensor = array_ops.placeholder(\n shape=[], dtype=dtypes.string, name=\"saver_filename\")\n save_tensor = self._traced_save(filename_tensor)\n restore_op = self._traced_restore(filename_tensor).op\n return saver_pb2.SaverDef(\n filename_tensor_name=filename_tensor.name,\n save_tensor_name=save_tensor.name,\n restore_op_name=restore_op.name,\n version=saver_pb2.SaverDef.V2)", "def from_label_file(cls, label_file_path, out_path=FEATURES_DATA_PATH, source_path=RAW_DATA_PATH):\n df = pd.read_csv(label_file_path)\n filenames = df['filename']\n labels = df['label']\n return cls(filenames, labels, out_path=out_path, source_path=source_path)" ]
[ "0.69648314", "0.6726757", "0.62188435", "0.6085031", "0.54869676", "0.53810257", "0.5301759", "0.52652085", "0.5256398", "0.52558035", "0.52312374", "0.5175181", "0.51523924", "0.51177007", "0.5046059", "0.5044137", "0.50330454", "0.50251067", "0.5004873", "0.49959263", "0.49941787", "0.49703214", "0.49626857", "0.49538267", "0.49383047", "0.49380133", "0.49380133", "0.49380133", "0.49323687", "0.49278316", "0.49244732", "0.48968378", "0.48748505", "0.48681098", "0.4863207", "0.4861788", "0.48425275", "0.48425275", "0.48425275", "0.48425275", "0.48425275", "0.48425275", "0.48425275", "0.48425275", "0.48425275", "0.48425275", "0.48425275", "0.48425275", "0.48425275", "0.48425275", "0.48425275", "0.48425275", "0.48404694", "0.48404685", "0.47667024", "0.47642693", "0.47543278", "0.4747988", "0.474026", "0.47237673", "0.4711693", "0.47102717", "0.47088724", "0.47021425", "0.4698237", "0.46958145", "0.46886384", "0.46847877", "0.46774602", "0.4650953", "0.4641097", "0.46375266", "0.46340418", "0.4628673", "0.46256682", "0.4621777", "0.4602023", "0.45950794", "0.45849642", "0.4582143", "0.45799005", "0.4573241", "0.45672908", "0.455066", "0.45450172", "0.45446065", "0.45353758", "0.4528748", "0.45261812", "0.45236757", "0.451299", "0.451299", "0.45095176", "0.45053804", "0.44966453", "0.4492103", "0.44918004", "0.4489724", "0.44774482", "0.44758874" ]
0.8149464
0
Returns the database of this snowflake source.
def database(self): return self.snowflake_options.database
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_database(self):\n return self.database", "def database(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"database\")", "def database(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"database\")", "def database(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"database\")", "def database(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"database\")", "def database(self):\n return self._database", "def database(self):\n return self._database", "def database(self):\n return self._database", "def database(self):\n return self._database", "def get_database(self):\n if self._database is None:\n conn = self.get_connection()\n db = conn[self.database]\n self._database = db\n\n return self._database", "def database(self):\n\n return self._database", "def database(self):\n\n return self._database", "def dbname(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"dbname\")", "def database_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"database_name\")", "def db(self) -> str:\n return self._db", "def getDatabaseName(self):\n raise NotImplementedError", "def getDatabaseName(self):\n return self._base.getDatabaseName()", "def db(self):\n return self._project.db", "def database_name(self) -> str:\n return pulumi.get(self, \"database_name\")", "def getDb(self):\n return self.db", "def database_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"database_name\")", "def database():\n return conf().database", "def database_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"database_name\")", "def get_database(self, instance, name):\n return instance.get_database(name)", "def get_db_name(self):\n\t\treturn conf.db_name", "def schema(self):\n return self.snowflake_options.schema", "def database(self):\n try:\n return self._database\n except:\n database = self.application.connection[self.database_name]\n self._database = database\n return database", "def current_db(self):\n return self._current_db", "def get_db(self):\n return self._db", "def get_db(self):\n db = getattr(g, '_database', None)\n if db is None:\n db = g._database = connect(DATABASE)\n return db", "def getDatabaseName( self ):\n return self.mDbname", "def db(self):\n return self.application.db", "def db(self):\n return self.application.db", "def get_name(self) -> str:\n return self.dbname", "def get_database_url(self):\n return self.config['dbase_path']", "def db_name(self):\n return self._db_name", "def get_default_database(self):\n attr_name = mangle_delegate_name(self.__class__, '__default_database_name')\n default_db_name = getattr(self.delegate, attr_name)\n if default_db_name is None:\n raise ConfigurationError('No default database defined')\n\n return self[default_db_name]", "def db(self) -> Database:\n return self.impl.db", "def _get_database(self, options):\n database_key = options.get('database')\n if not database_key:\n if len(settings.DATABASES) >= 2:\n errmsg = \"Because this project contains more than one database, you\"\n errmsg += \" must specify the --database option.\"\n raise CommandError(errmsg)\n database_key = settings.DATABASES.keys()[0]\n return settings.DATABASES[database_key]", "def get_database(self, database=None):\n\t\tdatabase = database if database !=None else self.database\n\t\t\n\t\tif self._database is None:\n\t\t\tconn = self.get_connection()\n\t\t\tdb = conn[database]\n\t\t\tself._database = db\n\t\t\n\t\treturn self._database", "def source(self) -> Dict:\n return 
self._db_data.metadata[\"_source\"]", "def get_db():\n if not hasattr(g, \"sql_db\"):\n g.sql_db = connect_db()\n return g.sql_db", "def _get_db(self):\n gt_db = ...\n return gt_db", "def get_datasource_of():\n global datasource_of\n\n if not datasource_of:\n datasource_of = stixhelpers.datasource_of()\n \n return datasource_of", "def get_db():\n if 'db' not in g:\n g.db = sqlite3.connect(str(current_app.config['DATABASE']))\n return g.db", "def db(self):\r\n return self._db", "def get_db():\n db = getattr(g, '_database', None)\n if db is None:\n db = g._database = sqlite3.connect(current_app.config['DB_NAME'])\n return db", "def get_database_dsn():\n return getattr(config, f\"POSTGRES_DSN_{config.SERVER_MODE}\")", "def database_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"database_id\")", "def get_database_path():\n\treturn _paths[_DATABASE_PATH_KEY]", "def get_db(self):\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = self.connect_db()\n return g.sqlite_db", "def get_database(self, database, instance=None):\n return self._get(_database.Database, database)", "def get_db_path():\n \n return(db_run.db_abs_path)", "def get_sql_connection(self):\n return self.sql", "def get_database() -> Database:\n db_config = DatabaseConfig(DB_NAME)\n return connect_to_db(db_config)", "def database_id(self) -> str:\n return pulumi.get(self, \"database_id\")", "def db(cls):\n return getattr(db, cls.__name__)", "def get_tgis_database():\n global tgis_database\n return tgis_database", "def fulldbname(self):\n return 'myfls_'+self.user.username+'_'+self.dbname", "def get_db():\n if not hasattr(g, \"site_db\"):\n connection = pg.connect(\n dbname=\"dollsite\",\n user=\"dollsite\",\n password=app.config[\"DS_DB_PASSW\"]\n )\n g.site_db = connection\n return g.site_db", "def log_db():\n return pymongo.MongoClient(SCITRAN_PERSISTENT_DB_LOG_URI).get_database()", "def database_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"database_id\")", "def get_db():\n if \"db\" not in g:\n g.db = sqlite3.connect(current_app.config[\"DATABASE\"], detect_types=sqlite3.PARSE_DECLTYPES)\n g.db.row_factory = sqlite3.Row\n\n return g.db", "def get_database(self, name='presentations.db'):\r\n if name not in self._databases:\r\n self._databases[name] = QtDBConnector(self.get_filepath(name), PluginManager(self))\r\n return self._databases[name]", "def get_db():\n if not hasattr(g, 'postgres_db'):\n g.postgres_db = connect_db()\n return g.postgres_db", "def get_db():\n db = getattr(g, '_database', None)\n if db is None:\n db = g._database = sqlite3.connect(DATABASE)\n db.row_factory = sqlite3.Row\n return db", "def database_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"database_id\")", "def database_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"database_url\")", "def database(self) -> str:\n\t\treturn os.getenv('APP_DATABASE', 'memory').lower()", "def get_db():\n db = getattr(g, 'database', None)\n if db is None:\n db = sqlite3.connect(app.config['DATABASE'])\n db.row_factory = sqlite3.Row\n g.database = db\n return db", "def table(self):\n return self.snowflake_options.table", "def get_db():\n if ( g.get( 'db' ) is None ):\n g.db = connect_db()\n\n return g.db.connect()", "def get_database_directory(self):\n pass", "def GetDatabase(self):\r\n\r\n if self.database:\r\n return self.database\r\n \r\n if not os.path.exists(self.GetDataDir()):\r\n # Create the data folder, it still doesn't exist\r\n os.makedirs(self.GetDataDir())\r\n\r\n self.database = 
os.path.join(self.GetDataDir(), \"NDT_Database.db\")\r\n return self.database", "def get_database (name, parent=None):\n if \".\" in name:\n parent, name = name.split(\".\")\n\n if parent is not None:\n if not isinstance(parent, DatabaseFolder):\n parent = globals().get(parent, None)\n\n if parent is None or not isinstance(parent, DatabaseFolder):\n return None\n\n return parent.get(name, None)\n\n return globals().get(name, None)", "def get_database():\n if not REPO:\n site = pwb.Site(\"wikidata\", \"wikidata\")\n repo = site.data_repository()\n return repo\n return REPO", "def get_db():\n if 'db' not in g:\n g.db = sqlite3.connect(\n current_app.config['DATABASE'],\n detect_types=sqlite3.PARSE_DECLTYPES\n )\n g.db.row_factory = sqlite3.Row\n\n return g.db", "def source(self):\n for source in self.coordinator.data.sources:\n if source.SourceID == self.zone.SourceID:\n return source.Name\n return None", "def db_for_read(self, model, **hints):\n\n return self.db_name", "def mysql_database():\n return DATABASE", "def get_db(self):\n self.logger.info('in get_db()')\n try:\n return self.client[self.db_name]\n except Exception as e:\n self.logger.error(f'Error occurred while getting client {e}')", "def get_db():\n\tif not hasattr(g, 'sqlite_db'):\n\t\tg.sqlite_db = connect_db()\n\treturn g.sqlite_db", "def get_db():\n\tif not hasattr(g, 'sqlite_db'):\n\t\tg.sqlite_db = connect_db()\n\treturn g.sqlite_db", "def database_name(self):\n try:\n return self._database_name\n except:\n pass\n\n if 'X-UnitTest' in self.request.headers:\n if self.request.headers['X-UnitTest'] == 'True':\n self._database_name = TEST_DATABASE\n return TEST_DATABASE\n default_database = self.application.databases['default']['NAME']\n self._database_name = default_database\n return default_database", "def databases(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"databases\")", "def databases(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"databases\")", "def get_db():\n if 'db' not in g:\n g.db = sqlite3.connect(\n current_app.config['DATABASE'],\n detect_types=sqlite3.PARSE_DECLTYPES\n )\n g.db.row_factory = sqlite3.Row\n\n return g.db", "def getdb():\n if 'db' not in g:\n g.db = sqlite3.connect(\n config.DATABASE,\n detect_types=sqlite3.PARSE_DECLTYPES\n )\n g.db.row_factory = sqlite3.Row\n return g.db", "def get_db():\n if \"db\" not in g:\n host = current_app.config[\"HOST\"]\n dbname = current_app.config[\"DATABASE\"]\n #params = \"host='{}' dbname='{}' user=root\".format(host, dbname)\n params = \"dbname='{}' user=root\".format(dbname)\n g.db = psycopg2.connect(params)\n # 'g.db' corresponsds to a DB conn\n return g.db", "def db(self):\n return self._db or router.db_for_read(self.model, **self._hints)", "def get_db():\n if not hasattr(g, 'mongo_db'):\n g.db = get_mongo_db()\n\n return g.db", "def get_db(self, typename):\n return self._dbs[typename]", "def change_db(self):\n self.db = self.database.get()\n return self.db", "def get_db():\n db = getattr(g, '_database', None)\n if db is None:\n db = g._database = sqlite3.connect(DATABASE)\n db.execute('PRAGMA foreign_keys = ON')\n return db", "def _get_db(self):\n return DB(\n ClientStorage.ClientStorage((self.server, self.port))\n )", "def api_db():\n return pymongo.MongoClient(SCITRAN_PERSISTENT_DB_URI).get_database()", "def get(cls) -> SqliteDatabase:\n if cls.database_instance is None:\n cls.database_instance = SqliteDatabase(\n config.DATABASE_PATH, pragmas={\"journal_mode\": 
\"wal\"},\n )\n return cls.database_instance", "def get_database_name(self, data: dict) -> str: # pylint: disable=arguments-differ\n if not data[\"database_name\"] and self.service_connection.database:\n return self.service_connection.database\n return data[\"database_name\"]", "def get_db():\r\n if not hasattr(g, 'sqlite_db'):\r\n g.sqlite_db = connect_db()\r\n return g.sqlite_db", "def create_db_statement(self):\n return Engine.create_db_statement(self).replace(\"DATABASE\", \"SCHEMA\")" ]
[ "0.7446689", "0.7409722", "0.7409722", "0.7361835", "0.73268086", "0.70755297", "0.70755297", "0.70755297", "0.70755297", "0.7035698", "0.70008326", "0.70008326", "0.6984017", "0.69791114", "0.6932914", "0.69217163", "0.6915366", "0.6904952", "0.6886391", "0.68326914", "0.6831212", "0.68180186", "0.68058586", "0.67343223", "0.6707062", "0.6660032", "0.66552055", "0.6643545", "0.66404915", "0.66191036", "0.65855414", "0.6509208", "0.6509208", "0.65024555", "0.6493609", "0.6487258", "0.63931525", "0.63924396", "0.6386483", "0.6381118", "0.6380197", "0.63588357", "0.63389695", "0.6330122", "0.63064224", "0.6289847", "0.62615055", "0.6251084", "0.62143457", "0.6205681", "0.61918217", "0.618073", "0.6178663", "0.61699647", "0.6162427", "0.6151025", "0.6110464", "0.61031544", "0.6089271", "0.60881597", "0.6087585", "0.6086022", "0.6080814", "0.60807014", "0.60773045", "0.60672855", "0.60615116", "0.60575515", "0.60560983", "0.60452986", "0.6036049", "0.6032885", "0.6025378", "0.601832", "0.60148513", "0.60128516", "0.6006419", "0.6005859", "0.5999241", "0.59888524", "0.59855294", "0.5976704", "0.5976704", "0.5973971", "0.5970784", "0.5970784", "0.5949179", "0.5948948", "0.5943391", "0.5942529", "0.59359646", "0.59146416", "0.59115523", "0.5909281", "0.5897877", "0.58971554", "0.5887758", "0.5886561", "0.588267", "0.5878021" ]
0.85056674
0
Returns the schema of this snowflake source.
def schema(self): return self.snowflake_options.schema
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def schema(self):\n return self.table_info.schema", "def get_source_schema(cls) -> dict:\n source_schema = get_base_schema(\n root=True,\n id_=\"source.schema.json\",\n title=\"Source data schema\",\n description=\"Schema for the source data, files and directories\",\n version=\"0.1.0\",\n )\n for interface_name, data_interface in cls.data_interface_classes.items():\n source_schema[\"properties\"].update({interface_name: unroot_schema(data_interface.get_source_schema())})\n return source_schema", "def get_schema(self):\r\n return self.__schema", "def schema(self):\n return self._schema", "def get_schema(self):\n response = self.client.get(self._get_collection_url('schema'))\n\n return response.get('schema', {})", "def _schema(self):\n\n self._check_compiled()\n return self._compiled._schema", "def schema(self):\n return _parse_schema_resource(self._properties.get(\"schema\", {}))", "def get_schema(cls):\n return cls.schema()", "def get_schema(): # noqa: WPS440\n return config.DEFAULT_SCHEMA", "def schema(self) -> str:\n return parse_schema(self._spec[\"schema\"])", "def schema(self) -> 'outputs.TableSchemaResponse':\n return pulumi.get(self, \"schema\")", "def schema(self):\n # type: () -> object\n return self._schema", "def get_schema(self) -> dict:\n return schemas.get_object_schema(self.schema)", "def get_schema(self) -> ArchiveSchema:\n return self.schema", "def reference_schema(self) -> pulumi.Input['ApplicationApplicationConfigurationSqlApplicationConfigurationReferenceDataSourceReferenceSchemaArgs']:\n return pulumi.get(self, \"reference_schema\")", "def _get_schema(self):\n self._pick()\n return Schema()", "def schema(self) -> graphql.GraphQLSchema:\n return self._schema", "def schema(self):\n return self.prov[PROV_SCHEMA]", "def get_schema(self, engine_name):\n endpoint = \"engines/{}/schema\".format(engine_name)\n return self.swiftype_session.request('get', endpoint)", "def schema(self):\n pass", "def destination_schema(self) -> pulumi.Input['ApplicationApplicationConfigurationSqlApplicationConfigurationOutputDestinationSchemaArgs']:\n return pulumi.get(self, \"destination_schema\")", "def get_schema(self) -> dict:", "def schema(cls):\n return Schema.get_instance(cls)", "def schema(self) -> Schema:\n return next(schema for schema in self.metadata.schemas if schema.schema_id == self.metadata.current_schema_id)", "def schema(self):\n raise NotImplementedError", "def schema_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"schema_name\")", "def schema(self) -> Dict[str, Dict]:\n return self._schema", "def output_schema(self) -> Optional[str]:\n return pulumi.get(self, \"output_schema\")", "def get_meta_schema(self):\n return self._tc_meta_schema", "def getSchema( sourceDirectory ):\r\n if( sourceDirectory == settings.LEXISNEXIS_FILETAG ): return LexisNexisSchema()\r\n raise Exception( \"Filer for source <%s> is not registered in getSchema( source ).\" % ( sourceDirectory ) )", "def get_schema_defs():\n return SCHEMA_DEFS", "def get_schema(self, get_stats=False):\n query = \"schema {}\"\n\n results = self.run_dgraph_query_raw(query)\n\n schema = {}\n\n for row in results[\"schema\"]:\n table_name = row[\"predicate\"]\n\n if table_name not in schema:\n schema[table_name] = {\"name\": table_name, \"columns\": []}\n\n return list(schema.values())", "def schema(self, name):\n return model.Schema(self, name)", "def input_schema(self) -> pulumi.Input['ApplicationApplicationConfigurationSqlApplicationConfigurationInputInputSchemaArgs']:\n return pulumi.get(self, 
\"input_schema\")", "def getSchema(self):\n\n schema = [\n \"title\",\n \"body\",\n \"created_at\",\n \"id\",\n \"summary\",\n \"abstract\",\n \"keywords\",\n ]\n\n return schema", "def getDBSchema(self, desired=None):\n role = self.getRole(desired)\n schema = role[\"roleName\"]\n return schema", "def getSchema(cls):\n pass", "def table(self):\n return self.snowflake_options.table", "def schema() -> None:\n pass", "def get_schemas(self, conn):\n return conn.get_schemas()['table_schema']", "def get_schema() -> dict:\n raise NotImplementedError()", "def get_schema(self, repo, table):\n return self.user_con.get_schema(repo=repo, table=table)", "def schemas(self):\n if not self._schemas:\n self._schemas = get_schema(self.attributes.workspace.namespace, self.attributes.workspace.name)\n return self._schemas", "def get_schema():\n if not os.path.isfile(_schema_file):\n create_schema()\n with open(_schema_file, 'r') as fd:\n out = decode_json(fd)\n return out", "def get_schema(self):\n return ', '.join('%s:%s' % (col, self.schema[col]) for col in self.schema)", "def schemas(self):\n return model.Schemas(self)", "def database(self):\n return self.snowflake_options.database", "def schemas(self):\n return self.get_schemas()", "def sample_schema(self):\n if 'sample' not in self._schemas:\n logging.debug(f\"{self.id} - no schema? {self._schemas}\")\n return None\n return self._schemas['sample']", "def get_validation_schema(self):\n return self.validation_schema", "def get_schema(self):\n return ', '.join(\n '%s:%s' % (col, self.schema[col]) for col in self.schema)", "def source(self) -> List[StateSchema]:\n return self._source", "def _get_schema(self):\n self.to_dask()\n return Schema(dtype=self._df.dtypes,\n shape=(None, len(self._df.columns)),\n npartitions=self._df.npartitions,\n metadata=self.metadata)", "def schema(self):", "def schema_for_config(self) -> Dict[str, Any]:\n return self.rsimulator.schema_for_config()", "def get_schemas(self):\n query = mssqlqueries.get_schemas()\n logger.info(u'Schemas query: %s', query)\n for tabular_result in self.execute_query(query):\n return [x[0] for x in tabular_result[0]]", "def get_schema(self, engine, frame, name, keys=None):\n pandas_sql = SQLDatabase(engine, schema=None, meta=None)\n return pandas_sql._create_sql_schema(frame, name, keys=keys)", "def get_schema(schema): # noqa: E501\n return 'do some magic!'", "def schema_for_state(self) -> Dict[str, Any]:\n return self.rsimulator.schema_for_state()", "def get_default_schema(self):\n schema = self._connection.settings.get(\"schema\")\n if schema:\n res = (\n self.sql(_SELECT_SCHEMA_NAME_QUERY.format(escape(schema)))\n .execute()\n .fetch_all()\n )\n try:\n if res[0][0] == schema:\n return Schema(self, schema)\n except IndexError:\n raise ProgrammingError(\n f\"Default schema '{schema}' does not exists\"\n ) from None\n return None", "def _get_stored_schema(self, table: str) -> Optional[TableSchema]:\n try:\n with open(self.schemas / (table + '.json'), 'r') as f:\n return json.load(f)\n except FileNotFoundError:\n return None", "def source(self) -> Dict:\n return self._db_data.metadata[\"_source\"]", "def get_schema(db, sourcename):\n try:\n schema = db[\"tables\"][sourcename]\n schema[\"type\"] = constants.TABLE\n except KeyError:\n try:\n schema = db[\"views\"][sourcename]\n schema[\"type\"] = constants.VIEW\n except KeyError:\n raise ValueError(\"no such table/view\")\n return schema", "def get_schema() -> Dict[str, type]:\n schema: Dict[str, type] = {}\n\n # Add all columns from pipeline configs\n 
for pipeline in get_pipelines():\n schema.update(pipeline.schema)\n\n # Add new columns from adapter\n for col_old, col_new in OUTPUT_COLUMN_ADAPTER.items():\n if col_old in schema and col_new is not None:\n schema[col_new] = schema[col_old]\n\n return schema", "def source(self) -> XMLResource:\n return self.schema.source", "def schema_xml(self):\n return self.properties.get('SchemaXml', None)", "def get_local_schema(self, descriptor):\n return self._schemas[descriptor]['local']", "def _schema_type(self) -> Optional[type]:\n return SeriesSchema", "def schema(cls):\n schema = getattr(cls, \"__schema__\")\n if schema is None:\n raise Exception(f\"{cls.__name__}: not serializable; missing schema\")\n return schema", "def schema(self):\n schema_el = self.root.xpath(\n '/wsdl:definitions/wsdl:types/xsd:schema', namespaces=NS_MAP,\n )[0]\n return element_as_tree(schema_el)", "def JSONSchema(self, default=None):\n return self.data.get('metadata', {}).get('$schema', default)", "async def get_schema(self) -> AvroSchemaT:\n schema = None\n try:\n schema = await self._client.schema_by_topic(self._subject)\n except Exception:\n msg = f\"Could not retrieve schema for subject {self._subject}.\"\n raise SchemaException(msg)\n\n return schema", "def get_schemas(self):\n result = self.sql(\"SHOW DATABASES\").execute()\n return [row[0] for row in result.fetch_all()]", "def schema(self) -> Optional[TensorDatasetSchema]:\n try:\n features_schema = _infer_schema(self._features)\n targets_schema = None\n if self._targets is not None:\n targets_schema = _infer_schema(self._targets)\n return TensorDatasetSchema(features=features_schema, targets=targets_schema)\n except Exception as e:\n _logger.warning(\"Failed to infer schema for NumPy dataset. Exception: %s\", e)\n return None", "def get_schema_url(self):\n return self.NAME_TYPE_SCHEMAS.get(self.name_type, None)", "def schema(self):\n if self.key_type is None:\n return \"%s %s\" % (self.name, self.data_type)\n else:\n return \"%s %s %s KEY\" % (self.name, self.data_type, self.key_type)", "def get_schema_cls() -> t.Any:\n return None", "def subject_schema(self):\n return self.schemas.get(self.subject_property_name, None)", "async def get_schema(\n self, refresh: bool = False, headers: Optional[Dict[str, str]] = None\n ) -> graphql.GraphQLSchema:\n # TODO: consider adding ttl logic for expiring schemas for long running services\n if self._schema is None or refresh:\n self._schema = await self.introspect(headers=headers)\n return self._schema", "def get_schema(self, name):\n return Schema(self, name)", "def schema(self, plan: pb2.Plan) -> StructType:\n logger.info(f\"Schema for plan: {self._proto_to_string(plan)}\")\n schema = self._analyze(method=\"schema\", plan=plan).schema\n assert schema is not None\n # Server side should populate the struct field which is the schema.\n assert isinstance(schema, StructType)\n return schema", "def get_details_schema(self) -> dict:\n return schemas.get_object_schema(self.details_schema)", "def getSchema(self, fp_id: int) -> Dict:\n # Identify the format\n formatquery = (\n \"\"\"select file_format from file_pattern_detail where fp_id = {0}\"\"\".format(\n fp_id\n )\n )\n formattype = self.getDataAsDict(formatquery)[0]\n if formattype[\"file_format\"] == \"DELIMITED\":\n schemaQuery = \"\"\"\n select col_name,\n col_datatype,\n col_ordinal,\n primary_key_pos,\n is_defining_col,\n is_audit_col\n from delimited_col_detail where fp_detail_id = {}\n \"\"\".format(\n fp_id\n )\n data = self.getDataAsDict(schemaQuery)\n return data", 
"def _schema_type(self) -> Optional[type]:\n return SeriesPanelMetaSchema", "def _get_schema_using_query(self, query: str) -> sch.Schema:\n return sch.Schema.from_tuples(self._metadata(query))", "def get_schema_cls() -> t.Any:\n return SignupRequestSchema", "def _get_table_schema(self):\n\n return {\n 'AttributeDefinitions': [\n {\n 'AttributeName': self._key_field.name,\n 'AttributeType': self._key_field.data_type\n }\n ],\n 'TableName': self.table_name,\n 'KeySchema': [\n {\n 'AttributeName': self._key_field.name,\n 'KeyType': 'HASH'\n }\n ],\n 'ProvisionedThroughput': {\n 'ReadCapacityUnits': self.read_capacity_units,\n 'WriteCapacityUnits': self.write_capacity_units\n }\n }", "def schema(self):\n return {\n # _id is the airport 'Code'\n 'name': { 'type': 'string', 'required': True},\n 'city': { 'type': 'string', 'nullable': True},\n 'state': { 'type': 'string', 'nullable': True},\n 'stateName':{ 'type': 'string', 'nullable': True},\n 'loc': { 'type': 'dict', 'schema': {\n 'type': {'type': 'string'},\n 'coordinates': {'type': 'list'}}, 'nullable': False},\n 'country': { 'type': 'integer', 'nullable': True},\n 'countryName': { 'type': 'string', 'nullable': True},\n 'globalRegion': { 'type': 'string', 'nullable': True},\n 'WAC': { 'type': 'integer', 'nullable': True},\n 'notes': { 'type': 'string', 'nullable': True}}", "def fetch_table_schema(self, table_name):\n ddl = self.query(sql.show_create_table(table_name))\n if ddl:\n try:\n return parse_create(ddl[0][\"Create Table\"])\n except ParseError as e:\n raise OSCError(\n \"TABLE_PARSING_ERROR\",\n {\"db\": self._current_db, \"table\": self.table_name, \"msg\": str(e)},\n )", "def instance_schema(self):\n raise NotImplementedError", "def _get_schema(name):\n global SCHEMA\n\n loaded_schema = SCHEMA.get(name)\n if not loaded_schema:\n filename = \"{}/{}.json\".format(_get_directory(), name)\n if os.path.exists(filename):\n SCHEMA[name] = json.load(open(filename, 'r'))\n\n return SCHEMA.get(name)", "def get_schema(sid, did, scid):\n\n driver = get_driver(PG_DEFAULT_DRIVER)\n manager = driver.connection_manager(sid)\n conn = manager.connection(did=did)\n\n ver = manager.version\n server_type = manager.server_type\n\n # Fetch schema name\n status, schema_name = conn.execute_scalar(\n render_template(\n \"/\".join(['schemas',\n '{0}/#{1}#'.format(server_type, ver),\n 'sql/get_name.sql']),\n conn=conn, scid=scid\n )\n )\n\n return status, schema_name", "def schema(self):\n schema = \"%s %s %s %s('%s'\" % (self.name, self.data_type,\n self.index_type, self.key_type,\n self.index_name)\n if self.includes is not None:\n schema += ', ['\n schema += ', '.join((\"'%s'\" % i for i in self.includes))\n schema += ']'\n return schema + ')'", "def get_schemas(source: Path) -> Dict[str, Optional[dict]]:\n schema_cache = {}\n ext = '.schema'\n\n for filename in config.schemas:\n if filename.endswith(ext):\n schema_name = filename.rsplit(ext, maxsplit=1)[0]\n schema = validate_json_file(source / filename)\n schema_cache[schema_name] = schema\n if not isinstance(schema, dict):\n msg = 'BAD SCHEMA. 
A JSON schema must be of type \"dict\" (Python) / \"object\" (Javascript)'\n logger.write(filename, msg)\n\n return schema_cache", "def source(self):\n for source in self.coordinator.data.sources:\n if source.SourceID == self.zone.SourceID:\n return source.Name\n return None", "def getTableSchema(self, lsstLevel, dbName, tableName):\n return self._doRequest(self.httpClient.getTableSchema, lsstLevel, dbName, tableName)", "def get_model_schema(*, name: str) -> typing.Optional[types.Schema]:\n model = get_model(name=name)\n if model is None:\n return None\n return model._schema # pylint: disable=protected-access", "def _schema_type(self) -> Optional[type]:\n return PanelSchema", "def formatSchema(self):\n schema = json.loads(self.schemaSource)\n stack = [schema]\n # Strip out all the docs\n while len(stack) > 0:\n elm = stack.pop()\n if \"doc\" in elm:\n elm[\"doc\"] = \"\"\n for value in elm.values():\n if isinstance(value, dict):\n stack.append(value)\n elif isinstance(value, list):\n for dic in value:\n if isinstance(dic, dict):\n stack.append(dic)\n jsonData = json.dumps(schema)\n output = \"\\n\".join(textwrap.wrap(jsonData)) + \"\\n\"\n return output", "def config_schema() -> vol.Schema:\n return DISCOVERY_SCHEMA" ]
[ "0.7523006", "0.7474272", "0.731446", "0.72799426", "0.7248296", "0.72096664", "0.72077894", "0.7188622", "0.71739715", "0.71583384", "0.7152522", "0.71101445", "0.6935228", "0.68277", "0.6764784", "0.67362326", "0.67253757", "0.6720299", "0.67055655", "0.6627854", "0.66070616", "0.6565936", "0.6549127", "0.65442", "0.65237534", "0.65216213", "0.65009224", "0.6483633", "0.64834803", "0.6470147", "0.6455656", "0.64411", "0.64079034", "0.63735974", "0.63495964", "0.6339993", "0.633654", "0.63095665", "0.6309247", "0.62332267", "0.6214005", "0.61870253", "0.6160353", "0.6159445", "0.61471504", "0.6144849", "0.614215", "0.6111563", "0.6108518", "0.6104908", "0.6091509", "0.60822946", "0.60791975", "0.6073805", "0.6043102", "0.6005864", "0.60020787", "0.59708464", "0.595122", "0.5942502", "0.5929969", "0.59158635", "0.58951813", "0.58938074", "0.5875945", "0.58650285", "0.5838461", "0.58330303", "0.58247834", "0.58236164", "0.579599", "0.578599", "0.5779648", "0.57668966", "0.5755671", "0.5755477", "0.5753978", "0.57380885", "0.5734484", "0.5698478", "0.5656843", "0.5651311", "0.5650256", "0.564506", "0.56341654", "0.56259984", "0.5616584", "0.5610121", "0.558769", "0.55828303", "0.55768126", "0.55417264", "0.55406976", "0.55320483", "0.55079746", "0.5500487", "0.54985493", "0.5489346", "0.5482087", "0.54671806" ]
0.8667721
0
Returns the table of this snowflake source.
def table(self): return self.snowflake_options.table
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTable(self):\n return self.table", "def _get_table(self):\n\t\treturn self._table", "def get_tablename(self):\n return self.ds_table", "def getTable(self):\n\n raise NotImplementedError", "def table(self):\n if not self.exists:\n return None\n return self._get_table()", "def table(self):\n return self.reference.table", "def getTable(self):\n return self.db.table(self.entity)", "def table(self):\n return self.generator.table", "def table(self):\n return self._table", "def table(self):\n return self._table", "def table(self):\r\n return self._table", "def table(self):\n return self._table_name", "def table(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"table\")", "def getTable(self, name: str):\n query = f\"SELECT * FROM '{name}';\"\n result = sql.executeAndReadQuery(self.connection, query)\n return result", "def get_table_name(self):\n return self._table", "def table_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"table_name\")", "def __getTable(self):\n\n if not self.__table:\n tableConnectionParams = parseConnectionString(\n self.tableConnString);\n\n self.__table = Table(\n tableConnectionParams['name'],\n connection = getDbConnection(tableConnectionParams));\n\n return self.__table;", "def table(self, table_name):\n return self._get_storage().table(table_name)", "def destination_table(self) -> str:\n return pulumi.get(self, \"destination_table\")", "def table(self) -> 'outputs.PreventionJobTriggerInspectJobActionSaveFindingsOutputConfigTable':\n return pulumi.get(self, \"table\")", "def table(cls):\n return cls.__name__", "def get_target_table(self, source):\n target_tables = set()\n target_fields = [t[1] for t in self.mapping.items() if t[0].split('.')[0] == source]\n for f in target_fields:\n target_tables.update([c.split('.')[0] for c in f.keys()])\n self.target_tables = list(target_tables)\n return self.target_tables", "def getTableDefForTable(self, tableName):\n\t\tif not \".\" in tableName:\n\t\t\ttableName = \"public.\"+tableName\n\t\t\n\t\tfor row in self.readerConnection.queryToDicts(\n\t\t\t\t\"select sourcerd, tablename from dc.tablemeta where\"\n\t\t\t\t\" lower(tableName)=%(tableName)s\",\n\t\t\t\t{\"tableName\": tableName.lower()}):\n\t\t\tbreak\n\t\telse:\n\t\t\traise base.ui.logOldExc(\n\t\t\t\tbase.NotFoundError(tableName, \"table\", \"dc_tables\"))\n\n\t\treturn base.caches.getRD(row[\"sourcerd\"]\n\t\t\t).getById(row[\"tablename\"].split(\".\")[-1])", "def table_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"table_name\")", "def table_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"table_name\")", "def table_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"table_name\")", "def table(self):\n if self._table is None:\n self._table = list(self._iter_rows())\n\n return self._table", "def table(self) -> 'outputs.PreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTable':\n return pulumi.get(self, \"table\")", "def table(self):\n return self.t", "def getTable(self, tablename):\n tablename = self.prefix + tablename\n if not tablename in self.tables:\n self.tables[tablename] = Table( tablename, self.metadata, \\\n autoload=True, autoload_with=self.conn )\n\n return self.tables[tablename]", "def _select_table(self):\n\n return self.postgres.execute(f\"SELECT * FROM {self.table_name};\")", "def getTableByName(self, tablename):\n pass", "def return_astropy_table(self):\n table = Table()\n for name in self.hdfile.root.Data.Fibers.colnames:\n if hasattr(self, name):\n table[name] = 
getattr(self, name)\n\n return table", "def return_astropy_table(self):\n table = Table()\n for name in self.hdfile.root.Data.Fibers.colnames:\n if hasattr(self, name):\n table[name] = getattr(self, name)\n\n return table", "def _get_table(self, cursor):\n raise NotImplementedError", "def table(self, name: str) -> ir.TableExpr:\n qualified_name = self._qualify(name)\n return self.client.table(qualified_name, self.name)", "def tablename(self):\n _, tail = os.path.split(self.url)\n return tail[:-4]", "def table_name(self) -> str:\n return \"OLTP\"", "def rawtable(self):\n return self.__rawtable", "def holoviews_table(self) -> holoviews.Table:\n if self._holoviews_table is None:\n self._holoviews_table = holoviews.Table(self.table)\n return self._holoviews_table", "def _get_table_obj(self, mode):\n return self.client[f\"bigquery_{mode}\"].get_table(self.table_full_name[mode])", "def table_name(self) -> str:\n return self.model._meta.db_table", "def table_name() -> str:\n pass", "def get_table_data(self):\n return self.table_data", "def table(self, name):\r\n if name in self._tables:\r\n return _tables[name]\r\n\r\n table = Table(name, self._storage)", "def get_table(self):\n return copy.deepcopy(self._table)", "def table(entity) -> sa.Table:\n return entity.__table__", "def get_table_definition(jwt_payload: dict, schema_name: str, table_name: str):\n DJConnector.set_datajoint_config(jwt_payload)\n\n schema_virtual_module = dj.create_virtual_module(schema_name, schema_name)\n return getattr(schema_virtual_module, table_name).describe()", "def table(self, name: str, database: str | None = None) -> ir.Table:\n alch_table = self._get_sqla_table(name, schema=database)\n node = self.table_class(source=self, sqla_table=alch_table)\n return self.table_expr_class(node)", "def get_table(base, engine):\n class w1_temp_table(base):\n __tablename__ = 'w1_temp'\n __table_args__ = {\"useexisting\": True}\n\n id = sa.Column(sa.types.Integer, primary_key=True)\n logger_id = sa.Column(sa.types.Integer)\n value = sa.Column(sa.types.String)\n datetime = sa.Column(sa.types.DateTime)\n return w1_temp_table", "def create_table(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"create_table\")", "def create_table(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"create_table\")", "def schema(self):\n return self.snowflake_options.schema", "def getSourceName(self, instance):\n mapping = IAnnotations(instance).setdefault(\n 'collective.table',\n PersistentMapping()\n )\n return mapping.get('source_name', self.defaultSourceName)", "def parse_single_table(source, **kwargs):\n if kwargs.get(\"table_number\") is None:\n kwargs[\"table_number\"] = 0\n\n votable = parse(source, **kwargs)\n\n return votable.get_first_table()", "def get_my_tables(self):\n qnum = self.master('sql', att={'type': 'table'}) # it's a Table._call_() function call\n if self.run():\n return (self.table_factory(self.get_table_info(result[0])) for result in self.results[qnum])\n else:\n print('An error has occurred when initializing the database.')", "def schema(self):\n return self.table_info.schema", "def creates_table(self):\n return self.statements[0].creates_table()", "def table_name(self):\n return self._new_table.name", "def source(self) -> Dict:\n return self._db_data.metadata[\"_source\"]", "def tables(self):\n if self.table is None:\n raise GiraffeError(\"Target table has not been set.\")\n return [\n \"{}_wt\".format(self.table),\n \"{}_log\".format(self.table),\n \"{}_e1\".format(self.table),\n \"{}_e2\".format(self.table),\n ]", 
"def table_reference(self) -> 'outputs.PreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReference':\n return pulumi.get(self, \"table_reference\")", "def tables(self):\n result = self.execute(self.commands.get_tables(self.name))\n return [x[0] for x in result]", "def table(self):\n table = ExpressionProfile.__profile_to_table(json.loads(self.profile))\n\n return table", "def get_master_table(self):\n return Table('sqlite_master', 'name', self, {'type', 'name', 'tbl_name', 'rootpage', 'sql'})", "def load_source_table():\n src_file = os.path.join(data.Paths.arclines, 'sources', 'arcline_sources.ascii')\n # Load\n sources = Table.read(src_file, format='ascii.fixed_width', comment='#')\n # Return\n return sources", "def table(self, name):\r\n return NamedTable(self.name, name)", "def table_reference(self) -> 'outputs.TableReferenceResponse':\n return pulumi.get(self, \"table_reference\")", "def list_tables(self):\n return LIST_TABLES(db=self.db)", "def get_table(table_id: int) -> Table:\n table = Table.query.filter_by(id=table_id).first()\n return table", "def _get_tables(self) -> pd.DataFrame:\n return self.server._execute_extract(\n \"SELECT tablename FROM pg_catalog.pg_tables WHERE schemaname = '{}'\".format(\n self.name\n )\n )", "def table(self, table_name):\n return self._create_table(table_name)", "def get_transit_table():\n # from Seader et al.\n # \"The search includes a total of $198,646$ targets,\n # of which $112,001$ were observed in every quarter\n # and $86,645$ were observed in a subset of the 17 quarters.\"\n import astropy.io.ascii as ascii\n import pkgutil\n global transit_table\n if transit_table is None:\n logger.info(\"Reading Kepler TCE table\")\n transit_table = ascii.read(pkgutil.get_data(__name__, 'data/keplerTCE_DR24.txt'))\n return transit_table", "def get_tables(self):\n return self._get_types_from_default_ns(Table)", "def tables(self):\n return self.properties.get('tables',\n WorkbookTableCollection(self.context, ResourcePath(\"tables\", self.resource_path)))", "def __tablename__(cls):\n return get_table_name(cls.__name__)", "def tables(self) -> list:\n return self.list_tables()", "def get_tables(self):\n return list(self._metadata['tables'].keys())", "def read_table(self, table):\n return READ_TABLE(table, db=self.db)", "def _table_path(self):\n return self._switch.path_on_odl + \"flow-node-inventory:table/%d/\" % self._table_id", "def getTables(self):\n\treturn self.dbNames", "def get_table_byname(self, aTable):\n if aTable in self._tablesObjects.keys():\n oTable = self._tablesObjects[aTable]\n else:\n oTable = None\n return oTable", "def createTable(self):\n ## reading the source file\n\n \n ## building the hive script\n\n ## creating the metastore table by executing the Hive script on the remote machine (SSH)", "def tableName():\n return \"people\"", "def getTAPTables(self):\n\t\treturn [r[\"tablename\"] for r in\n\t\t\tself.readerConnection.queryToDicts(\n\t\t\t\t\"select tablename from dc.tablemeta where adql\")]", "def schema(self) -> 'outputs.TableSchemaResponse':\n return pulumi.get(self, \"schema\")", "def do_get_table(tpath):\n table = provider.get_type_table(tpath)\n try:\n runs = run.split('-')\n ass = provider.get_assignment(tpath, runs[0], var)\n except:\n print \"no entry found\"\n return\n sys.stdout.write(ass.constant_set.vault)", "def chart_data_table(self):\n return self.container['chart_data_table']", "def get(self):\n return {\"tables\": public_tables}", "def _get_table_reflection(self, schema: str, table: str) -> Table:\n 
return self.sql_metadata.tables.get(f\"{schema}.{table}\",\n Table(table, self.sql_metadata, schema=schema, autoload=True))", "def getTable(table):\n\n return session.query(table).all()", "def fetch_table_schema(self, table_name):\n ddl = self.query(sql.show_create_table(table_name))\n if ddl:\n try:\n return parse_create(ddl[0][\"Create Table\"])\n except ParseError as e:\n raise OSCError(\n \"TABLE_PARSING_ERROR\",\n {\"db\": self._current_db, \"table\": self.table_name, \"msg\": str(e)},\n )", "def getTableByIndex(self, index):\n pass", "def get_reliability_table_for_source(source):\n dq_file = 'DQ_Reliability_Scores_Table3-3fromERGreport.csv'\n df = (pd.read_csv(DATA_PATH.joinpath(dq_file),\n usecols=['Source', 'Code', 'DQI Reliability Score'])\n .query('Source == @source')\n .reset_index(drop=True)\n .drop(columns='Source'))\n return df", "def source(self):\n for source in self.coordinator.data.sources:\n if source.SourceID == self.zone.SourceID:\n return source.Name\n return None", "def sensorsTableName(self):\n return 'sensors'", "def show_data_table(self):\n return self.container['show_data_table']", "def table_id(self) -> str:\n return pulumi.get(self, \"table_id\")", "def show_tables(self) -> List[str]:\n return list(self.tb.keys())", "def get_table_attribute_from_base_class(self, source_table_name: str):\n return getattr(self.connection.base.classes, source_table_name)" ]
[ "0.7333813", "0.72519577", "0.7144599", "0.7141145", "0.6996592", "0.6968488", "0.6951184", "0.6948223", "0.69295055", "0.69295055", "0.68013984", "0.6795885", "0.674512", "0.66221476", "0.65775824", "0.6573351", "0.6545", "0.65317136", "0.6509364", "0.6500206", "0.6481945", "0.6466261", "0.64085484", "0.640222", "0.640222", "0.640222", "0.6378164", "0.636884", "0.634804", "0.63110393", "0.6240701", "0.622321", "0.6211897", "0.6211897", "0.6169696", "0.6169535", "0.6150819", "0.6142454", "0.6138414", "0.61200815", "0.6071805", "0.60537684", "0.6030443", "0.60277086", "0.60220903", "0.60148704", "0.6011849", "0.5947498", "0.5938555", "0.5926822", "0.5917158", "0.5917158", "0.5907627", "0.5902087", "0.5881717", "0.5879901", "0.5868613", "0.5865599", "0.5855684", "0.5855388", "0.5842517", "0.5839202", "0.5826741", "0.5822512", "0.5820844", "0.58073676", "0.580439", "0.5797312", "0.5791422", "0.57738173", "0.5773226", "0.5759734", "0.5728471", "0.5727682", "0.57151395", "0.5700794", "0.5699678", "0.56858313", "0.5669729", "0.56674117", "0.56668806", "0.5654746", "0.5642949", "0.563544", "0.56334573", "0.5624433", "0.56172127", "0.5590001", "0.5579936", "0.5579607", "0.55700886", "0.55586565", "0.55563384", "0.55454504", "0.554046", "0.5533113", "0.5530029", "0.5528438", "0.55282795", "0.55220366" ]
0.8042992
0
Returns the snowflake options of this snowflake source.
def query(self): return self.snowflake_options.query
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_options(self):\n\t\treturn self.options", "def options(self):\r\n return self._options", "def options(self):\n return self.__options", "def options(self) -> Mapping[str, str]:\n return pulumi.get(self, \"options\")", "def _get_options(self):\n return self.options", "def options(self):\n return self._options", "def options(self):\n return self._options", "def options(self):\n return self._options", "def options(self):\n return self._options", "def options(self):\n return self._options", "def get_options(self):\n return []", "def _options(self):\n return", "def get_options(self):\r\n return self._option_values", "def options(self): # pragma: no cover\r\n return ''", "def to_proto(self) -> DataSourceProto.SnowflakeOptions:\n snowflake_options_proto = DataSourceProto.SnowflakeOptions(\n database=self.database,\n schema=self.schema,\n table=self.table,\n query=self.query,\n )\n\n return snowflake_options_proto", "def options(self):\n return list(self._moptions.keys())", "def get_options(self):\n return self._scoped_options", "def options(self) -> Union[object, Sequence]:\n return self._options", "def options(self):\n\t\treturn self.config_parser.options(self.section_name)", "def hive_options(self):\n return self._hive_options", "def _get_options(self) -> Dict[str, Any]:\n # TODO: handle holidays as well\n return {\n \"growth\": self.growth,\n \"changepoints\": self.changepoints and list(self.changepoints.astype('str')),\n \"n_changepoints\": self.n_changepoints,\n \"changepoint_range\": self.changepoint_range,\n \"changepoint_prior_scale\": self.changepoint_prior_scale,\n \"mcmc_samples\": self.mcmc_samples,\n \"interval_width\": self.interval_width,\n \"uncertainty_samples\": self.uncertainty_samples,\n \"yearly_seasonality\": self.yearly_seasonality,\n \"weekly_seasonality\": self.weekly_seasonality,\n \"daily_seasonality\": self.daily_seasonality,\n \"seasonality_mode\": self.seasonality_mode,\n \"seasonality_prior_scale\": self.seasonality_prior_scale,\n\n \"seasonalities\": self.seasonalities,\n \"extra_regressors\": self.extra_regressors\n }", "def opt_in(self) -> List[str]:\n return self.raw_config.get(\"opt_in\", [])", "def get_all_options(self): \n return self._options.items()", "async def get_options(self):", "def options(self) -> list[str] | None:\n if hasattr(self, \"_attr_options\"):\n return self._attr_options\n if hasattr(self, \"entity_description\"):\n return self.entity_description.options\n return None", "def schema(self):\n return self.snowflake_options.schema", "def options(self):\n options = {\n o.name: getattr(self, o.name)\n for o in _OPTIONS\n }\n return options", "def _getOptions(self):\n args = []\n for iname, value in self.options:\n args.append('-' + iname)\n if value != 'true':\n args.append(value)\n return args", "def common_options(self):\n return self._common_options", "def get_options(cls):\n return {\n \"name\": str,\n ConfigOption(\"install_files\", default=None): Or(None, list),\n ConfigOption(\"timeout\", default=300): int,\n ConfigOption(\"log_regexps\", default=None): Or(None, list),\n ConfigOption(\"stdout_regexps\", default=None): Or(None, list),\n ConfigOption(\"stderr_regexps\", default=None): Or(None, list),\n ConfigOption(\"file_logger\", default=None): Or(None, str),\n ConfigOption(\"async_start\", default=False): bool,\n ConfigOption(\"report_errors_from_logs\", default=False): bool,\n ConfigOption(\"error_logs_max_lines\", default=10): int,\n ConfigOption(\"path_cleanup\", default=True): bool,\n ConfigOption(\"pre_start\", 
default=None): validate_func(\"driver\"),\n ConfigOption(\"post_start\", default=None): validate_func(\"driver\"),\n ConfigOption(\"pre_stop\", default=None): validate_func(\"driver\"),\n ConfigOption(\"post_stop\", default=None): validate_func(\"driver\"),\n }", "def get_simulation_options(self):\n return self.opts", "def list(self):\n return self._options", "def options(self): # 获取火车票查询选项 ex: iquary -dgktz 上海 北京 返回dgktz\n arg = self.get(0) # -dgktz\n if arg.startswith('-') and not self.is_asking_for_help:\n return arg[1:] # dgktz\n return ''.join(x for x in arg if x in 'dgktz')", "def options(self, scriptable=None):\n options = list(Insert.KIND_OPTIONS.get(self.kind, []))\n if scriptable:\n if self.kind == 'var':\n options += scriptable.variables.keys()\n options += scriptable.project.variables.keys()\n elif self.kind == 'list':\n options += scriptable.lists.keys()\n options += scriptable.project.lists.keys()\n elif self.kind == 'costume':\n options += [c.name for c in scriptable.costumes]\n elif self.kind == 'backdrop':\n options += [c.name for c in scriptable.project.stage.costumes]\n elif self.kind == 'sound':\n options += [c.name for c in scriptable.sounds]\n options += [c.name for c in scriptable.project.stage.sounds]\n elif self.kind in ('spriteOnly', 'spriteOrMouse', 'spriteOrStage',\n 'touching'):\n options += [s.name for s in scriptable.project.sprites]\n elif self.kind == 'attribute':\n pass # TODO\n elif self.kind == 'broadcast':\n options += list(set(scriptable.project.get_broadcasts()))\n return options", "def options(self) -> list[str]:\n return [BYPASS, ARMED]", "def get_options(self):\n option_list = []\n if self.can_analyze():\n option_list.append((EpOp.TASK_ANALYZE, None))\n\n option_tup = self.predict_option()\n if option_tup:\n option_list.append(option_tup)\n\n option_tup = self.check_option()\n if option_tup:\n option_list.append(option_tup)\n\n return option_list", "def getServerOptions(self):\n pass", "def _opt_config(self):\n return self._opt_method.config", "def create_options(self):\n return []", "def options():\n mappings = {key: key for key in Schema.option_columns}\n return Schema(mappings)", "def options(self):\n pclass_options = self.get_product_class().options.all()\n return pclass_options | self.product_options.all()", "def _get_options(ret):\n attrs = {\"host\": \"host\", \"port\": \"port\", \"skip\": \"skip_on_error\", \"mode\": \"mode\"}\n\n _options = salt.returners.get_returner_options(\n __virtualname__, ret, attrs, __salt__=__salt__, __opts__=__opts__\n )\n return _options", "async def get_options(self) -> List[Tuple[str, str]]:\n options = [\n (\"TRUE\", \"true\"),\n (\"FALSE\", \"false\"),\n ]\n if self.context.get(\"null\"):\n options.insert(0, (\"\", \"\"))\n\n return options", "def _options(self):\r\n xmi_file = self.tb_xmi_file_name.GetValue()\r\n topic = self.tb_pragma.GetValue()\r\n package = self.tb_package.GetValue()\r\n header = self.tb_file_header.GetValue()\r\n target_folder = self.tb_target_folder.GetValue()\r\n encoding = self.tb_encoding.GetValue()\r\n \r\n return {\"topic\" : topic, \r\n \"package\" : package, \r\n \"header\" : header, \r\n \"target_folder\" : target_folder,\r\n \"encoding\" : encoding,\r\n \"xmi_file\" : xmi_file}", "def options(self) -> tuple: # UserOptions\n return self._options", "def get_options():\r\n usage = \"usage: %prog -s SOURCE -o OUTDIR -t SOURCETYPE [folder|image] [-c CASENAME -q REPORT_QUERIES]\"\r\n options = OptionParser(usage=usage)\r\n options.add_option(\"-s\",\r\n action=\"store\",\r\n 
type=\"string\",\r\n dest=\"source\",\r\n default=False,\r\n help=\"REQUIRED. The source directory or image containing fsevent files to be parsed\")\r\n options.add_option(\"-o\",\r\n action=\"store\",\r\n type=\"string\",\r\n dest=\"outdir\",\r\n default=False,\r\n help=\"REQUIRED. The destination directory used to store parsed reports\")\r\n options.add_option(\"-t\",\r\n action=\"store\",\r\n type=\"string\",\r\n dest=\"sourcetype\",\r\n default=False,\r\n help=\"REQUIRED. The source type to be parsed. Available options are 'folder' or 'image'\")\r\n options.add_option(\"-c\",\r\n action=\"store\",\r\n type=\"string\",\r\n dest=\"casename\",\r\n default=False,\r\n help=\"OPTIONAL. The name of the current session, \\\r\n used for naming standards. Defaults to 'FSE_Reports'\")\r\n options.add_option(\"-q\",\r\n action=\"store\",\r\n type=\"string\",\r\n dest=\"report_queries\",\r\n default=False,\r\n help=\"OPTIONAL. The location of the report_queries.json file \\\r\n containing custom report queries to generate targeted reports.\"\r\n )\r\n\r\n # Return options to caller #\r\n return options", "def get_options(cls, player, context={}):\n\t\traise NotImplementedError()", "def getclsoptions(cls, tmpcls, session=None):\n if session is not None:\n cls.setclsoptions(tmpcls, session)\n return optionsdict[tmpcls]['OPTIONS']", "def options(self):\n if self._ast:\n for option in self._ast[1]:\n yield option", "def datastore_options(self) -> Optional['outputs.PreventionJobTriggerInspectJobStorageConfigDatastoreOptions']:\n return pulumi.get(self, \"datastore_options\")", "def settable(self):\r\n return Options([i for i in list(self.items())\r\n if i[0] in Options.versatileOptions()])", "def sqlalchemy_sft(self):\n return Config.DB_SFT_OPTIONS.copy()", "def get_default_options():\n out = _SFrame({'name': ['method', 'feature_model', 'verbose'],\n 'default_value' : ['lsh', 'auto', 'True'],\n 'lower_bound': [None, None, 0],\n 'upper_bound': [None, None, 1],\n 'description': ['Method for searching reference data',\n 'Trained model for extracting features from raw data objects',\n 'Whether progress output is printed'],\n 'parameter_type': ['string', 'model', 'boolean']})\n\n return out", "def get_options(self):\n additional_data = self.get_additional_data()\n options_out = []\n for key, value in additional_data['DIMENSION_VALUES'].items():\n key_label = ' '.join(key.split('_')).strip().title()\n data = {'specification': key_label, 'specification_key': key, 'choices': value}\n options_out.append(data)\n return options_out", "def get_switching_options(self):\n\n return self._switch_opt_infos.iterkeys()", "def optionHelp(self):\n return {}", "def get_options():\n user_options = {}\n user_options['surface'] = {'label': 'Surface',\n 'type': 'stringList',\n 'default': 'bcc100',\n 'values': surface_selections}\n\n user_options['metal'] = {'label': 'Metal',\n 'type': 'string',\n 'default': 'Au'}\n\n user_options['a'] = {'label': 'Lattice Constant',\n 'type': 'float',\n 'precision': 3,\n 'suffix': 'Å'}\n\n user_options['size-x'] = {'label': 'Size X',\n 'type': 'integer',\n 'default': 5}\n\n user_options['size-y'] = {'label': 'Size Y',\n 'type': 'integer',\n 'default': 5}\n\n user_options['size-z'] = {'label': 'Size Z',\n 'type': 'integer',\n 'default': 3}\n\n user_options['vacuum'] = {'label': 'Vacuum distance',\n 'type': 'float',\n 'precision': 1,\n 'suffix': 'Å'}\n\n user_options['orthogonal'] = {'label': 'Orthogonal',\n 'type': 'stringList',\n 'default': 'True',\n 'values': ['True', 'False']}\n\n return 
{'userOptions': user_options }", "def get_runcompss_opts(self):\n return self.runcompss_opts", "def getOptionQueries(self):\n return self.getOptionQueriesObject().values()", "def get_options(self):\n return (\n Option('-H', '--host',\n dest='host',\n default=self.host,\n help='IP address or hostname of the Glancesync server.'),\n\n Option('-p', '--port',\n dest='port',\n type=int,\n default=self.port,\n help='Port in which the GlanceSync server is running'),\n\n Option('-w', '--workers',\n dest='workers',\n type=int,\n default=self.workers,\n help='Number of concurrent workers to be launched, usually 2*core numbers+1.'),\n )", "def help_opt(self):\n print(OPTIONS)", "def define_options(self):\n return {\n 'basename': OptionDef(required=True, default_value='keycloak', allowed_types=[str]),\n 'namespace': OptionDef(required=True, default_value='default', allowed_types=[str]),\n 'config': {\n 'service_port': OptionDef(required=True, default_value=8080, allowed_types=[int]),\n 'realm_import': OptionDef(format=OptionDefFormat.KDATA_VOLUME, allowed_types=[str, bytes, KData_Secret]),\n 'proxy_address_forwarding': OptionDef(format=OptionDefFormat.KDATA_ENV,\n allowed_types=[bool, *KDataHelper_Env.allowed_kdata()]),\n 'frontend_url': OptionDef(allowed_types=[str]),\n 'admin': {\n 'user': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'password': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, KData_Secret]),\n },\n 'db': {\n 'vendor': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'addr': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'port': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[int, *KDataHelper_Env.allowed_kdata()]),\n 'database': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'schema': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'user': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'password': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, KData_Secret]),\n },\n },\n 'container': {\n 'keycloak': OptionDef(required=True, default_value='quay.io/keycloak/keycloak:11.0.2', allowed_types=[str]),\n },\n 'kubernetes': {\n 'resources': {\n 'deployment': OptionDef(allowed_types=[Mapping]),\n }\n },\n }", "def evasion(self):\n return self._eoptions", "def get_options(self, key):\n if key in self.options.get_option_names():\n return self.options\n\n try:\n scope, scoped_key = key.split('.')\n except ValueError:\n return None\n\n if scope == 'input' and scoped_key in self.input.options.get_option_names():\n return self.input.options\n elif scope == 'output' and scoped_key in self.output.options.get_option_names():\n return self.output.options\n elif scope == 'exploit' and scoped_key in self.exploit.options.get_option_names():\n return self.exploit.options\n else:\n return None", "def snowflake(self) -> Optional[pulumi.Input['ConnectorProfileConnectorProfileConfigConnectorProfilePropertiesSnowflakeArgs']]:\n return pulumi.get(self, \"snowflake\")", "def bigquery_options(self) -> 'outputs.BigQueryOptionsResponse':\n return pulumi.get(self, \"bigquery_options\")", "def get_options(self):\n options = dict()\n while True:\n line = self.rfile.readline().decode(\"utf8\").strip()\n if not line:\n break\n self.log.debug(\"Got line: 
%s\", line)\n if \":\" not in line:\n self.log.debug(\"Invalid option: %s\", line)\n error_msg = \"header not in 'Name: value' format\"\n raise oa.errors.InvalidOption(error_msg)\n name, value = line.split(\":\", 1)\n options[name.lower()] = value.strip()\n return options", "def supported_options(self):\n return ['person','filename','speed','pitch','volume']", "def getDataPointOptions(self):\n return [(dp.name(), dp.name())\n for dp in self.rrdTemplate.getRRDDataPoints()]", "def objective_options(self):\n return Optimizer.list_method_options(self.obj_creator.method_dict)", "def source_configuration(self) -> pulumi.Input['ServiceSourceConfigurationArgs']:\n return pulumi.get(self, \"source_configuration\")", "def salt_opts(self):\n if self._salt_opts is None:\n salt_conf = self.config.settings.get('salt.master_config',\n '/etc/salt/master')\n self._salt_opts = salt.config.master_config(salt_conf)\n return self._salt_opts", "def snowflake(self) -> Optional[pulumi.Input['FlowDestinationFlowConfigDestinationConnectorPropertiesSnowflakeArgs']]:\n return pulumi.get(self, \"snowflake\")", "def source_configuration(self) -> Optional[pulumi.Input['ServiceSourceConfigurationArgs']]:\n return pulumi.get(self, \"source_configuration\")", "def options_set(self):\n\n global OPTIONS\n OPTIONS.append(config.ENABLE(self.threaded))\n OPTIONS.append(config.ENABLE(self.datasaver))\n OPTIONS.append(self.language)", "def options() -> List:\n return list(c.value for c in Plugin)", "def build_options(self):\n opts = [\n \"-k rpm.rpmva=off\",\n \"-k apache.log=True\",\n ]\n\n sensitive_keys = {\n self._engine_plugin: 'sensitive_keys',\n 'ovirt_engine_dwh': 'dwh_sensitive_keys',\n }\n if self.configuration['include_sensitive_data']:\n for plugin in sensitive_keys:\n self.configuration[sensitive_keys[plugin]] = ':'\n\n for plugin in sensitive_keys:\n if self.configuration.get(sensitive_keys[plugin]):\n opts.append(\n '-k {plugin}.sensitive_keys={keys}'.format(\n plugin=plugin,\n keys=self.configuration.get(sensitive_keys[plugin]),\n )\n )\n\n if self.configuration.get(\"ticket_number\"):\n opts.append(\n \"--ticket-number=%s\" % self.configuration.get(\"ticket_number\")\n )\n\n if self.sos_version < '30':\n opts.append('--report')\n\n if self.configuration.get(\"log_size\"):\n opts.append(\n \"--log-size=%s\" %\n self.configuration.get('log_size')\n )\n else:\n if self.sos_version < '30':\n opts.append('--report')\n opts.append(\"-k general.all_logs=True\")\n elif self.sos_version < '32':\n opts.append(\"-k logs.all_logs=True\")\n else:\n opts.append(\"--all-logs\")\n\n if self.configuration.get(\"upload\"):\n opts.append(\"--upload=%s\" % self.configuration.get(\"upload\"))\n return \" \".join(opts)", "def get_options(self, panel=\"\"):\n return dict()", "def _GclStyleSettings(self):\n settings = {\n 'port': self.GetCodeReviewSetting('TRYSERVER_HTTP_PORT'),\n 'host': self.GetCodeReviewSetting('TRYSERVER_HTTP_HOST'),\n 'svn_repo': self.GetCodeReviewSetting('TRYSERVER_SVN_URL'),\n 'gerrit_url': self.GetCodeReviewSetting('TRYSERVER_GERRIT_URL'),\n 'git_repo': self.GetCodeReviewSetting('TRYSERVER_GIT_URL'),\n 'project': self.GetCodeReviewSetting('TRYSERVER_PROJECT'),\n # Primarily for revision=auto\n 'revision': self.GetCodeReviewSetting('TRYSERVER_REVISION'),\n 'root': self.GetCodeReviewSetting('TRYSERVER_ROOT'),\n 'patchlevel': self.GetCodeReviewSetting('TRYSERVER_PATCHLEVEL'),\n }\n logging.info('\\n'.join(['%s: %s' % (k, v)\n for (k, v) in settings.iteritems() if v]))\n for (k, v) in settings.iteritems():\n # Avoid 
overwriting options already set using command line flags.\n if v and getattr(self.options, k) is None:\n setattr(self.options, k, v)", "def metric_options(self):\n return Optimizer.list_method_options(self.metric_creator.method_dict)", "def options(self, api_path, *args, **kwargs):\n\n\t\treturn self._do_operation(u'options', api_path, *args, **kwargs)", "def get_options(cls):\n for option in cls._general_options.items():\n yield option\n for option in cls._specific_options.items():\n yield option", "def _getoptions():\n parser = OptionParser()\n parser.add_option(\"-f\", \"--dwca_file\", dest=\"dwca_file\",\n help=\"Darwin Core Archive file\",\n default=None)\n return parser.parse_args()[0]", "def get_extension_options(self):\n options = []\n for extension in self.extensions:\n extension_options = getattr(extension, \"OPTIONS\", None)\n if extension_options:\n options.extend(extension_options)\n return options", "def get_options(self, key):\n # Get the set of options unique to the Workflow data model\n if key not in self.__workflow_options:\n raise KeyError(\"Key `{}` not understood.\".format(key))\n\n return copy.deepcopy(getattr(self.data, key))", "def getOptionsNames(self) -> List[unicode]:\n ...", "def _get_default_options():\n return {\n \"library_folders\": [],\n \"verbose\": False,\n \"check_balanced\": True,\n \"mtime_check\": True,\n \"cache\": False,\n \"codegen\": False,\n \"expand_mx\": False,\n \"unroll_loops\": True,\n \"inline_functions\": True,\n \"expand_vectors\": False,\n \"resolve_parameter_values\": False,\n \"replace_parameter_expressions\": False,\n \"replace_constant_expressions\": False,\n \"eliminate_constant_assignments\": False,\n \"replace_parameter_values\": False,\n \"replace_constant_values\": False,\n \"eliminable_variable_expression\": None,\n \"factor_and_simplify_equations\": False,\n \"detect_aliases\": False,\n \"allow_derivative_aliases\": True,\n \"reduce_affine_expression\": False,\n }", "def database(self):\n return self.snowflake_options.database", "def from_proto(cls, snowflake_options_proto: DataSourceProto.SnowflakeOptions):\n snowflake_options = cls(\n database=snowflake_options_proto.database,\n schema=snowflake_options_proto.schema,\n table=snowflake_options_proto.table,\n query=snowflake_options_proto.query,\n )\n\n return snowflake_options", "def option_settings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EnvironmentOptionSettingArgs']]]]:\n return pulumi.get(self, \"option_settings\")", "def constraint_options(self):\n return Optimizer.list_method_options(self.const_creator.method_dict)", "def cloud_storage_options(self) -> Optional['outputs.PreventionJobTriggerInspectJobStorageConfigCloudStorageOptions']:\n return pulumi.get(self, \"cloud_storage_options\")", "def list_opts():\n return _make_opt_list([OPTS], 'tvdb')", "def get_current_options(self):\n return {k: self._state[k] for k in get_default_options()['name']}", "def option_settings(self) -> pulumi.Output[Optional[Sequence['outputs.EnvironmentOptionSetting']]]:\n return pulumi.get(self, \"option_settings\")", "def runoptions(self):\n # outstanding = self.missing_required()\n # if outstanding:\n # raise TypeError('Module missing required parameter: %s' % ', '.join(outstanding))\n return self._runopts", "def versatileOptions():\r\n return tuple(sorted(i[0] for i in list(Options.defaults().items()) if i[1].find(' #v ') > 0))", "def options(self) -> list[str]:\n # If the translation key is \"zone_sleep\", we need to translate\n # the options to make them compatible with Home 
Assistant\n if self.translation_key == \"zone_sleep\":\n return list(STATE_ZONE_SLEEP_MAPPING)\n return list(self.capability.options.values())", "def big_query_options(self) -> Optional['outputs.PreventionJobTriggerInspectJobStorageConfigBigQueryOptions']:\n return pulumi.get(self, \"big_query_options\")", "def options_by_name(self):\n pass" ]
[ "0.7124274", "0.7121195", "0.70052445", "0.7004528", "0.70010746", "0.6985663", "0.6985663", "0.6985663", "0.6985663", "0.6985663", "0.67881876", "0.6759905", "0.6664762", "0.65246856", "0.64745015", "0.64547145", "0.64213645", "0.63637906", "0.6305649", "0.62842727", "0.6252382", "0.6210289", "0.6183034", "0.61684126", "0.61589503", "0.61026824", "0.60976124", "0.60454845", "0.60335046", "0.60267663", "0.59601897", "0.59565485", "0.59533554", "0.59507775", "0.59392154", "0.59363425", "0.59343624", "0.5918573", "0.5908001", "0.58953995", "0.5890182", "0.58899534", "0.5878792", "0.58765066", "0.5866309", "0.5856504", "0.585177", "0.58377737", "0.58113015", "0.57995987", "0.5785615", "0.57632565", "0.5745421", "0.5733751", "0.5717565", "0.56992435", "0.5691923", "0.56878906", "0.56817114", "0.5677918", "0.56770045", "0.5673824", "0.5667207", "0.56664944", "0.56517154", "0.563321", "0.55870926", "0.5583126", "0.557643", "0.55750066", "0.55737174", "0.55732936", "0.55714965", "0.5564877", "0.5548273", "0.55402684", "0.553896", "0.5537377", "0.5537149", "0.55367625", "0.55350816", "0.5528306", "0.55206317", "0.5509557", "0.55062336", "0.5505228", "0.5500807", "0.5488384", "0.5472116", "0.5468009", "0.5461476", "0.54579145", "0.5450803", "0.5427066", "0.5414293", "0.54121554", "0.5404845", "0.5402384", "0.53993577", "0.5396514" ]
0.59917724
30
Converts a SnowflakeSource object to its protobuf representation.
def to_proto(self) -> DataSourceProto:
    data_source_proto = DataSourceProto(
        type=DataSourceProto.BATCH_SNOWFLAKE,
        field_mapping=self.field_mapping,
        snowflake_options=self.snowflake_options.to_proto(),
    )

    data_source_proto.event_timestamp_column = self.event_timestamp_column
    data_source_proto.created_timestamp_column = self.created_timestamp_column
    data_source_proto.date_partition_column = self.date_partition_column

    return data_source_proto
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_proto(data_source: DataSourceProto):\n return SnowflakeSource(\n field_mapping=dict(data_source.field_mapping),\n database=data_source.snowflake_options.database,\n schema=data_source.snowflake_options.schema,\n table=data_source.snowflake_options.table,\n event_timestamp_column=data_source.event_timestamp_column,\n created_timestamp_column=data_source.created_timestamp_column,\n date_partition_column=data_source.date_partition_column,\n query=data_source.snowflake_options.query,\n )", "def to_proto(self) -> DataSourceProto.SnowflakeOptions:\n snowflake_options_proto = DataSourceProto.SnowflakeOptions(\n database=self.database,\n schema=self.schema,\n table=self.table,\n query=self.query,\n )\n\n return snowflake_options_proto", "def to_proto(self) -> None:\n\n pass", "def FromProto(cls, proto_obj):\n source = GameSource()\n source.type = proto_obj.type\n if proto_obj.update_time_utc_str:\n source.update_date_time = datetime.strptime(\n proto_obj.update_time_utc_str, tweets.DATE_PARSE_FMT_STR)\n else:\n source.update_date_time = datetime.now()\n if proto_obj.twitter_account:\n source.account_id = long(proto_obj.twitter_account.id_str)\n source.tweet_text = proto_obj.tweet_text\n if proto_obj.score_reporter_url:\n source.score_reporter_url = proto_obj.score_reporter_url\n if not (source.account_id or source.score_reporter_url):\n raise GameModelError('Converting GameSource from malformed proto')\n return source", "def to_protobuf(self):\n self._validate()\n kwargs = {k: _convert(getattr(self, k), 'to_protobuf')\n for k in self._get_params()}\n return self._protobuf_cls(**kwargs)", "def get_source_unicode(obj):\n return inspect.getsource(obj).decode(get_encoding(obj))", "def to_proto(self) -> FeatureSetReferenceProto:\n return self.proto", "def _object2proto(self) -> Metadata_PB:\n return Metadata_PB(\n name=self.name, id=serialize(self.id), node=serialize(self.node)\n )", "def from_proto(cls, snowflake_options_proto: DataSourceProto.SnowflakeOptions):\n snowflake_options = cls(\n database=snowflake_options_proto.database,\n schema=snowflake_options_proto.schema,\n table=snowflake_options_proto.table,\n query=snowflake_options_proto.query,\n )\n\n return snowflake_options", "def get_protobuf_schema() -> GeneratedProtocolMessageType:\n return StorableObject_PB", "def to_proto(self):\n filename_tensor = array_ops.placeholder(\n shape=[], dtype=dtypes.string, name=\"saver_filename\")\n save_tensor = self._traced_save(filename_tensor)\n restore_op = self._traced_restore(filename_tensor).op\n return saver_pb2.SaverDef(\n filename_tensor_name=filename_tensor.name,\n save_tensor_name=save_tensor.name,\n restore_op_name=restore_op.name,\n version=saver_pb2.SaverDef.V2)", "def to_proto(self) -> FeatureSetProto:\n\n meta = FeatureSetMetaProto(\n created_timestamp=self.created_timestamp, status=self.status\n )\n\n spec = FeatureSetSpecProto(\n name=self.name,\n project=self.project,\n max_age=self.max_age,\n labels=self.labels,\n source=self.source.to_proto() if self.source is not None else None,\n features=[\n field.to_proto()\n for field in self._fields.values()\n if type(field) == Feature\n ],\n entities=[\n field.to_proto()\n for field in self._fields.values()\n if type(field) == Entity\n ],\n )\n\n return FeatureSetProto(spec=spec, meta=meta)", "def toStr(self, protoObj):\n return text_format.MessageToString(protoObj)", "def toStr(self, protoObj):\n return text_format.MessageToString(protoObj)", "def _stringify_proto(obj):\n return obj.SerializeToString()", "def make(self, source):\n 
if isinstance(source, str):\n return copy(self.get(source))\n elif self.PB_CLASS and isinstance(source, self.PB_CLASS):\n item = copy(self.get(source.name))\n item._pb = source\n return item\n else:\n return copy(source)", "def get_source_unicode(obj):\n return inspect.getsource(obj)", "def _object2proto(self) -> GetGroupsMessage_PB:\n return GetGroupsMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def ToProto(self):\n game = scores_messages.Game()\n game.id_str = self.id_str\n game.teams = [team.ToProto() for team in self.teams]\n game.scores = self.scores\n game.name = self.name\n game.tournament_id_str = self.tournament_id\n game.tournament_name = self.tournament_name\n game.game_status = self.game_status\n game.division = self.division\n game.league = self.league\n game.age_bracket = self.age_bracket\n if self.sources:\n game.last_update_source = self.sources[0].ToProto()\n return game", "def _proto_to_string(self, p: google.protobuf.message.Message) -> str:\n return text_format.MessageToString(p, as_one_line=True)", "def _object2proto(self) -> UpdateGroupMessage_PB:\n return UpdateGroupMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def _object2proto(self) -> GetGroupMessage_PB:\n return GetGroupMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def _get_source_str(obj):\n # type: (Union[str, Source, Document]) -> str\n if isinstance(obj, str):\n source_str = obj\n elif isinstance(obj, Source):\n source_str = obj.body\n elif isinstance(obj, Document):\n source_str = obj.loc.source.body\n else:\n source_str = \"\"\n # remove new lines, tabs and extra whitespace from source_str\n return re.sub(r\"\\s+\", \" \", source_str).strip()", "def get_source_schema(cls) -> dict:\n source_schema = get_base_schema(\n root=True,\n id_=\"source.schema.json\",\n title=\"Source data schema\",\n description=\"Schema for the source data, files and directories\",\n version=\"0.1.0\",\n )\n for interface_name, data_interface in cls.data_interface_classes.items():\n source_schema[\"properties\"].update({interface_name: unroot_schema(data_interface.get_source_schema())})\n return source_schema", "def SphinxDummySourceClass(source: Any, *args: Any, **kwargs: Any) -> Any:\n return source", "def _object2proto(self) -> CreateGroupMessage_PB:\n return CreateGroupMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def render_source(output_dir, package_spec):\n path, name = package_spec.filepath\n destination_filename = '%s/%s.proto' % (output_dir, name)\n pb_template = JENV.get_template(MESSAGES_TEMPLATE_NAME)\n includes = [include[:-5] if include.endswith('.yaml') else include for include in package_spec.includes]\n if 'types' in includes:\n includes.remove('types')\n with open(destination_filename, 'w') as f:\n f.write(pb_template.render(\n name=name,\n package=package_spec.identifier,\n messages=package_spec.definitions,\n includes=includes,\n description=package_spec.description,\n ))", "def parse_pbobject(source, pb_class):\n if isinstance(source, str):\n return open_pbobject(source, pb_class)\n elif isinstance(source, bytes):\n pb_object = pb_class()\n pb_object.ParseFromString(source)\n 
return pb_object\n else:\n logging.error(f'cannot parse type {type(source)}')", "def _stringify_proto(obj):\n if isinstance(obj, str): return obj\n elif isinstance(obj, Message): return obj.SerializeToString()\n else: raise TypeError('Object can not be serialized as a string.')", "def source():\n\n source = models.Source(name=u\"Joe's Funerals.com\", url=u\"http://www.joesfunerals.com\")\n return source", "def from_dict(cls, dikt) -> 'SourceSchema':\n return util.deserialize_model(dikt, cls)", "def decode_proto(self):\n # Create output directory it does not exist\n if not os.path.exists(PROTO_CACHE):\n os.makedirs(PROTO_CACHE)\n\n # Compile proto (TODO: Assumes protoc is in PATH)\n cmd = \"protoc -I {} --python_out={} {}\".format(\n os.path.dirname(self.proto_file_path),\n PROTO_CACHE,\n self.proto_file_path)\n subprocess.check_call(cmd, shell=True)\n\n # Append compiled python module to Python's system path\n sys.path.insert(0, PROTO_CACHE)\n globals()[\"ProtoDefinition\"] = __import__(\"u_s_s_r_proto_pb2\")", "def __convert( source ):\n # Just in case things get this far but we don't know about the record\n if source['recordType'] not in definitions.RECORDS:\n return {\n 'rec_type': source['recordType']\n }\n\n # Create a flat wrapper\n record = estreamer.common.Flatdict( source )\n\n # Transform\n output = __selectWithNewKeys( record )\n\n return output", "def to_proto(self):\n prototxt = str()\n opts = self.options('solver')\n for opt in opts:\n val = self.get('solver',opt)\n prototxt += opt + ': ' + val + '\\n'\n return prototxt", "def encode_span(self, span):\n if not protobuf.installed():\n raise ZipkinError(\n \"protobuf encoding requires installing the protobuf's extra \"\n \"requirements. Use py-zipkin[protobuf] in your requirements.txt.\"\n )\n\n pb_span = protobuf.create_protobuf_span(span)\n return protobuf.encode_pb_list([pb_span])", "def _object2proto(self) -> RunFunctionOrConstructorAction_PB:\n return RunFunctionOrConstructorAction_PB(\n path=self.path,\n args=[serialize(x) for x in self.args],\n kwargs={k: serialize(v) for k, v in self.kwargs.items()},\n id_at_location=serialize(self.id_at_location),\n address=serialize(self.address),\n msg_id=serialize(self.id),\n )", "def encode(cls, query_protobuf_object, query_object: \"Query\") -> None:\n query_bytes = pickle.dumps(query_object) # nosec\n query_protobuf_object.query_bytes = query_bytes", "def test_prepare_source(source):\n assert isinstance(PseudoPotentialData.prepare_source(source), io.BytesIO)\n\n if isinstance(source, io.BytesIO):\n # If we pass a bytestream, we should get the exact same back\n assert PseudoPotentialData.prepare_source(source) is source", "def bfd_get_serializer():\n serializer = sl_bfd_common_pb2.SLBfdGetMsg()\n return serializer", "def ToWireFormat(value):\n return _GetFactory(type(value)).ToWireFormat(value)", "def getSource(self):\n return _libsbml.ExternalModelDefinition_getSource(self)", "def get_message_source_from_event(event):\n return event.message.annotations[\"iothub-message-source\".encode()].decode()", "def obj2bytes(obj: StunPacket) -> bytes:\n\n message = bytes()\n for [attribute_type, attribute_val] in obj.message.items():\n message += pack(\n '{}{}s'.format(ATTRIBUTE_HEADER_FORMAT, len(attribute_val)),\n attribute_type,\n len(attribute_val),\n attribute_val,\n )\n\n type_bits = (\n '00'\n + obj.type_method[0:5]\n + obj.type_class[0]\n + obj.type_method[5:8]\n + obj.type_class[1]\n + obj.type_method[8:]\n )\n return pack(\n '{}{}s'.format(HEADER_FORMAT, len(message)),\n 
int(type_bits, 2),\n len(message),\n obj.magic_cookie,\n obj.transaction_id,\n message,\n )", "def _serialize_event_source(event):\n parent = event.parent\n\n return {\n \"source_id\": parent.source_id,\n \"type\": MessagingEvent.SOURCE_SLUGS.get(parent.source, 'unknown'),\n \"name\": get_event_display_api(parent),\n }", "def give_source(self):\n has_src, src_sobj = self.get_sobj().ReferencedObject()\n if has_src:\n return self.__class__(self._std, self._bld, src_sobj.GetID())", "def get_source(self, format: str) -> Source:\n if format in SOURCE_MAP:\n s = SOURCE_MAP[format]\n return s(self)\n else:\n raise TypeError(f\"{format} in an unrecognized format\")", "def getsource(object):\n lines, lnum = getsourcelines(object)\n return string.join(lines, '')", "def create_from_source(self):\n create_statement = self.source.create_statement\n self.create_from_statement(create_statement)\n # Add constraints\n constraints = self.source.constraints\n self.add_constraints(constraints)\n\n # Add indexes\n indexes = self.source.indexes\n self.add_indexes(indexes)\n\n # Add the non-referenced foreign keys\n non_referenced_fks = [x for x in self.source.foreign_keys if not x.referenced]\n self.add_foreign_keys(non_referenced_fks, override_table=self.name)", "def test_proto_serialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = SpecificLocation.get_protobuf_schema()(id=sy.serialize(uid), name=\"Test\")\n\n assert sy.serialize(obj, to_proto=True) == blob\n assert sy.serialize(obj, to_proto=True) == blob\n assert sy.serialize(obj, to_proto=True) == blob", "def getsource(object):\r\n lines, lnum = getsourcelines(object)\r\n return string.join(lines, '')", "def source_format(self):\n return self.get(\"source_format\", decode=True)", "def to_pb2(self) -> _DetectionProto:\n labels = []\n label_ids = []\n scores = []\n display_names = []\n relative_keypoints = []\n\n for category in self.categories:\n scores.append(category.score)\n if category.index:\n label_ids.append(category.index)\n if category.category_name:\n labels.append(category.category_name)\n if category.display_name:\n display_names.append(category.display_name)\n\n if self.keypoints:\n for keypoint in self.keypoints:\n relative_keypoint_proto = _LocationDataProto.RelativeKeypoint()\n if keypoint.x:\n relative_keypoint_proto.x = keypoint.x\n if keypoint.y:\n relative_keypoint_proto.y = keypoint.y\n if keypoint.label:\n relative_keypoint_proto.keypoint_label = keypoint.label\n if keypoint.score:\n relative_keypoint_proto.score = keypoint.score\n relative_keypoints.append(relative_keypoint_proto)\n\n return _DetectionProto(\n label=labels,\n label_id=label_ids,\n score=scores,\n display_name=display_names,\n location_data=_LocationDataProto(\n format=_LocationDataProto.Format.BOUNDING_BOX,\n bounding_box=self.bounding_box.to_pb2(),\n relative_keypoints=relative_keypoints,\n ),\n )", "def to_proto(self):\n return [annotations_pb2.KeyPoint2D(x=int(self.x[j]), y=int(self.y[j])) for j, k in enumerate(self.x)]", "def generate_proto(source):\n\n output = source.replace(\".proto\", \"_pb2.py\")\n\n if not os.path.exists(output) or (\n os.path.exists(source) and os.path.getmtime(source) > os.path.getmtime(output)\n ):\n print(\"Generating %s...\" % output)\n\n if not os.path.exists(source):\n sys.stderr.write(\"Can't find required file: %s\\n\" % source)\n sys.exit(-1)\n\n if protoc == None:\n sys.stderr.write(\n \"Protocol buffers compiler 'protoc' not 
installed or not found.\\n\"\n )\n sys.exit(-1)\n\n protoc_command = [protoc, \"-I.\", \"--python_out=.\", source]\n if subprocess.call(protoc_command) != 0:\n sys.exit(-1)", "def source(self) -> Dict:\n return self._db_data.metadata[\"_source\"]", "def convert(df: pd.DataFrame, outpath):\n labels = df['source'].apply(lambda x: LABEL_XINHUA if x == '新华社' else LABEL_NON_XINHUA)\n labels.to_pickle(outpath)", "def to_proto(self):\n # type: (...) -> List[beam_runner_api_pb2.DisplayData]\n def create_payload(dd):\n display_data_dict = None\n try:\n display_data_dict = dd.get_dict()\n except ValueError:\n # Skip if the display data is invalid.\n return None\n\n # We use 'label' or 'key' properties to populate the 'label' attribute of\n # 'LabelledPayload'. 'label' is a better choice since it's expected to be\n # more human readable but some transforms, sources, etc. may not set a\n # 'label' property when configuring DisplayData.\n label = (\n display_data_dict['label']\n if 'label' in display_data_dict else display_data_dict['key'])\n\n value = display_data_dict['value']\n if isinstance(value, str):\n return beam_runner_api_pb2.LabelledPayload(\n label=label,\n string_value=value,\n key=display_data_dict['key'],\n namespace=display_data_dict.get('namespace', ''))\n elif isinstance(value, bool):\n return beam_runner_api_pb2.LabelledPayload(\n label=label,\n bool_value=value,\n key=display_data_dict['key'],\n namespace=display_data_dict.get('namespace', ''))\n elif isinstance(value, int):\n return beam_runner_api_pb2.LabelledPayload(\n label=label,\n int_value=value,\n key=display_data_dict['key'],\n namespace=display_data_dict.get('namespace', ''))\n elif isinstance(value, (float, complex)):\n return beam_runner_api_pb2.LabelledPayload(\n label=label,\n double_value=value,\n key=display_data_dict['key'],\n namespace=display_data_dict.get('namespace', ''))\n else:\n raise ValueError(\n 'Unsupported type %s for value of display data %s' %\n (type(value), label))\n\n dd_protos = []\n for dd in self.items:\n dd_proto = create_payload(dd)\n if dd_proto:\n dd_protos.append(\n beam_runner_api_pb2.DisplayData(\n urn=common_urns.StandardDisplayData.DisplayData.LABELLED.urn,\n payload=dd_proto.SerializeToString()))\n return dd_protos", "def GenerateProto(source):\n\n output = source.replace(\".proto\", \"_pb2.py\")\n\n if not os.path.exists(output) or (\n os.path.exists(source) and\n os.path.getmtime(source) > os.path.getmtime(output)):\n print(\"Generating %s...\" % output)\n\n if not os.path.exists(source):\n sys.stderr.write(\"Can't find required file: %s\\n\" % source)\n sys.exit(-1)\n\n if PROTOC is None:\n sys.stderr.write(\n \"protoc is not found. 
Please compile it \"\n \"or install the binary package.\\n\"\n )\n sys.exit(-1)\n\n protoc_command = [PROTOC, \"-I%s\" % ACLOUD_DIR, \"--python_out=.\", source]\n if subprocess.call(protoc_command) != 0:\n sys.exit(-1)", "def get_source(self):\n\t\treturn self.source.get_source()", "def _object2proto(self) -> DeleteGroupMessage_PB:\n return DeleteGroupMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def serialize_value(self, source_value):\n return serialize(self.get_mapping(), source_value)", "def proto(self) -> Proto:\n # Create a \"context-naïve\" proto.\n # This has everything but is ignorant of naming collisions in the\n # ultimate file that will be written.\n naive = Proto(\n all_enums=self.proto_enums,\n all_messages=self.proto_messages,\n file_pb2=self.file_descriptor,\n file_to_generate=self.file_to_generate,\n services=self.proto_services,\n meta=metadata.Metadata(\n address=self.address,\n ),\n )\n\n # If this is not a file being generated, we do not need to\n # do anything else.\n if not self.file_to_generate:\n return naive\n\n # Return a context-aware proto object.\n return dataclasses.replace(\n naive,\n all_enums=collections.OrderedDict(\n (k, v.with_context(collisions=naive.names))\n for k, v in naive.all_enums.items()\n ),\n all_messages=collections.OrderedDict(\n (k, v.with_context(collisions=naive.names))\n for k, v in naive.all_messages.items()\n ),\n services=collections.OrderedDict(\n # Note: services bind to themselves because services get their\n # own output files.\n (k, v.with_context(collisions=v.names))\n for k, v in naive.services.items()\n ),\n meta=naive.meta.with_context(collisions=naive.names),\n )", "def get_source(self):", "def _to_cpp(self, msg):\n buf = BytesIO()\n msg.serialize(buf)\n value = buf.getvalue()\n return value", "def to_scrip(self):\n raise NotImplementedError", "def getTroveSource(self):\n return self.source", "def serialize(self, obj):\n return dill.dumps(obj, 0).decode('latin-1')", "def decode(cls, query_protobuf_object) -> \"Query\":\n query = pickle.loads(query_protobuf_object.query_bytes) # nosec\n return query", "def _get_encoder_data_shapes(self, source_max_length: int) -> List[mx.io.DataDesc]:\n return [mx.io.DataDesc(name=C.SOURCE_NAME,\n shape=(self.encoder_batch_size, source_max_length),\n layout=C.BATCH_MAJOR)]", "def ToProto(self):\n team = scores_messages.Team()\n if self.twitter_id:\n account = scores_messages.TwitterAccount()\n account.id_str = str(self.twitter_id)\n team.twitter_account = account\n if self.score_reporter_id:\n account = scores_messages.ScoreReporterAccount()\n account.id = self.score_reporter_id\n team.score_reporter_account = account\n return team", "def export_tfx_schema(self) -> schema_pb2.Schema:\n schema = schema_pb2.Schema()\n\n # List of attributes to copy from fields in the FeatureSet to feature in\n # Tensorflow metadata schema where the attribute name is the same.\n attributes_to_copy_from_field_to_feature = [\n \"name\",\n \"presence\",\n \"group_presence\",\n \"shape\",\n \"value_count\",\n \"domain\",\n \"int_domain\",\n \"float_domain\",\n \"string_domain\",\n \"bool_domain\",\n \"struct_domain\",\n \"_natural_language_domain\",\n \"image_domain\",\n \"mid_domain\",\n \"url_domain\",\n \"time_domain\",\n \"time_of_day_domain\",\n ]\n\n for _, field in self._fields.items():\n if isinstance(field, Entity):\n continue\n feature = schema_pb2.Feature()\n for attr in 
attributes_to_copy_from_field_to_feature:\n if getattr(field, attr) is None:\n # This corresponds to an unset member in the proto Oneof field.\n continue\n if issubclass(type(getattr(feature, attr)), Message):\n # Proto message field to copy is an \"embedded\" field, so MergeFrom()\n # method must be used.\n getattr(feature, attr).MergeFrom(getattr(field, attr))\n elif issubclass(type(getattr(feature, attr)), (int, str, bool)):\n # Proto message field is a simple Python type, so setattr()\n # can be used.\n setattr(feature, attr, getattr(field, attr))\n else:\n warnings.warn(\n f\"Attribute '{attr}' cannot be copied from Field \"\n f\"'{field.name}' in FeatureSet '{self.name}' to a \"\n f\"Feature in the Tensorflow metadata schema, because\"\n f\"the type is neither a Protobuf message or Python \"\n f\"int, str and bool\"\n )\n # \"type\" attr is handled separately because the attribute name is different\n # (\"dtype\" in field and \"type\" in Feature) and \"type\" in Feature is only\n # a subset of \"dtype\".\n feature.type = field.dtype.to_tfx_schema_feature_type()\n schema.feature.append(feature)\n\n return schema", "def generate_proto(source, require=True):\n\n if not require and not os.path.exists(source):\n return\n\n output = source.replace(\".proto\", \"_pb2.py\").replace(\"./protobuf/\", \"\")\n\n if (not os.path.exists(output) or\n (os.path.exists(source) and\n os.path.getmtime(source) > os.path.getmtime(output))):\n print(\"Generating %s...\" % output)\n\n if not os.path.exists(source):\n sys.stderr.write(\"Can't find required file: %s\\n\" % source)\n sys.exit(-1)\n\n if protoc is None:\n sys.stderr.write(\n \"protoc is not installed\\n\")\n sys.exit(-1)\n\n protoc_command = [protoc, \"-I./protobuf\",\n \"-I.\", \"--python_out=.\", source]\n if subprocess.call(protoc_command) != 0:\n sys.exit(-1)", "def get_kafka_source_brokers(self) -> str:\n if self.source and self.source.source_type == \"Kafka\":\n return self.source.brokers\n raise Exception(\"Source type could not be identified\")", "def FromWireFormat(self, value):\n pass", "async def json_protocol_source(tmp_path: Path) -> ProtocolSource:\n simple_protocol = (\n get_shared_data_root() / \"protocol\" / \"fixtures\" / \"6\" / \"simpleV6.json\"\n )\n return await ProtocolReader().read_saved(files=[simple_protocol], directory=None)", "def get_source(self):\n return self.source", "def to_proto(self):\n proto = bounding_box_pb2.BoundingBox()\n proto.start.CopyFrom(geom_utils.ToVector3j(self.start))\n proto.size.CopyFrom(geom_utils.ToVector3j(self.size))\n return proto", "def __repr__(self):\n return '{} (source layer)'.format(self.name)", "def source_files(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___DebuggedSourceFile]:", "def _object2proto(self) -> UpdateGroupResponse_PB:\n return UpdateGroupResponse_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n status_code=self.status_code,\n content=json.dumps(self.content),\n )", "def get_source(cls, *args, **kwargs): # real signature unknown\n pass", "def get_source(cls, *args, **kwargs): # real signature unknown\n pass", "def get_source(cls, *args, **kwargs): # real signature unknown\n pass", "def create(self, saved_source_id):\n raw_saved_source_data = self.es.get(index='.kibana', doc_type='doc', id=saved_source_id)\n\n saved_source_type = raw_saved_source_data['_source']['type']\n\n if saved_source_type == 'search':\n return SavedSearch(saved_source_id, self.conf)\n elif saved_source_type == 'visualization':\n return 
SavedVisualization(saved_source_id, self.conf)\n else:\n # TODO: Raise some exception.\n pass", "def _dsc_to_sources(self, repository_file):\n filename = join(self.repository, repository_file.path)\n package = repository_file.package\n\n if not isfile(filename):\n log.critical(f'Cannot find file {filename}')\n return ''\n\n # Read the dsc file and get the deb822 form.\n dsc = Dsc(filename, repository_file.component)\n source = dsc._data\n\n # There are a few differences between a dsc file and a Sources entry,\n # listed and acted upon below:\n\n # Firstly, the \"Source\" field in the dsc is simply renamed to \"Package\".\n source['Package'] = source.pop('Source')\n\n # There needs to be a \"Directory\" field to tell the package manager\n # where to download the package from. This is in the format (for the\n # test package in the component \"main\"):\n # pool/main/t/test\n source['Directory'] = self._get_package_dir(\n package, repository_file.component)\n\n # The source file, its size, and its sha256sum needs to be added to the\n # \"Checksums-Sha256\" field. This is unsurprisingly not in the original\n # source file!\n source['Checksums-Sha256'].append({\n 'sha256': repository_file.sha256sum,\n 'size': str(repository_file.size),\n 'name': repository_file.path.split('/')[-1]\n })\n\n # Files and Checksums-Sha1 are deprecated. Removing them from the Source\n source.pop('Files')\n source.pop('Checksums-Sha1')\n\n # Get a nice rfc822 output of this source, now Sources, entry.\n return source.dump()", "def field_to_native(self, obj, field_name):\n if not obj is None:\n \treturn getattr(obj, self.source)", "def get_proto_serializer():\n def _serialize_proto(proto):\n return proto.SerializeToString()\n return _serialize_proto", "def message_to_python(self, raw_message):\n return self.Message(self, raw_message)", "def _object2proto(self) -> GetGroupsResponse_PB:\n return GetGroupsResponse_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n status_code=self.status_code,\n content=json.dumps(self.content),\n )", "def convert(self):\n return", "def to_legacy(self) -> object:\n pass", "def _to_java(self):\n dmp = dill.dumps(self)\n dmp = zlib.compress(dmp)\n sc = SparkContext._active_spark_context\n pylist = [str(i) + ',' for i in bytearray(dmp)]\n # convert bytes to string integer list\n pylist = [''.join(pylist)]\n pylist.append(PysparkObjId._getPyObjId()) # add our id so PysparkPipelineWrapper can id us.\n java_class = sc._gateway.jvm.java.lang.String\n java_array = sc._gateway.new_array(java_class, len(pylist))\n java_array[0:2] = pylist[0:2]\n _java_obj = JavaParams._new_java_obj(PysparkObjId._getCarrierClass(javaName=True), self.uid)\n _java_obj.setStopWords(java_array)\n return _java_obj", "def source(self) -> \"VolumeAttachmentSource\":\n return typing.cast(\n \"VolumeAttachmentSource\",\n self._properties.get(\"source\"),\n )", "def snowflake(self) -> Optional[pulumi.Input['FlowDestinationFlowConfigDestinationConnectorPropertiesSnowflakeArgs']]:\n return pulumi.get(self, \"snowflake\")", "def cast(obj: 'itkLightObject') -> \"itkMeshSourcePSSS2 *\":\n return _itkMeshSourcePython.itkMeshSourcePSSS2_cast(obj)", "def serialize_message(self) -> bytes:\n return self.compile_message().serialize()", "def test_convert_proto_plus_to_protobuf(self):\n proto_plus = ProtoPlusFixture()\n converted = util.convert_proto_plus_to_protobuf(proto_plus)\n # Assert that the converted proto is an instance of the protobuf\n # protobuf message class.\n self.assertIsInstance(converted, 
ProtobufMessageType)", "def FromProto(cls, proto_obj):\n if not proto_obj.last_update_source:\n raise GameModelError('No update source specified in Game creation.')\n # TODO(P2): refactor all constructors into one base function like in tweets.\n return Game(id_str=proto_obj.id_str,\n teams=[Team.FromProto(tm) for tm in proto_obj.teams],\n scores=proto_obj.scores,\n name=proto_obj.name,\n tournament_id=proto_obj.tournament_id_str,\n tournament_name=proto_obj.tournament_name,\n game_status=proto_obj.game_status,\n division=proto_obj.division,\n league=proto_obj.league,\n age_bracket=proto_obj.age_bracket,\n sources=[GameSource.FromProto(proto_obj.last_update_source)],\n key=game_key(proto_obj))", "def ToProto(self):\n tourney = scores_messages.Tournament()\n tourney.id_str = self.id_str\n tourney.url = self.url\n tourney.name = self.name\n tourney.image_url_https = self.image_url_https\n age_brackets = set()\n divisions = set()\n for st in self.sub_tournaments:\n age_brackets.add(st.age_bracket)\n divisions.add(st.division)\n tourney.divisions = sorted(list(divisions))\n tourney.age_brackets = sorted(list(age_brackets))\n tourney.start_date = self.start_date.strftime(\n tweets.DATE_PARSE_FMT_STR)\n tourney.end_date = self.end_date.strftime(\n tweets.DATE_PARSE_FMT_STR)\n tourney.last_modified_at = self.last_modified_at.strftime(\n tweets.DATE_PARSE_FMT_STR)\n return tourney", "def _proto2object(\n proto: GetGroupsMessage_PB,\n ) -> \"GetGroupsMessage\":\n\n return GetGroupsMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )" ]
[ "0.71726424", "0.6312265", "0.57572246", "0.56970835", "0.5353327", "0.5324823", "0.52810025", "0.5244373", "0.51959056", "0.51375407", "0.51266086", "0.51017046", "0.50727355", "0.50727355", "0.5006445", "0.5004341", "0.49543244", "0.48841015", "0.48770934", "0.48703986", "0.48410082", "0.47356263", "0.4732318", "0.47321752", "0.470947", "0.4648941", "0.464857", "0.4639523", "0.46279892", "0.45872542", "0.45786893", "0.4577238", "0.4576124", "0.45751184", "0.4558753", "0.45587182", "0.4515451", "0.44844988", "0.44796315", "0.44602308", "0.44462967", "0.44437712", "0.44366044", "0.4436101", "0.4425746", "0.44250077", "0.4416065", "0.44137585", "0.44104272", "0.44091845", "0.4409095", "0.44084415", "0.43985546", "0.43874502", "0.4385145", "0.43831375", "0.4382018", "0.43738934", "0.43717244", "0.43716565", "0.43660894", "0.43643087", "0.43621418", "0.4361054", "0.43584383", "0.43568814", "0.43497542", "0.43474334", "0.43407035", "0.434011", "0.4339202", "0.43242943", "0.43171796", "0.43042505", "0.43016636", "0.43010348", "0.4296064", "0.4291274", "0.42892626", "0.4286721", "0.42863968", "0.42863968", "0.42863968", "0.42768046", "0.42686453", "0.42577648", "0.4252232", "0.4250184", "0.42491293", "0.42479816", "0.42430952", "0.42418674", "0.4240617", "0.42384645", "0.42325535", "0.42240438", "0.42194957", "0.4213491", "0.4208128", "0.4198721" ]
0.7205786
0
Returns a string that can directly be used to reference this table in SQL.
def get_table_query_string(self) -> str:
    if self.database and self.table:
        return f'"{self.database}"."{self.schema}"."{self.table}"'
    elif self.table:
        return f'"{self.table}"'
    else:
        return f"({self.query})"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def table_name() -> str:\n pass", "def __repr__(self):\n cls_name = self.__class__.__name__\n conn_name = str(self._connection)\n tbl_name = self._table\n return '{0}({1}, table={2!r})'.format(cls_name, conn_name, tbl_name)", "def __repr__(self):\n cls_name = self.__class__.__name__\n conn_name = str(self._connection)\n tbl_name = self._table\n return '{0}({1}, table={2!r})'.format(cls_name, conn_name, tbl_name)", "def __str__(self):\n return (\n f'{self.__class__.__name__}'\n f'\\n> defined by: {self._str_meta_()}'\n f'\\n> with columns: {self._str_colnames()}'\n f'\\n> {len(self)} objects'\n f'\\n{APtable.__str__(self)}'\n )", "def name(self) -> str:\n return f\"lookup_table_{self.table_number}\"", "def table(self):\n return self._table_name", "def name(self):\n if self.table:\n return \"{}.{}\".format(self.table, self.field_name)\n return self.field_name", "def autoname(self):\n ret = \"%(table)s_%(reftable)s_fkey\"%dict(\n table=self.table.name,\n reftable=self.reftable.name,\n )\n return ret", "def to_sql(self) -> str:\n sql = self.name + ' ' + self.value_type + ' '\n\n if self.primary:\n sql += 'PRIMARY KEY' + ' '\n\n if self.unique:\n sql += 'UNIQUE' + ' '\n\n if self.autoincrement:\n sql += 'AUTOINCREMENT' + ' '\n\n if self.not_null:\n sql += 'NOT NULL' + ' '\n\n return sql", "def table(self):\n return self.reference.table", "def __str__(self):\n tablename = self.tablename()\n attrs = {}\n if Registry.SCHEMAS.has_key(tablename):\n for key in Registry.SCHEMAS[tablename]:\n attrs[key] = getattr(self, key, None)\n return \"<%s object: %s>\" % (self.__class__.__name__, str(attrs))", "def __tablename__(cls):\n return get_table_name(cls.__name__)", "def table_name(self) -> str:\n return self.model._meta.db_table", "def table_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"table_name\")", "def __repr__(self):\n description = [self._str_meta_(), 'columns: '+self._str_colnames()]\n return f'{self.__class__.__name__}({\", \".join(description)})'", "def sql_for_tablespace(self, tablespace, inline=False):\n return \"ON %s\" % self.quote_name(tablespace)", "def __tablename__(cls) -> str:\n return inflection.underscore(cls.__name__)", "def get_table_name(self):\n return self._table", "def to_sql(self) -> str:\n sql = 'FOREIGN KEY(' + self.column\n sql += ') REFERENCES ' + self.target_table\n sql += '(' + self.target_column + ')'\n if self.delete == 'CASCADE':\n sql += ' ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED'\n return sql", "def __str__(self):\r\n tmp = \"\"\r\n for (name, value) in self.__table__.items():\r\n tmp += str(name) + \"\\n\" + str(value) + \"\\n\"\r\n return(tmp)", "def name(self) -> str:\n return self.fqtable.replace(\".\", \"_\")", "def __str__(self):\n\n table_list = [self.headers]\n\n for row in self.data:\n table_list.append([row[col] or \"\" for col in self.headers])\n\n return create_table_string(table_list)", "def encodeTableName(self, schema, table):\r\n return '\"{}\".\"{}\"'.format(schema, table)", "def table_name(self):\n return self._new_table.name", "def table_name(self) -> str:\n return \"OLTP\"", "def tablename(entity) -> str:\n return entity.__tablename__", "def __str__(self):\n return self.sql()", "def schema_ref(schema, table):\n return schema + '.' 
+ table", "def _table_id(project: str, table: FeatureView) -> str:\n return f\"{project}_{table.name}\"", "def __tablename__(self):\n return sub(r\"(?<!^)(?=[A-Z])\", \"_\", self.__name__).lower()", "def table_id(self) -> str:\n return pulumi.get(self, \"table_id\")", "def table(cls):\n return cls.__name__", "def _table_name(self, name: AnyStr) -> bytes:\n name = ensure_bytes(name)\n if self.table_prefix is None:\n return name\n return self.table_prefix + self.table_prefix_separator + name", "def __str__(self):\n return (\n 'cosifer.collections.interactions_table.' +\n 'InteractionsTable\\n{}\\n{}'.format(self.labels, self.df)\n )", "def table_ref(self):\n return self._table_ref", "def dot_label(self) -> str:\n rows = \"\\n\".join(field.dot_row() for field in self.fields)\n return _table_template.format(name=self.name, rows=rows).replace(\"\\n\", \"\")", "def table_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"table_name\")", "def table_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"table_name\")", "def table_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"table_name\")", "def rawtable(self):\n return self.__rawtable", "def id_name(self):\n return self.table_name + '_id'", "def autoname(self):\n ret = \"%(table)s_pkey\"%dict(\n table=self.table.name,\n )\n return ret", "def get_string(self):\n this_column_specifier = \"l\" * self._num_cols\n this_column_headers = TABLE_COLSEP.join(\n [str(header_elt) for header_elt in self._header])\n this_table_header = TABLE_HEADER_TEMPLATE.substitute(\n column_specifier = this_column_specifier,\n caption = str(self._caption),\n tag = str(self._tag))\n if self._flip:\n this_table_content = (TABLE_ROWSEP_NOLINE + os.linesep).join(\n [TABLE_COLSEP.join(\n [self._header[row_num]] + [\n str(row_elt) for row_elt in self._rows[row_num]])\n for row_num in xrange(self._num_rows)])\n else:\n this_table_content = (TABLE_ROWSEP_NOLINE + os.linesep).join(\n [TABLE_COLSEP.join(self._header)] + [TABLE_COLSEP.join(\n [str(row_elt) for row_elt in row]) for row in self._rows])\n return os.linesep.join([this_table_header,\n ENDHEADER,\n this_table_content,\n TABLE_FOOTER])", "def get_tablename(self):\n return self.ds_table", "def __str__(self):\n key_type = self.__guess_key_type()\n return \"%s%s%s%s\" % ( key_type,\n self.getFullyQualifiedName(),\n COL_SEPARATOR,\n self.__key )", "def graph_queries_table_name(self) -> str:\n return pulumi.get(self, \"graph_queries_table_name\")", "def table_name(class_):\n try:\n return class_.__tablename__\n except AttributeError:\n return class_.__table__.name", "def colNames_string(self):\n # SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'some_table';\n return \"SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = \"", "def tableName():\n return \"people\"", "def key(self)->str:\n return \"{}:{}.{}.{}\".format(self.source, self.db, self.ed, self.rec)", "def __str__(self):\n return (\">%s\\n\" % self.name) + \\\n wrap(self.sequence, self.COLUMNS)", "def as_sql(self):\n distinct = 'DISTINCT ' if self._distinct else ''\n final = ' FINAL' if self._final else ''\n table_name = '`%s`' % self._model_cls.table_name()\n if self._model_cls.is_system_model():\n table_name = '`system`.' 
+ table_name\n params = (distinct, self.select_fields_as_sql(), table_name, final)\n sql = u'SELECT %s%s\\nFROM %s%s' % params\n\n if self._prewhere_q and not self._prewhere_q.is_empty:\n sql += '\\nPREWHERE ' + self.conditions_as_sql(prewhere=True)\n\n if self._where_q and not self._where_q.is_empty:\n sql += '\\nWHERE ' + self.conditions_as_sql(prewhere=False)\n\n if self._grouping_fields:\n sql += '\\nGROUP BY %s' % comma_join('`%s`' % field for field in self._grouping_fields)\n\n if self._grouping_with_totals:\n sql += ' WITH TOTALS'\n\n if self._order_by:\n sql += '\\nORDER BY ' + self.order_by_as_sql()\n\n if self._limit_by:\n sql += '\\nLIMIT %d, %d' % self._limit_by\n sql += ' BY %s' % comma_join(string_or_func(field) for field in self._limit_by_fields)\n\n if self._limits:\n sql += '\\nLIMIT %d, %d' % self._limits\n\n return sql", "def get_column_name(self) -> str:\n if self.is_shared():\n assert self._shared_id is not None\n return self._shared_id\n else:\n return str(id(self))", "def __str__(self) -> str:\n\n return self._format_symbol_table_content(\"Symbol table\", self._symbols.items())", "def __repr__(self):\n\n name = self.__class__.__name__\n\n return '%s(\\'%s\\')' % (name, self.raw_field)", "def get_name(self):\n return self.col_name", "def typed_column(self) -> str:\n\n return \"{}:{}\".format(self.name, self.dtype)", "def html_data_table(self):\n return \"XXX\"", "def get_string(self):\n this_column_specifier = (\n TABLE_NUMROWS_SEP + \"l\" + TABLE_NUMROWS_SEP + TABLE_NUMROWS_SEP + \n TABLE_NUMROWS_SEP.join([\"c\" for col in xrange(self._num_cols)]) +\n TABLE_NUMROWS_SEP)\n this_column_headers = TABLE_COLSEP.join(\n [\"\"] + [str(top_header_elt) for top_header_elt in self._top_header])\n this_chart_header = CHART_HEADER_TEMPLATE.substitute(\n column_specifier = this_column_specifier,\n caption = self._caption,\n tag = self._tag,\n column_headers = this_column_headers)\n this_chart_content = (TABLE_ROWSEP + os.linesep).join(\n [TABLE_COLSEP.join([str(left_elt)] +\n [str(self._cells[top_elt][left_elt])\n for top_elt in self._top_header])\n for left_elt in self._left_header])\n return os.linesep.join([this_chart_header, this_chart_content,\n CHART_FOOTER])", "def to_string(self):\n return str(self.attribute_col) + self.symbol", "def get_status_sql(self, tablename):\n status_sql = f\"\"\" SELECT * FROM {tablename}\"\"\"\n return status_sql", "def __repr__(self):\n return \"[\" + \", \".join([str(member) for member in self.table]).rstrip(\",\") + \"]\"", "def table(self):\n return self.snowflake_options.table", "def table(self):\n return self._table", "def table(self):\n return self._table", "def __repr__(self):\n return '{}(\\'{}\\')'.format(self.__class__.__name__, str(self))", "def get_string(self):\n if not self._flip:\n this_column_specifier = TABLE_NUMROWS_SEP + TABLE_NUMROWS_SEP.join(\n [\"l\" for col in xrange(self._num_cols)]) + TABLE_NUMROWS_SEP\n this_table_header = TABLE_HEADER_TEMPLATE.substitute(\n column_specifier = this_column_specifier,\n caption = str(self._caption),\n tag = str(self._tag))\n this_table_column_headers = COLUMN_HEADERS_TEMPLATE.substitute(\n column_headers = TABLE_COLSEP.join(\n [str(header_elt) for header_elt in self._header]))\n this_table_content = (TABLE_ROWSEP + os.linesep).join(\n [TABLE_COLSEP.join([str(row_elt) for row_elt in row])\n for row in self._rows])\n return os.linesep.join([this_table_header,\n this_table_column_headers,\n ENDHEADER,\n this_table_content,\n TABLE_FOOTER])\n else:\n this_column_specifier = (\n 
TABLE_NUMROWS_SEP + \"l\" + TABLE_NUMROWS_SEP +\n TABLE_NUMROWS_SEP + TABLE_NUMROWS_SEP.join(\n [\"l\" for col in xrange(self._num_cols - 1)]) +\n TABLE_NUMROWS_SEP)\n this_table_header = TABLE_HEADER_TEMPLATE.substitute(\n column_specifier = this_column_specifier,\n caption = str(self._caption),\n tag = str(self._tag))\n this_table_content = (TABLE_ROWSEP + os.linesep).join(\n [TABLE_COLSEP.join([header_elt] + [str(elt) for elt in row])\n for (header_elt, row) in zip(self._header, self._rows)])\n return os.linesep.join([this_table_header,\n ENDHEADER,\n this_table_content,\n TABLE_FOOTER])", "def table_reference(self) -> 'outputs.PreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReference':\n return pulumi.get(self, \"table_reference\")", "def tablename(klass):\n if not hasattr(klass, 'TABLENAME'):\n inf = Inflector()\n klass.TABLENAME = inf.tableize(klass.__name__)\n return klass.TABLENAME", "def __str__(self):\n return str([self.fields[col] for col in self.columns])", "def __repr__(self):\n return repr(self.table._root)", "def _table_name(cls, suffix, relative=False):\n mname = inspect.getmodule(cls).__name__ + '_' + suffix\n if relative:\n mname = mname.split('.')[-1]\n return mname", "def destination_table(self) -> str:\n return pulumi.get(self, \"destination_table\")", "def __str__(self) -> str:\n if self.name_field:\n return str(getattr(self, self.name_field))\n # noinspection PyUnresolvedReferences\n data = [\n # Collect the string representations of related objects.\n # getattr(self, fk_field.attname) and\n # fk_field.value_from_object(self) would only return the primary\n # key of the related object.\n str(getattr(self, fk_field.name))\n for fk_field in get_model_fields(\n self._meta.model, base=False, foreign=True, m2m=False\n )\n if not fk_field.null\n ]\n if len(data) < 2:\n # Cannot build a more meaningful representation than the default.\n return super().__str__()\n else:\n template = \"{}\" + \" ({})\" * (len(data) - 1)\n return template.format(*data)", "def __str__ (self):\n return \", \".join(str(row) for row in self.rows()).join(\"()\")", "def __str__(self):\n return '<table summary=\"%s\" class=\"%s\" %s>\\n%s </table>\\n' % (self.summary, self.cssclass, self.options, self.text)", "def table_reference(self) -> 'outputs.TableReferenceResponse':\n return pulumi.get(self, \"table_reference\")", "def _repr_html_(self):\n return (\n f'<b>{self.__class__.__name__}</b>'\n f'<br> <b>defined by:</b> {self._str_meta_()}'\n f'<br> <b>with columns:</b> {self._str_colnames()}'\n f'<br> {len(self)} objects'\n f'<br> {self._html_table()}'\n )", "def __repr__(self):\n return '%s(%s)' % (self.__class__.__name__, {\n column: value\n for column, value in self._to_dict().items()\n })", "def table(self):\r\n return self._table", "def __str__(self) -> str:\n\t\treturn \"{}{}; {}\".format(TAB, self.get_typestring(force_regen=True), self.get_description())", "def __str__(self):\n dictt = self.getFullDict()\n return \"SymbolTable(\\n{}\\n)\".format(pprint.pformat(dictt))", "def _create_table_sql(self) -> str:\n\n sql: List[str] = [f\"CREATE TABLE IF NOT EXISTS `{self.table}` (\"]\n\n for _field, _type in self.table_fields.items():\n sql.append(f\"[{_field}] {_type}, \")\n\n for _fields in getattr(self.record, _UNIQUES, []):\n sql.append(f\"UNIQUE ([{'], ['.join(_fields)}]), \")\n\n for _field, (f_key, _model) in self.foreigners.items():\n sql.append(f\"FOREIGN KEY ([{_field}]) REFERENCES [{_model.table}] ([{f_key}]), \")\n\n return \"\\n\".join(sql).strip(\", \") + \"\\n);\"", "def 
sql(self):\n return ';\\n'.join([x.sql() for x in self._statements]) + ';'", "def key(self) -> str:\n return f\"{self.model.__module__}.{self.model.__qualname__}\"", "def __unicode__(self):\n prepr = com.pprint_thing(self, escape_chars=('\\t', '\\r', '\\n'),\n quote_strings=True)\n return \"%s(%s, dtype='%s')\" % (type(self).__name__, prepr, self.dtype)", "def __str__(self):\n s = \"[{}] ({}) {}\".format(str(\n type(self).__name__), self.id, self.__dict__)\n return s", "def schema(self):\n attrs = self.attrs.copy()\n parts = ['CREATE', 'TABLE', self.name, '(%s,' % self.hash_key.schema]\n del attrs[self.hash_key.name]\n if self.range_key:\n parts.append(self.range_key.schema + ',')\n del attrs[self.range_key.name]\n if attrs:\n attr_def = ', '.join([attr.schema for attr in six.itervalues(attrs)])\n parts.append(attr_def + ',')\n\n parts.append(\"THROUGHPUT (%d, %d))\" % (self.read_throughput,\n self.write_throughput))\n parts.extend([g.schema for g in six.itervalues(self.global_indexes)])\n return ' '.join(parts) + ';'", "def dot_row(self) -> str:\n return _row_template.format(name=self.name, type_name=self.type_name)", "def __str__(self):\n return '{}({})'.format(type(self).__name__, self.__name)", "def get_qualified_name(self):\r\n return self.__schema + \".\" + self.__name", "def aliased_for_cypher(self):\n return '{} AS {}'.format(self.for_cypher(), self.alias_for_cypher)", "def __repr__(self):\r\n return '{} <{}>'.format(self.__class__.__name__,\r\n ', '.join(('{}={}'.format(k, getattr(self, k)) for k,v in self._primary_keys.iteritems()))\r\n )", "def __repr__(self) -> str:\n return f'{type(self).__name__}({self.name!r})'", "def __str__(self):\n s = '<Row';\n for column_number, value in enumerate(self.values):\n s += \"\\n \" + str(self.columns[column_number]) + \"=\" + str(value)\n s += \"\\n>\"\n return s", "def __str__(self):\r\n name = self.__class__.__name__\r\n return \"[{}] ({}) {}\".format(name, self.id, self.__dict__)", "def get_column_def(self):\r\n return '{} {}'.format(self.cql, self.db_type)", "def to_declaration(self):\n declaration = \"\\'\" + self.name + \"\\'\"\n declaration += \" \" + self.type\n if self.notnull:\n declaration += \" NOT NULL\"\n if self.primary:\n if self.autoincrement:\n declaration += \" PRIMARY KEY AUTOINCREMENT\"\n else:\n declaration += \" PRIMARY KEY\"\n if self.unique:\n declaration += \" UNIQUE\"\n if self.default:\n declaration += \" DEFAULT \\'%s\\'\" % self.default\n return declaration", "def tablename(self):\n _, tail = os.path.split(self.url)\n return tail[:-4]", "def __str__(self):\n if self.row_count > 0:\n texttable = Texttable(200)\n texttable.add_rows(self.rows)\n texttable.set_deco(Texttable.HEADER)\n return texttable.draw()\n else:\n return '<empty table>'" ]
[ "0.72242546", "0.7145293", "0.7145293", "0.71200347", "0.70594376", "0.70300525", "0.6961341", "0.6907269", "0.6881874", "0.6875941", "0.68361664", "0.68174165", "0.68159", "0.6803258", "0.67897487", "0.67895746", "0.67832047", "0.6771481", "0.6731299", "0.67075336", "0.6672556", "0.6660589", "0.66221374", "0.6607308", "0.6571065", "0.65245473", "0.647171", "0.64346856", "0.64038736", "0.63869065", "0.6370912", "0.6370289", "0.6342009", "0.6327043", "0.6313244", "0.63025904", "0.6295725", "0.6295725", "0.6295725", "0.62853307", "0.6281695", "0.62630594", "0.6257254", "0.62300557", "0.6227173", "0.619556", "0.61927295", "0.61464584", "0.6134395", "0.6120395", "0.60919464", "0.607461", "0.607091", "0.6049311", "0.6040004", "0.6034443", "0.6021024", "0.6014423", "0.60010606", "0.5998356", "0.59982866", "0.59930456", "0.5992058", "0.5991204", "0.5991204", "0.5966781", "0.59662", "0.59640235", "0.5963542", "0.59485984", "0.5946554", "0.5923281", "0.5919791", "0.5917325", "0.5913937", "0.59051394", "0.5901211", "0.5898148", "0.58764505", "0.5873603", "0.585973", "0.5854869", "0.58492523", "0.58431506", "0.5835047", "0.58330184", "0.5824578", "0.58156455", "0.5814772", "0.58046776", "0.5803671", "0.5801752", "0.5793425", "0.5785423", "0.5784495", "0.57798326", "0.5776961", "0.5773394", "0.5771313", "0.5768486" ]
0.76032066
0
Returns a mapping of column names to types for this snowflake source.
def get_table_column_names_and_types( self, config: RepoConfig ) -> Iterable[Tuple[str, str]]: from feast.infra.offline_stores.snowflake import SnowflakeOfflineStoreConfig from feast.infra.utils.snowflake_utils import ( execute_snowflake_statement, get_snowflake_conn, ) assert isinstance(config.offline_store, SnowflakeOfflineStoreConfig) snowflake_conn = get_snowflake_conn(config.offline_store) if self.database and self.table: query = f'SELECT * FROM "{self.database}"."{self.schema}"."{self.table}" LIMIT 1' elif self.table: query = f'SELECT * FROM "{self.table}" LIMIT 1' else: query = f"SELECT * FROM ({self.query}) LIMIT 1" result = execute_snowflake_statement(snowflake_conn, query).fetch_pandas_all() if not result.empty: metadata = result.dtypes.apply(str) return list(zip(metadata.index, metadata)) else: raise ValueError("The following source:\n" + query + "\n ... is empty")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_column_types(self, table_name):\n\n curs = self.cursor()\n curs.execute('PRAGMA table_info(%s)' % table_name)\n\n types = {str(d[1].lower()): _TYPE_MAP[d[2].split()[0]] for d in curs.fetchall()}\n\n curs.close()\n\n return types", "def to_schema(cls):\n result = {}\n\n for name in cls.types().columns:\n column = cls.__table__.c[name]\n\n try:\n column.type.python_type\n except NotImplementedError:\n result[name] = column.type.__class__.__name__\n else:\n result[name] = str(column.type)\n\n return result", "def get_types(self) -> ColumnTypes:\n if self._types is None:\n return {}\n return {\n key: Types.STRING if value == Types.NONE else value\n for key, value in self._types.items()\n }", "def get_types(self):\n return self.column_type", "def column_reflection_fallback(self):\n col_info_dict_list: List[Dict]\n if self.sql_engine_dialect.name.lower() == \"mssql\":\n type_module = self._get_dialect_type_module()\n # Get column names and types from the database\n # StackOverflow to the rescue: https://stackoverflow.com/a/38634368\n col_info_query: TextClause = sa.text(\n f\"\"\"\nSELECT\n cols.NAME, ty.NAME\nFROM\n tempdb.sys.columns AS cols\nJOIN\n sys.types AS ty\nON\n cols.user_type_id = ty.user_type_id\nWHERE\n object_id = OBJECT_ID('tempdb..{self._table}')\n \"\"\"\n )\n col_info_tuples_list = self.engine.execute(col_info_query).fetchall()\n col_info_dict_list = [\n {\"name\": col_name, \"type\": getattr(type_module, col_type.upper())()}\n for col_name, col_type in col_info_tuples_list\n ]\n else:\n query: Select = sa.select([sa.text(\"*\")]).select_from(self._table).limit(1)\n col_names: list = self.engine.execute(query).keys()\n col_info_dict_list = [{\"name\": col_name} for col_name in col_names]\n return col_info_dict_list", "def column_types(self):\n return self._hndl.column_types()", "def _get_col_type_dict(self):\n schema = []\n if isinstance(self.schema, str):\n schema = json.loads(self.schema)\n elif isinstance(self.schema, list):\n schema = self.schema\n elif self.schema is not None:\n self.log.warning(\"Using default schema due to unexpected type. Should be a string or list.\")\n\n col_type_dict = {}\n try:\n col_type_dict = {col[\"name\"]: col[\"type\"] for col in schema}\n except KeyError:\n self.log.warning(\n \"Using default schema due to missing name or type. 
Please \"\n \"refer to: https://cloud.google.com/bigquery/docs/schemas\"\n \"#specifying_a_json_schema_file\"\n )\n return col_type_dict", "def input_types_dict(self) -> Dict[str, Union[DataType, np.dtype]]:\n if not self.has_input_names():\n raise MlflowException(\"Cannot get input types as a dict for schema without names.\")\n return {x.name: x.type for x in self.inputs}", "def columns(cls):\n return { col.key: { 'python_type': col.type.python_type,\n 'type': str(col.type),\n 'primary_key': col.primary_key,\n 'default': col.default,\n 'nullable': col.nullable}\n for col in cls.__table__.columns }", "def test_get_column_types(self):\n table = 'test_table_cols'\n\n columnPythonTypes = {'col1': float,\n 'col2': datetime.datetime,\n 'col3': str,\n 'col4': str\n }\n columnDBTypes = {\n 'col1': 'integer',\n 'col2': 'date' if _dbType == 'oracle' else 'timestamp',\n 'col3': 'varchar2 (3)' if _dbType == 'oracle' else 'varchar (3)',\n 'col4': 'char (1)'\n }\n\n columnDefs = ','.join([c + ' ' + v for c, v in list(columnDBTypes.items())])\n with self.dbh.table_recreate(table, columnDefs):\n try:\n res = self.dbh.get_column_types(table)\n except Exception:\n self.dbh.rollback()\n raise\n\n self.assertEqual(res, columnPythonTypes)", "def get_column_types():\n\n def predicate(obj):\n \"\"\"A predicate to get all classes that are subclasses of\n MafColumnRecord\"\"\"\n return inspect.isclass(obj) and issubclass(obj, MafColumnRecord)\n\n # Get all available column types\n return inspect.getmembers(sys.modules[\"maflib.column_types\"], predicate)", "def getTableColumnDefs(self, schema, table):\r\n src_columns = self.fetchSqlRecords(\r\n \"select c.column_name, data_type, character_maximum_length, numeric_precision, numeric_scale from information_schema.columns c where c.table_schema = '{}' and c.table_name='{}'\".format(schema, table))\r\n return [dict(zip(('name', 'type', 'max_length', 'precision', 'scale'), c)) for c in src_columns]", "def _parse_col_constants() -> Dict[str, List[str]]:\n\n col_type_map = {\n k: [] for k in TEST_ARGUMENT_DATA_TYPES.keys()\n }\n return col_type_map", "def assign_column_types(self):\n type_list = [\"category\" if u_input == 1 else float for u_input in self.user_column_label]\n self.df = self.df.astype(dict(zip(self.df.columns, type_list)))\n df_types = pd.DataFrame(self.df.dtypes).reset_index()\n df_types.columns = [\"column_name\", \"dtype\"]\n df_types.dtype = df_types.dtype.astype(str)\n self.column_dtypes = {list(df_types.column_name)[i]: list(df_types.dtype)[i] for i in range(len(df_types))}", "def _deduct_types(cls, row: Row) -> ColumnTypes:\n return {\n key: get_value_type(cls.whole_number_to_int(value))\n for key, value in row.items()\n }", "def readDefinedTypes(self):\n types = {}\n for m in re.finditer(\"TYPE (.*) = (.*);\", self.data):\n typename, typetype = m.groups() \n if typetype in self.types.keys():\n types[typename] = typetype\n \n return types", "def _get_types(self):\n\n db = Database()\n self.c_built_ins = list(map(lambda tup: tup[0], db.select_built_types()))\n self.c_built_in_array_types = r'^(' + '|'.join(self.escaped(self.c_built_ins)) + ')\\[[0-9]*\\]'\n self.c_types = list(map(lambda tup: tup[0], db.select_types()))\n self.c_array_types = r'^(' + '|'.join(self.escaped(self.c_types)) + ')\\[[0-9]*\\]'\n db.close_connection()", "def dtypes(self) -> List[str]:\n\n return [column.dtype for column in self.plaincolumns]", "def field_types(self):\n if self._field_types is None:\n with open(self.csv_path, encoding=\"utf8\") as f:\n reader = 
csv.DictReader(f)\n for i, row in enumerate(reader):\n if i == 0:\n self._field_types = {k: v for k, v in row.items()}\n return self._field_types", "def _get_columns(source):\n return _get_tuple(source)", "def data_types(self):\n return self['data_types']", "def get_fields(self):\n\n fields = {}\n LOGGER.debug('Treating all columns as string types')\n if os.path.exists(self.data):\n with open(self.data) as src:\n data = json.loads(src.read())\n for key, value in data['features'][0]['properties'].items():\n if isinstance(value, float):\n type_ = 'number'\n elif isinstance(value, int):\n type_ = 'integer'\n else:\n type_ = 'string'\n\n fields[key] = {'type': type_}\n else:\n LOGGER.warning(f'File {self.data} does not exist.')\n return fields", "def column_types(self):\n if self.__type__ == VERTEX_GFRAME:\n return self.__graph__.__proxy__.get_vertex_field_types()\n elif self.__type__ == EDGE_GFRAME:\n return self.__graph__.__proxy__.get_edge_field_types()", "def seek_types(dataframe: pd.DataFrame) -> Dict[str, List[str]]:\r\n\r\n def _get_global_type(t):\r\n if \"obj\" in str(t):\r\n return \"cat\"\r\n elif \"float\" in str(t):\r\n return \"float\"\r\n elif \"int\" in str(t):\r\n return \"int\"\r\n elif \"date\" in str(t):\r\n return \"date\"\r\n else:\r\n return \"other\"\r\n\r\n found_types = (\r\n dataframe.dtypes.apply(_get_global_type)\r\n .reset_index()\r\n .groupby(0)\r\n .agg(lambda x: list(x))\r\n )\r\n found_types = {k: v for k, v in zip(found_types.index, found_types[\"index\"])}\r\n return found_types", "def pandas_typecast(self) -> dict:\n res = {}\n for feat in self.data_features:\n res[feat.key] = ApiForm.typecast(feat.dtype)\n return res", "def _read_csv_dtypes(table_meta):\n dtypes = dict()\n for name, field in table_meta['fields'].items():\n field_type = field['type']\n if field_type == 'id' and field.get('subtype', 'integer') == 'string':\n dtypes[name] = str\n\n return dtypes", "def transform_schema(pgschema):\n datatypes = {}\n for field in pgschema:\n if 'cartodb_id' in field:\n continue\n datatypes[field] = map_dtypes(pgschema[field]['type'])\n return datatypes", "def columns_type(self,table):\n with self.conn.cursor() as cur:\n #_logger.debug('Columns Query. 
sql: %r', self.table_columns_query)\n cur.execute(self.columns_info_query % (self.dbname,table))\n for row in cur:\n yield row", "def get_column_type(cls, **kwargs: Any) -> Any:\n return sqlalchemy.JSON()", "def readOtherTypes(self):\n types = {}\n for m in re.finditer(\"TYPE (\\w*) = (.*);\", self.data):\n typename, type_string = m.groups() \n if typename not in self.types.keys():\n types[typename] = type_string\n \n return types", "def readTypes(self):\r\n types = {}\r\n for m in re.finditer(\"TYPE (.*) = (.*);\", self.data):\r\n typename, typetype = m.groups() \r\n if typetype in self.SIMPLETYPES:\r\n types[typename] = typetype\r\n else:\r\n types[typename] = \"#\" + typetype\r\n \r\n return types", "def _columns(cls, schema: dsl.Source.Schema) -> typing.Sequence[str]:\n return tuple(f.name for f in schema)", "def _get_db_columns_dtypes(self, ukbcsv_file):\n\n logger.info('Getting columns types')\n\n filename = os.path.splitext(ukbcsv_file)[0] + '.html'\n\n logger.info('Reading data types from {}'.format(filename))\n with open(filename, 'r', encoding='latin1') as f:\n tmp = pd.read_html(f, match='UDI', header=0, index_col=1, flavor='html5lib')\n\n logger.debug('Filling NaN values')\n df_types = tmp[0].loc[:, 'Type']\n df_types = df_types.fillna(method='ffill')\n\n df_descriptions = tmp[0].loc[:, 'Description']\n df_descriptions = df_descriptions.fillna(method='ffill')\n del tmp\n\n db_column_types = {}\n column_types = {}\n column_descriptions = {}\n column_codings = {}\n\n # open just to get columns\n csv_df = pd.read_csv(ukbcsv_file, index_col=0, header=0, nrows=1)\n columns = csv_df.columns.tolist()\n del csv_df\n\n logger.debug('Reading columns')\n for col in columns:\n col_type = df_types[col]\n final_db_col_type = TEXT\n\n if col_type == 'Continuous':\n final_db_col_type = FLOAT\n\n elif col_type == 'Integer':\n final_db_col_type = INT\n\n elif col_type in ('Date', 'Time'):\n final_db_col_type = TIMESTAMP\n\n db_column_types[col] = final_db_col_type\n column_types[self._rename_columns(col)] = col_type\n column_descriptions[self._rename_columns(col)] = df_descriptions[col].split('Uses data-coding ')[0]\n\n # search for column coding\n coding_matches = re.search(Pheno2SQL.RE_FIELD_CODING, df_descriptions[col])\n if coding_matches is not None:\n column_codings[self._rename_columns(col)] = int(coding_matches.group('coding'))\n\n return db_column_types, column_types, column_descriptions, column_codings", "def __get_column_mapping(self):\n\n s1 = list(Sample(\"FL\", \"M1.0@265_Primary_ar115_s2010-08-06T06_36_00_e2010-08-06T18_24_00.csv\").get_data().columns)[:25]\n column_mapping = {}\n for i in range(len(s1)):\n column_mapping[i] = s1[i]\n\n return column_mapping", "def types(cls):\n mapper = class_mapper(cls)\n primary_keys = set()\n autoincrementing = set()\n columns = set()\n required = set()\n relationships = set(\n name for name, column in mapper.relationships.items())\n\n # TODO: it's possible though unlikely, based on our current tables,\n # that a relationship this could be some other than a list\n type_mapping = dict((name, list) for name in relationships)\n\n # create sets for all true columns, primary keys,\n # and required columns\n for name, column in mapper.c.items():\n columns.add(name)\n\n if column.primary_key:\n primary_keys.add(name)\n\n if column.autoincrement:\n autoincrementing.add(name)\n\n if column.primary_key and not column.autoincrement:\n required.add(name)\n\n if not column.nullable and column.default is None:\n required.add(name)\n\n # get the Python 
type(s)\n try:\n python_types = column.type.python_type\n except NotImplementedError: # custom type object\n python_types = column.type.json_types\n\n # if we're using Python 2.x be sure that we include\n # a couple of extra types that could potentially\n # come in with a request\n if PY2 and python_types is str:\n # pylint: disable=undefined-variable\n python_types = (python_types, unicode)\n\n elif PY2 and python_types is int:\n # pylint: disable=undefined-variable\n python_types = (python_types, long)\n\n type_mapping[name] = python_types\n\n return ModelTypes(\n primary_keys=primary_keys,\n autoincrementing=autoincrementing,\n columns=columns,\n required=required,\n relationships=relationships,\n mappings=type_mapping)", "def dtypes(self) -> 'DataFrame':\n dtype_list: List[str] = [utils.convert_kind_to_dtype(self._column_info[col].dtype)\n for col in self._columns]\n arr: ndarray = np.array(dtype_list, dtype='O')\n columns: List[str] = ['Column Name', 'Data Type']\n data, str_reverse_map = _va.convert_str_to_cat_list_2d([self._columns, arr])\n new_data: Dict[str, ndarray] = {'S': data}\n new_column_info: ColInfoT = {'Column Name': utils.Column('S', 0, 0),\n 'Data Type': utils.Column('S', 1, 1)}\n return self._construct_from_new(new_data, new_column_info, np.array(columns, dtype='O'),\n str_reverse_map)", "def get_user_defined_types(self):\n query = mssqlqueries.get_user_defined_types()\n logger.info(u'UDTs query: %s', query)\n for tabular_result in self.execute_query(query):\n for row in tabular_result[0]:\n yield (row[0], row[1])", "def get_schema() -> Dict[str, type]:\n schema: Dict[str, type] = {}\n\n # Add all columns from pipeline configs\n for pipeline in get_pipelines():\n schema.update(pipeline.schema)\n\n # Add new columns from adapter\n for col_old, col_new in OUTPUT_COLUMN_ADAPTER.items():\n if col_old in schema and col_new is not None:\n schema[col_new] = schema[col_old]\n\n return schema", "def get_inferred_dtypes(self, dtypes_validated: TYPE_DSTR) -> TYPE_DSTR:\n\n dtypes_inferred = {}\n\n for column in self.df.columns:\n if column in dtypes_validated:\n continue\n\n dtypes_inferred[column] = self.inspect_dtype(self.df[column])\n\n return dtypes_inferred", "def get_redshift_data_types(data):\n\tdata_types = get_data_types(data)\n\tredshift_data_types = OrderedDict()\n\tfor column, data_type in data_types.items():\n\t\tif data_type.startswith('int'):\n\t\t\tredshift_data_types[column] = 'INTEGER'\n\t\telif data_type.startswith('float'):\n\t\t\tredshift_data_types[column] = 'REAL'\n\t\telif data_type.startswith('datetime'):\n\t\t\tredshift_data_types[column] = 'TIMESTAMP'\n\t\telif data_type.startswith('bool'):\n\t\t\tredshift_data_types[column] = 'BOOLEAN'\n\t\telse:\n\t\t\tmax_length = int(max([len(bytes(str(x), 'utf-8')) for x in data[column].values]))\n\t\t\tnearest_power_of_two = 2 ** ceil(log2(max_length+1))-1\n\t\t\tredshift_data_types[column] = f'VARCHAR({nearest_power_of_two})'\n\treturn redshift_data_types", "def _as_dict(self):\r\n values = self._dynamic_columns or {}\r\n for name, col in self._columns.items():\r\n values[name] = col.to_database(getattr(self, name, None))\r\n return values", "def convert_types(self, schema, col_type_dict, row) -> list:\n return [\n self.convert_type(value, col_type_dict.get(name), stringify_dict=self.stringify_dict)\n for name, value in zip(schema, row)\n ]", "def data_types():\n\n return ...", "def get_column_type(type_name: str) -> object:\n raise NotImplementedError", "def get_feature_types(self):\n with 
self._map_lock.read_lock():\n return self._feature2memory.keys()", "def getDataTypes(self, name: unicode) -> List[ghidra.program.model.data.DataType]:\n ...", "def get_columns(self):\n self.db._refresh_types_internal([Column])\n return self.find(type_cls=Column)", "def get_columns(self):\n self.db._refresh_types_internal([Column])\n return self.find(type_cls=Column)", "def convert_data_types(fields, src_db='mysql', dest_db='postgres'):\n\n data_type_map = {\n 'mysql': {\n 'postgres': {\n 'date': 'date',\n 'tinyint': 'smallint',\n 'smallint': 'smallint',\n 'mediumint': 'integer',\n 'int': 'bigint',\n 'bigint': 'numeric',\n 'float': 'real',\n 'double': 'double precision',\n 'tinytext': 'varchar',\n 'mediumtext': 'varchar',\n 'longtext': 'varchar',\n 'varchar': 'varchar',\n 'text': 'varchar',\n 'char': 'char',\n 'binary': 'bytea',\n 'varbinary': 'bytea',\n 'tinyblob': 'bytea',\n 'blob': 'bytea',\n 'mediumblob': 'bytea',\n 'longblob': 'bytea',\n 'datetime': 'timestamp',\n 'time': 'time',\n 'decimal': 'decimal',\n 'json': 'jsonb'\n }\n }\n }\n\n for elem in fields:\n elem['data_type'] = data_type_map[src_db][dest_db][elem['data_type']]\n\n if elem['data_type'] == 'decimal':\n elem['data_type'] += f'''{int(elem['numeric_precision']), int(elem['numeric_scale'])}'''\n\n fields = {e['column_name']: e['data_type'] for e in fields}\n\n return fields", "def get_column_type(cls, **kwargs: Any) -> Any:\n return sqlalchemy.Text()", "def getTypes():\n\n\t\tquery = \"\\\n\t\t\tSELECT\\\n\t\t\t\tid_item_container_type,\\\n\t\t\t\tlabel\\\n\t\t\tFROM\\\n\t\t\t\titem_container_type\\\n\t\t\"\n\n\t\treturn {t['id_item_container_type']: t['label'] for t in Model.fetchAllRows(query)}", "def get_table_columns_list(self, table, dictionary=False):\n if \".\" in table:\n prefix = table.split(\".\")[0] + \".\"\n table = table.split(\".\")[1]\n else:\n # table = table\n prefix = \"\"\n cur = self._connection.cursor()\n\n if self.isMSSQL(): # pragma: no cover\n prf = \"\" if len(prefix) == 0 else prefix + \".\"\n sql = \"\"\"SELECT * FROM (SELECT OBJECT_NAME(c.OBJECT_ID) TableName,c.name AS ColumnName,t.name AS TypeName\n FROM sys.columns AS c\n JOIN sys.types AS t ON c.user_type_id=t.user_type_id\n ) AS ttt\n WHERE ttt.TableName = '%s%s'\"\"\" % (prf, table)\n cur.execute(sql)\n else:\n cur.execute(\"PRAGMA %stable_info(%s)\" % (prefix, table) + \";\")\n\n res = cur.fetchall()\n cur.close()\n res = [(r[1], DatabaseCore._SQL_conversion_types[r[2]]) for r in res]\n if dictionary:\n dic = {}\n for i in range(0, len(res)):\n dic[i] = res[i]\n return dic\n else:\n return res", "def _map_dtypes(type_names, field_widths):\n dtypes = []\n for i, name in enumerate(type_names):\n if name == 'int':\n dtypes.append('i8')\n elif name == 'double':\n dtypes.append('f8')\n elif name == 'char':\n dtypes.append('a{0}'.format(field_widths[i]))\n else:\n raise ValueError('Unexpected type name: {0}.'.format(name))\n return dtypes", "def get_data_types(data):\n\tdtypes = data.dtypes\n\treturn OrderedDict(zip(dtypes.index, dtypes.astype(str)))", "def get_sql_columns(self, request):\n cur = self.execute(request)\n col_name_list = [tuple[0] for tuple in cur.description]\n cur.close()\n return col_name_list", "def meta(cls):\n if getattr(cls, '__from_class__', None) is not None:\n cls = cls.__from_class__\n attribute_info = {}\n for name, value in cls.__table__.columns.items():\n attribute_info[name] = str(value.type).lower()\n\n return {cls.__name__: attribute_info}", "def map_dtypes(pgtype):\n # may not be a complete list, could not find 
CARTO SQL API documentation\n # about data types\n dtypes = {'number': 'float64',\n 'date': 'datetime64',\n 'string': 'object',\n 'geometry': 'object',\n 'boolean': 'bool'}\n try:\n return dtypes[pgtype]\n except KeyError:\n # make it a string if not in dict above\n return 'object'", "def names(self):\n return self._names_to_cols.keys()", "def _cast_types(self, input_dict):\n return cast_types(input_dict, self.params['dtype'])", "def _resolve_target_dtypes(self, dyf: DynamicFrame) -> DynamicFrame:\n resolve_choice_specs = [\n (col, f\"cast:{col_type}\") for col, col_type in self.target_table.get_dyf().toDF().dtypes\n ]\n\n return dyf.resolveChoice(resolve_choice_specs)", "def get_column_dtypes(self) -> Tuple[List[str], List[str]]:\n\n columns, pyspark_dtypes = zip(*self.df.dtypes)\n\n # check unsupported pyspark dtypes\n unsupported = set(pyspark_dtypes).difference(self.TYPE_MAPPING.keys())\n if unsupported:\n raise ValueError(\"Unsupported dtype encountered: {}. Supported\"\n \"dtypes are: {}.\"\n .format(unsupported, self.TYPE_MAPPING.keys()))\n\n dtypes = [self.TYPE_MAPPING[dtype] for dtype in pyspark_dtypes]\n\n return columns, dtypes", "def get_postgres_column_type(\n self, tablename: str, column_name: str\n ) -> str:\n return self.get_postgres_column_definition(\n tablename=tablename, column_name=column_name\n ).data_type.upper()", "def _get_precision_column_types(\n table_object: type[DeclarativeBase],\n) -> list[str]:\n return [\n column.key\n for column in table_object.__table__.columns\n if column.type is DOUBLE_TYPE\n ]", "def data_all_types(df):\n \n printmd (\"**Type of every column in the data**\")\n print(\"\")\n print(df.dtypes)", "def set_columns(self, types):\n if self._types:\n raise wandb.Error('TypedTable.set_columns called more than once.')\n try:\n for key, type_ in types:\n if type_ not in TYPE_TO_TYPESTRING:\n raise wandb.Error('TypedTable.set_columns received invalid type ({}) for key \"{}\".\\n Valid types: {}'.format(\n type_, key, '[%s]' % ', '.join(VALID_TYPE_NAMES)))\n except TypeError:\n raise wandb.Error(\n 'TypedTable.set_columns requires iterable of (column_name, type) pairs.')\n self._types = dict(types)\n self._output.add({\n 'typemap': {k: TYPE_TO_TYPESTRING[type_] for k, type_ in types},\n 'columns': [t[0] for t in types]})", "def columns(self):\n return self._names_to_cols.values()", "def get_column_def(self):\r\n db_type = self.db_type.format(\r\n self.key_type.db_type,\r\n self.value_type.db_type\r\n )\r\n return '{} {}'.format(self.cql, db_type)", "def getDatasetTypes(self):\n\n list = []\n for attr in dir(self):\n if attr.startswith(\"map_\"):\n list.append(attr[4:])\n return list", "def _get_columns_mapping_dict():\n\n columns_mapping_dict = {}\n for original_header in COLUMN_HEADERS_MAPPER:\n new_header = COLUMN_HEADERS_MAPPER[original_header]\n columns_mapping_dict[new_header] = [original_header]\n return columns_mapping_dict", "def gettypes(self):\n return [str(self.sd.xlate(t[0])) for t in self.sd.types]", "def readSimpleTypes(self):\n types = {}\n for m in re.finditer(\"TYPE (.*) = (.*);\", self.data):\n typename, typetype = m.groups() \n if typetype in self.SIMPLETYPES:\n types[typename] = typetype\n \n return types", "def as_dict(self):\r\n values = {}\r\n for name, col in self._columns.items():\r\n values[name] = col.to_database(getattr(self, name, None))\r\n return values", "def datasource_types(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"datasource_types\")", "def type_support_map(self) -> 
Mapping[AllTypes, TypeSupport]:\n raw_config: Dict[str, str] = self.options.get(\"datatypes\", {})\n config = {x: TypeSupport[y.upper()] for x, y in raw_config.items()}\n return fallback(defaultdict(lambda: TypeSupport.UNSUPPORTED), config)", "def _map_data_types(dtype):\n return _data_type_map[dtype]", "def columns(self):\n return self._coldefs", "def _get_type_mapping():\n return {\n Box.SPACE_NAME: Box,\n Dict.SPACE_NAME: Dict,\n Discrete.SPACE_NAME: Discrete\n }", "def create_data_types(self):\n for col in self.all_columns:\n try:\n if float(self.train[col].iloc[-3]):\n self.train[col] = self.train[col].astype(np.float32)\n except:\n pass\n self.d_types = self.train.dtypes", "def javatype(s):\n return {\n 'int': 'int',\n 'String': 'String',\n 'Long': 'Long',\n 'boolean': 'boolean'\n }.get(s.name, s.name)", "def get_datatypes(self, tid):\n return self._parametersdict[\"DATATYPES\"].get(tid)", "def parse_def(self, sql):\n parsed = sqlparse.parse(sql)[0]\n\n # extract the parenthesis which holds column definitions\n _, par = parsed.token_next_by(i=sqlparse.sql.Parenthesis)\n columns = self.extract_definitions(par)\n\n r = []\n for column in columns:\n s = {}\n s['key'] = column[0]\n s['type'] = column[1:]\n r.append(s)\n #print('NAME: {name!s:12} DEFINITION: {definition}'.format(\n # name=column[0], definition=' '.join(str(t) for t in column[1:])))\n return r", "def read_inputs_field_types():\n inputs = yaml.load(\n open(os.path.join(os.path.dirname(__file__), 'inputs.yml')).read())\n\n for db in inputs.keys():\n inputs[db]['fieldnames'] = [field['name']for field in inputs[db]['fields']]\n return inputs", "def columns(self):\n return set(self.native_schema)", "def get_column_def(self):\r\n return '{} {}'.format(self.cql, self.db_type)", "def _getValues(self):\n res = {}\n for colname, column in self._iterNameColumn():\n res[colname] = column.toSql(self._values[colname])\n return res", "def get_columns(self):\n if self.dbtype == 'pg':\n q = \"select attname from pg_class, pg_attribute where relname = %s and attrelid = pg_class.oid and attnum > 0 and attisdropped = false;\"\n else:\n q = \"select columns.name from columns, tables where tables.name = %s and tables.id = columns.table_id;\"\n ret = []\n for (attr,) in self.query(q, self.tablename):\n ret.append(str(attr))\n return ret", "def _determine_column_types(self, column_types):\n if isinstance(column_types, list):\n # List based typing\n if len(column_types) != len(self.headers):\n raise ValueError(f\"You must provide as many column types as columns of data\\nFound {len(column_types)}\"\n f\" but expected {len(self.headers)}\")\n else:\n return [col_type if col_type != bool else self._string_to_bool for col_type in column_types]\n\n elif isinstance(column_types, type):\n # Uniform Typing of type column_types\n if column_types == bool:\n return [self._string_to_bool for _ in range(self.row_length)]\n else:\n return [column_types for _ in range(self.row_length)]\n\n elif not column_types:\n # None Typed operation\n return column_types\n\n else:\n raise TypeError(f\"Column_types takes a list[types], type or None. 
Yet {type(column_types)} was found\")", "def columns(self, table_name):\n table = self._create_table(table_name)\n return [c.name for c in table.c]", "def variable_types(self, data_key, only_type=None):\r\n if self[data_key].meta['columns'] is None:\r\n return 'No meta attached to data_key: %s' %(data_key)\r\n else:\r\n types = {\r\n 'int': [],\r\n 'float': [],\r\n 'single': [],\r\n 'delimited set': [],\r\n 'string': [],\r\n 'date': [],\r\n 'time': [],\r\n 'array': []\r\n }\r\n not_found = []\r\n for col in self[data_key].data.columns:\r\n if not col in ['@1', 'id_L1', 'id_L1.1']: \r\n try:\r\n types[\r\n self[data_key].meta['columns'][col]['type']\r\n ].append(col)\r\n except:\r\n not_found.append(col) \r\n for mask in self[data_key].meta['masks'].keys():\r\n types[self[data_key].meta['masks'][mask]['type']].append(mask)\r\n if not_found:\r\n print '%s not found in meta file. Ignored.' %(not_found)\r\n if only_type:\r\n return types[only_type]\r\n else:\r\n return types", "def _types(cls):\n return {}", "def getColumnDictionary(self):\n try:\n column_dictionary = []\n con = self.getMetadataDatabaseConnection()\n column_values = con.cursor()\n con.cursor().callproc('qiime_assets.get_column_dictionary', [column_values])\n for row in column_values:\n # Skip if no column name is found\n if row[0] is None:\n continue\n\n # Some variables to allow for re-assignment should any of them be None\n column_name = row[0].lower()\n expected_values = row[1]\n description = row[2]\n data_type = row[3]\n max_length = row[4]\n min_length = row[5]\n active = row[6]\n \n if row[1] == None:\n expected_values == ''\n elif row[2] == None:\n description == ''\n elif row[3] == None:\n data_type = ''\n elif row[4] == None:\n max_length = ''\n elif row[5] == None:\n min_length = ''\n elif row[6] == None:\n min_length = ''\n \n list_item = (column_name, expected_values, description, data_type, max_length, min_length, active)\n column_dictionary.append(list_item)\n return column_dictionary\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False", "def _get_typedefs_header(self):\n atlas_endpoint = self.endpoint_url + \"/types/typedefs/headers\"\n getHeaders = requests.get(\n atlas_endpoint,\n headers=self.authentication.get_authentication_headers()\n )\n results = self._handle_response(getHeaders)\n\n output = dict()\n for typedef in results:\n active_category = typedef[\"category\"].lower()+\"Defs\"\n if active_category not in output:\n output[active_category] = []\n\n output[active_category].append(typedef[\"name\"])\n\n return output", "def get_sql_fields(self):\n return self.mapping_db, 'GeneNames', 'EnsembleID', 'symbol', 'name'", "def field_feature_dtypes(self):\n\n dtypes_grp = self.h5['{}/{}'.format(SETTINGS, FIELD_FEATURE_DTYPES_STR)]\n\n field_paths = _iter_field_paths(dtypes_grp)\n\n dtypes = {}\n for field_path in field_paths:\n dtype_str = dtypes_grp[field_path][()]\n # if there is 'None' flag for the dtype then return None\n if dtype_str == NONE_STR:\n dtypes[field_path] = None\n else:\n dtype_obj = json.loads(dtype_str)\n dtype_obj = [tuple(d) for d in dtype_obj]\n dtype = np.dtype(dtype_obj)\n dtypes[field_path] = dtype\n\n return dtypes", "def collect_type_names(types: Iterable[UnresolvedType]) -> TypeMap:\n return StrictDict({\n type_.name: type_ for type_ in visit_types(types)\n if not isinstance(type_, DeferredType)\n })", "def get_types(self):\n return self.types", "def data_types(self):", "def readAggregatedSimpleTypes(self):\n types = {}\n # 
SETs\n for m in re.finditer(\"TYPE (\\w*) = SET (.*);\", self.data):\n typename, typetype = m.groups() \n types[typename] = 'SET ' + typetype\n \n # BAGs\n for m in re.finditer(\"TYPE (\\w*) = BAG (.*);\", self.data):\n typename, typetype = m.groups() \n types[typename] = 'BAG ' + typetype\n \n # LISTs\n for m in re.finditer(\"TYPE (\\w*) = LIST (.*);\", self.data):\n typename, typetype = m.groups() \n types[typename] = 'LIST ' + typetype\n \n # ARRAYs\n for m in re.finditer(\"TYPE (\\w*) = ARRAY (.*);\", self.data):\n typename, typetype = m.groups() \n types[typename] = 'ARRAY ' + typetype\n \n # STRING vectors\n for m in re.finditer(\"TYPE (\\w*) = STRING\\((.*);\", self.data):\n typename, typetype = m.groups() \n types[typename] = 'STRING(' + typetype\n \n return types", "def make_converters(meta):\n converters = {}\n\n for k, v in meta['columns'].items():\n converters[k] = type_to_converter(v['type'])\n\n return converters", "def dtypes(self):\n return self.to_pandas().dtypes" ]
[ "0.73652345", "0.6958067", "0.69160265", "0.6754187", "0.6642904", "0.66274935", "0.6622891", "0.6481625", "0.6360409", "0.63488346", "0.63274217", "0.6270515", "0.6258899", "0.6238819", "0.61439264", "0.6115824", "0.6091439", "0.6059644", "0.59835035", "0.59506655", "0.5928293", "0.5925507", "0.5910565", "0.5840974", "0.58360434", "0.58225924", "0.58079875", "0.5772235", "0.5755933", "0.57344145", "0.57332593", "0.57317954", "0.57001084", "0.5683586", "0.5681534", "0.5658304", "0.56562245", "0.56211287", "0.5610259", "0.5547241", "0.55236834", "0.55105966", "0.5499854", "0.54997367", "0.54869634", "0.54843485", "0.5477991", "0.5477991", "0.5471682", "0.54707414", "0.5463405", "0.5453299", "0.5448863", "0.5440296", "0.5436804", "0.54250145", "0.5419243", "0.5416149", "0.541344", "0.54098624", "0.5402753", "0.5402043", "0.5398632", "0.5397996", "0.53953975", "0.5392285", "0.5390117", "0.53892356", "0.53755015", "0.53754663", "0.536116", "0.5359253", "0.53584945", "0.53438574", "0.5335131", "0.53196526", "0.5306954", "0.52997977", "0.529189", "0.52913594", "0.52876556", "0.526804", "0.5267684", "0.52664334", "0.52656156", "0.5261532", "0.52607065", "0.5260342", "0.52551925", "0.52463615", "0.52379334", "0.523646", "0.52286243", "0.5222742", "0.52205163", "0.5218743", "0.52134424", "0.5213139", "0.5212176", "0.52101433" ]
0.6247801
13
Returns the snowflake SQL query referenced by this source.
def query(self): return self._query
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def query(self):\n return self.snowflake_options.query", "def sql_query(self):\n return self._project.sql_query", "def get_sql_connection(self):\n return self.sql", "def sql(self):\n return ';\\n'.join([x.sql() for x in self._statements]) + ';'", "def get_query(self):\n columns = ','.join(['\"{}\"'.format(x) for x in self.columns])\n query = 'SELECT {} FROM \"{}\"'.format(columns, self.table)\n filter_params = []\n if self.filters:\n filter_sql, filter_params = filter_postgis(self.filters)\n query += ' WHERE {}'.format(filter_sql)\n query += ';'\n return str(text(query)), filter_params", "def get_query(self):\n columns = ','.join(['\"{}\"'.format(x) for x in self._columns])\n query = 'SELECT {} FROM \"{}\"'.format(columns, self._table)\n filter_params = []\n if self._filters:\n filter_sql, filter_params = filter_postgis(self._filters)\n query += ' WHERE {}'.format(filter_sql)\n query += ';'\n return str(text(query)), filter_params", "def construct_query(self):\n reader = QueryReader(filepath=self.filepath, filename=self.filename, raw_sql=self.raw_sql, params=self.params)\n return reader.sql", "def determine_query():\n return query if query is not None \\\n else f\"SELECT * FROM '{table}';\"", "def get_sql(self, table_name):\n\t\tcolNames,sql = self._generate_sql_parts(table_name)\n\t\treturn get_query(table_name, colNames, sql)", "def build(self):\n return self._sql.strip()", "def query(self):\n return self.details[KEY_QUERY]", "def to_sql(self):\n return self._grammar.compile_select(self)", "def getQuery(self):\n return self._query", "def sql(self):\n # First run validation for the query on all widgets\n self._validate()\n\n metrics = find_metrics_for_widgets(self._widgets)\n operations = find_operations_for_widgets(self._widgets)\n share_dimensions = find_share_dimensions(self._dimensions, operations)\n\n return make_slicer_query_with_totals_and_references(\n self.dataset.database,\n self.table,\n self.dataset.joins,\n self._dimensions,\n metrics,\n operations,\n self._filters,\n self._references,\n orders=self.orders,\n share_dimensions=share_dimensions,\n )", "def query(self):\n \n return self._query", "def query(self):\n return self.__query", "def base_query(self) -> Optional[str]:\n return pulumi.get(self, \"base_query\")", "def sql(self):\n # Return value.\n ret = ''\n\n # Variables for figuring out dependencies between tables.\n done = []\n dependencies = {}\n\n # Run through all tables.\n for table in self.tables.values():\n # Assume no references.\n reference = False\n\n # Check fields for foreign keys.\n for field in table.fields.values():\n if field['foreign'] is not False:\n # Add the reference to the dependencies of the table.\n if table.name not in dependencies.keys():\n dependencies[table.name] = []\n dependencies[table.name].append(\n field['foreign'].split('.')[0])\n reference = True\n\n # If the table has no dependencies, just print it.\n if not reference:\n ret += '\\n' + table.sql()\n done.append(table.name)\n\n # Solve dependencies.\n while (len(dependencies) > 0):\n # Run through all dependencies.\n for table, deplist in dependencies.items():\n # Check is some has been solved since the last run.\n for solved in done:\n if solved in deplist:\n # Bingo. 
Remove it.\n deplist.remove(solved)\n # If there are no more dependencies\n if len(deplist) == 0:\n # Add thw SQL to the return value,\n ret += '\\n' + self.tables[table].sql()\n # Add the table name to the solved list.\n done.append(table)\n\n # Remove all tables that have had its dependencies solved.\n for solved in done:\n if solved in dependencies.keys():\n del dependencies[solved]\n\n return ret", "def _get_query(self):\n\n endpoint = ENDPOINTS[self.sync_data.endpoint_index]\n marker = self.sync_data.markers.get(self.sync_data.endpoint_index, START_OF_TIME)\n\n query_string = \"select \" + \",\".join(ENDPOINT_QUERY_FIELDS[endpoint])\n\n # ZOQL does not support the `order by` sorting\n query_string = query_string + \" from {} where UpdatedDate > '{}'\".format(\n endpoint,\n marker\n )\n\n return query_string", "def sql(self):\n if self._sql is None:\n try:\n sql_dict = get_report(\n self.idfname,\n self.simulation_dir,\n output_report=\"sql\",\n output_prefix=self.output_prefix,\n )\n except FileNotFoundError:\n # check if htm output is in file\n sql_object = self.anidfobject(\n key=\"Output:SQLite\".upper(), Option_Type=\"SimpleAndTabular\"\n )\n if sql_object not in self.idfobjects[\"Output:SQLite\".upper()]:\n self.addidfobject(sql_object)\n return self.simulate().sql()\n except Exception as e:\n raise e\n else:\n self._sql = sql_dict\n return self._sql", "def get_source_query(self) -> QuerySet:\n raise NotImplementedError", "def run_query_target_snowflake(self, query):\n return db.run_query_snowflake(\n query,\n account=self.get_conn_env_var('TARGET_SNOWFLAKE', 'ACCOUNT'),\n database=self.get_conn_env_var('TARGET_SNOWFLAKE', 'DBNAME'),\n warehouse=self.get_conn_env_var('TARGET_SNOWFLAKE', 'WAREHOUSE'),\n user=self.get_conn_env_var('TARGET_SNOWFLAKE', 'USER'),\n password=self.get_conn_env_var('TARGET_SNOWFLAKE', 'PASSWORD'),\n )", "def get_table_query_string(self) -> str:\n if self.database and self.table:\n return f'\"{self.database}\".\"{self.schema}\".\"{self.table}\"'\n elif self.table:\n return f'\"{self.table}\"'\n else:\n return f\"({self.query})\"", "def schema(self):\n return self.snowflake_options.schema", "def generate_query(self):\n return", "def database(self):\n return self.snowflake_options.database", "def get_query(self):\n return self.query_class(self)", "def get_catalog_query(self):\n # Implemented from kitosid template for -\n # osid.resource.BinQuerySession.get_bin_query_template\n return self._get_provider_session('catalog_query_session').get_catalog_query()", "def sql_query(self, query_text, edges = False):\n q = QueryWrapper(self.graph, QueryString(query_text, 'sql'),\n edges = edges, debug = self._debug)\n return q", "def to_sql(self):\n\n if not self._action:\n self.set_action(\"select\")\n for scope in self._global_scopes.get(self.owner, {}).get(self._action, []):\n if not scope:\n continue\n\n scope(self.owner, self)\n\n grammar = self.get_grammar()\n sql = grammar.compile(self._action).to_sql()\n self.boot()\n return sql", "def graph_queries_table_name(self) -> str:\n return pulumi.get(self, \"graph_queries_table_name\")", "def get_sql(database_name, table_name, sql_id):\n db = get_xml_dict(database_name, table_name)\n sql = db.get(sql_id)\n return sql", "def get_sql(database_name, table_name, sql_id):\n db = get_xml_dict(database_name, table_name)\n sql = db.get(sql_id)\n return sql", "def get_query(self):\r\n\r\n split = self.path_s.split(\"?\", 1)\r\n if len(split) == 1: return \"\"\r\n else: return split[1]", "def table(self):\n return 
self.snowflake_options.table", "def query(self) -> Optional[str]:\n return pulumi.get(self, \"query\")", "def sql(self, db, sql, args=()):\n assert db in ('source', 'target'), u\"First arg of sql() should be 'source' or 'target'\"\n connection = self.target_connection if db == 'target' else self.source_connection\n with connection.cursor() as cursor:\n cursor.execute(sql, args)\n return cursor.fetchall() if 'select ' in sql.lower() else ()", "def print_query(query: Query) -> str:\n regex = re.compile(r\":(?P<name>\\w+)\")\n params = query.statement.compile().params\n sql = regex.sub(r\"'{\\g<name>}'\", str(query.statement)).format(**params)\n from flexmeasures.data.config import db\n\n print(f\"\\nPrinting SQLAlchemy query to database {db.engine.url.database}:\\n\\n\")\n print(sql)\n return sql", "def q(self):\n from ..app import config\n return r.db(config.RDB_DB).table(self.__model__._table)", "def _sql_to_string(self, psql):\n pcon = self.__engine.raw_connection()\n try:\n pcur = pcon.cursor()\n xxx = psql.as_string(pcur)\n finally:\n pcon.close()\n return xxx", "def soql_query(self, query):\n self.builtin.log(\"Running SOQL Query: {}\".format(query))\n return self.cumulusci.sf.query_all(query)", "def generate_dry_run_query(self) -> str:\n return self.connector.dry_run_query(self.traversal_node)", "def query_text(self) -> str:\n return self._query_text", "def getQuery(self):\n return _libsbml.SBMLUri_getQuery(self)", "def query(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"query\")", "def query(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"query\")", "def sql_select(self, sqlstr):\n return get_connection()._raw_sql(sqlstr)", "def get_editor_query(sql):\n sql = sql.strip()\n\n # The reason we can't simply do .strip('\\e') is that it strips characters,\n # not a substring. 
So it'll strip \"e\" in the end of the sql also!\n # Ex: \"select * from style\\e\" -> \"select * from styl\".\n pattern = re.compile(r'(^\\\\\\e|\\\\\\e$)')\n while pattern.search(sql):\n sql = pattern.sub('', sql)\n\n return sql", "def q(cls) -> Query:\n if not cls.s:\n raise M2Error('No DB session defined')\n return cls.s.query(cls)", "def read_sql(name=\"total_trips.sql\"):\n template = pkg_resources.resource_filename(\"gojek\", join(\"sql\", name))\n with open(template, 'r') as myfile:\n query = myfile.read()\n return query", "def query(self):\n return self.session.query", "def get_sql_statement(self, start_time: datetime, end_time: datetime) -> str:\n return self.sql_stmt.format(\n start_time=start_time,\n end_time=end_time,\n result_limit=self.config.sourceConfig.config.resultLimit,\n filters=self.filters, # pylint: disable=no-member\n )", "def get_query(self, row_id):\n return self.get(row_id).query", "def GetSourceReference(self):\n #return self.reactants[0].compound._GetDGSource()\n source_names = set(map(lambda x : str(x.compound._GetDGSource()), self.reactants))\n return ', '.join([self.GetSourceReferenceLink(s) for s in source_names])", "def sql(self, q):\r\n params = base.get_params(None, locals())\r\n url = '{0}/{1}'.format(self.get_url(), 'sql')\r\n\r\n return http.Request('POST', url, params), parsers.parse_json", "def to_sql(self):\n if self._action == \"create\":\n return self.platform().compile_create_sql(self.table)\n else:\n if not self._dry:\n # get current table schema\n table = self.platform().get_current_schema(\n self.connection, \"table_schema\"\n )\n self.table.from_table = table\n\n return self.platform().compile_alter_sql(self.table)", "def gen_q_stmt(name, query):\n return \"query {} `{}`;\\n\".format(name, query)", "def sql_endpoint_path(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"sql_endpoint_path\")", "def get_query(self, minimal: bool = False) -> Optional[str]:\n if minimal:\n return self.minimal_query\n return self.query", "def reference_schema(self) -> pulumi.Input['ApplicationApplicationConfigurationSqlApplicationConfigurationReferenceDataSourceReferenceSchemaArgs']:\n return pulumi.get(self, \"reference_schema\")", "def get_query_output(self):\n\n return self.query_output_file", "def get_raw_query(self, row_id):\n cursor = self.connection.cursor()\n cursor.execute(\"\"\"\n SELECT raw_query FROM queries WHERE rowid=(?);\n \"\"\", (row_id,))\n return cursor.fetchone()[0]", "def reference_data_source(self) -> Optional[pulumi.Input['ApplicationApplicationConfigurationSqlApplicationConfigurationReferenceDataSourceArgs']]:\n return pulumi.get(self, \"reference_data_source\")", "def get_query():\r\n table = query_queue_table\r\n\r\n s = table.select(order_by = sa.asc(table.c.date), limit = 1)\r\n s.append_whereclause(sa.and_(*[table.c.iden != i for i in running]))\r\n r = s.execute().fetchone()\r\n\r\n if r:\r\n return r.iden, r.query\r\n else:\r\n return None, None", "def get_status_sql(self, tablename):\n status_sql = f\"\"\" SELECT * FROM {tablename}\"\"\"\n return status_sql", "def as_sql(self):\r\n assert len(self.query.tables) == 1, \\\r\n \"Can only delete from one table at a time.\"\r\n qn = self.quote_name_unless_alias\r\n #=======================================================================\r\n # self._hasConstraints(self.query.tables[0])\r\n #=======================================================================\r\n \r\n result = ['DELETE FROM %s' % qn(self.query.tables[0])]\r\n where, params = self.query.where.as_sql(qn=qn, 
connection=self.connection)\r\n if where:\r\n result.append('WHERE %s' % where)\r\n ##DOTO: Delete after test\r\n #=======================================================================\r\n # print '>>>',result,params\r\n # if result[0] == 'DELETE FROM \"django_flatpage_sites\"' :\r\n # import pdb; pdb.set_trace()\r\n #=======================================================================\r\n return ' '.join(result), tuple(params)", "def execute(self, context):\n logging.info(f\"Running SQL :{self.sql}\")\n self.hook = TrinoHook()\n query = self.hook.run(self.sql, autocommit=self.autocommit, parameters=self.parameters)\n if self.xcom_push:\n return query", "def query(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"query\")", "def stock_3_query(self):\n return f\"\"\"\n SELECT '{self.stock_3}'\n FROM closing_prices;\"\"\"", "def sql(self):\n if not self._selects:\n raise ValueError('No SELECT statements are specified')\n\n sql = []\n param_values = []\n\n # MySQL SELECT syntax as of 5.7:\n #\n # SELECT ...\n # UNION [ALL | DISTINCT] SELECT ...\n # [UNION [ALL | DISTINCT] SELECT ...]\n\n if self.query_options:\n sql.extend(self.query_options)\n\n for stmt in self._selects:\n if isinstance(stmt, mysqlstmt.Select):\n select_sql, select_params = stmt.sql()\n stmtsql = select_sql\n if select_params is not None:\n param_values.extend(select_params)\n else:\n stmtsql = stmt\n\n if sql:\n if self._distinct is False:\n sql.append('UNION ALL')\n else:\n sql.append('UNION')\n\n sql.append(u'({0})'.format(stmtsql))\n\n if self._orderby_conds:\n sql.append('ORDER BY')\n sql.append(', '.join(self._orderby_conds))\n\n if self._limit is not None:\n row_count, offset = self._limit\n if offset > 0:\n sql.append('LIMIT {0},{1}'.format(offset, row_count))\n else:\n sql.append('LIMIT {0}'.format(row_count))\n\n if self.placeholder:\n return ' '.join(sql), param_values if param_values else None\n assert not param_values\n return ' '.join(sql)", "def get_query(self, q_id: str) -> Optional[str]:\n for query in self.example_queries:\n if query['@id'] == q_id:\n return query\n return None", "def query(self) -> pulumi.Output[Optional['outputs.JobQuery']]:\n return pulumi.get(self, \"query\")", "def read_sql(self):\n pass", "def get_comment_query(self):\n # Implemented from template for\n # osid.resource.ResourceQuerySession.get_resource_query_template\n return queries.CommentQuery(runtime=self._runtime)", "def getExampleQueries(self, selectedID = 0):\r\n return \"\"", "def asSQL(self) -> str:\n # To modify this method, pay attentin to SQL injection.\n # For example, if `self.x` is assumed to be integer\n # but is not guaranteed to be,\n # `:d` must always be specified in format strings:\n # `expressions.append(f\"x > {self.x:d}\")`\n expressions = []\n if self.date_start is not None:\n if self.date_start.tzinfo is None:\n datestr = self.date_start.isoformat()\n else:\n datestr = self.date_start.astimezone(datetime.timezone.utc).replace(tzinfo=None).isoformat()\n expressions.append(f\"pfs_visit.issued_at >= '{datestr}'\")\n if self.date_end is not None:\n if self.date_end.tzinfo is None:\n datestr = self.date_end.isoformat()\n else:\n datestr = self.date_end.astimezone(datetime.timezone.utc).replace(tzinfo=None).isoformat()\n expressions.append(f\"pfs_visit.issued_at < '{datestr}'\")\n if self.visit_start is not None:\n expressions.append(f\"pfs_visit.pfs_visit_id >= '{self.visit_start:d}'\")\n if self.visit_end is not None:\n expressions.append(f\"pfs_visit.pfs_visit_id < 
'{self.visit_end:d}'\")\n\n if expressions:\n return \"(\" + \" AND \".join(expressions) + \")\"\n else:\n return \"TRUE\"", "def sql(q, database_url):\r\n output, cur_description = Q(q, database_url, out=True, description=True)\r\n # print(cur_description)\r\n cols = [i[0] for i in cur_description]\r\n return pd.DataFrame(output, columns=cols)", "def stock_1_query(self):\n return f\"\"\"\n SELECT Date, '{self.stock_1}'\n FROM closing_prices;\"\"\"", "def cloud_sql_proxy_path() -> Optional[str]:\n return u.resource(CLOUD_SQL_WRAPPER_SCRIPT)", "def select_query(self):\n query = db.select([self.tables])\n print(query)\n ResultProxy = self.connection.execute(query)\n ResultSet = ResultProxy.fetchall()\n return ResultSet", "def get_sql_from_file(self, sql_path):\n with open(sql_path, 'r') as f:\n sql = \"\"\n for line in f.readlines():\n line = line.strip()\n if not line.startswith(\"--\"):\n sql += \" \" + line\n return sql", "def _full_check_sql(self, sql: str) -> str:\n return f\"SELECT col_name, check_type, check_result FROM ({sql}) AS check_columns\"", "def doQueryString(self, query) :\n\t\tqr = self.doQuery(query)['results']['bindings']\n\t\tif qr :\n\t\t\treturn qr[0].values()[0]['value']\n\t\telse :\n\t\t\treturn None", "def version_query_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version_query_name\")", "def q(self):\n return self._q", "def get_resource_query(self):\n pass", "def get_resource_query(self):\n pass", "def deploySQL(self):\n\t\tsql = \"APPLY_KMEANS({} USING PARAMETERS model_name = '{}', match_by_pos = 'true')\"\n\t\treturn (sql.format(\", \".join(self.X), self.name))", "def source(self):\n for source in self.coordinator.data.sources:\n if source.SourceID == self.zone.SourceID:\n return source.Name\n return None", "def build_query(self):\n query = self.action.build_query()\n if self.sandboxoption != None:\n query['Sandbox'] = self.sandboxoption\n else:\n query['Sandbox'] = '0'\n return query", "def conditions_as_sql(self, prewhere=False):\n q_object = self._prewhere_q if prewhere else self._where_q\n return q_object.to_sql(self._model_cls)", "def sql_context(self):\n if not self._sql_context:\n import pyspark.sql\n self._sql_context = pyspark.sql.SQLContext(self.context())\n return self._sql_context", "def text(self):\n return self.query.query", "def get_query_execution(QueryExecutionId=None):\n pass", "def _get_snowflake_connection_string() -> str:\n sfUser = os.environ.get(\"SNOWFLAKE_USER\") # noqa: TID251\n sfPswd = os.environ.get(\"SNOWFLAKE_PW\") # noqa: TID251\n sfAccount = os.environ.get(\"SNOWFLAKE_ACCOUNT\") # noqa: TID251\n sfDatabase = os.environ.get(\"SNOWFLAKE_DATABASE\") # noqa: TID251\n sfSchema = os.environ.get(\"SNOWFLAKE_SCHEMA\") # noqa: TID251\n sfWarehouse = os.environ.get(\"SNOWFLAKE_WAREHOUSE\") # noqa: TID251\n sfRole = os.environ.get(\"SNOWFLAKE_ROLE\") or \"PUBLIC\" # noqa: TID251\n\n url = f\"snowflake://{sfUser}:{sfPswd}@{sfAccount}/{sfDatabase}/{sfSchema}?warehouse={sfWarehouse}&role={sfRole}\"\n\n return url", "def get_query():\n query = \"\"\"{\n repository(name: \"flux\", owner: \"fluxcd\") {\n forkCount\n issues {\n totalCount\n }\n pullRequests {\n totalCount\n }\n releases {\n totalCount\n }\n stargazers {\n totalCount\n }\n watchers {\n totalCount\n }\n }\n}\n \"\"\"\n return query", "def parse_sql (self):\n sql = self.sql.lower()\n \n split = sql.split('from')\n select = split[0]\n rows = [r.strip().replace(')','').replace('(','') for r in \\\n select.split('select')[1].split(',')]\n \n #~ print rows\n if 
len(rows) != 1 and '*' in rows:\n return 'UNPARSEABLE', []\n elif len(rows) == 1 and '*' in rows:\n schema, table = split[1].lstrip().split(' ')[0].split('.')\n return 'IMPLICIT', [schema, table]\n else:\n return 'EXPLICIT', rows", "def _home_network_query(self):\n return sql.SQL(\"\"\"SELECT operator_id\n FROM {mcc_mnc_table}\n WHERE imsi LIKE mcc_mnc_pattern\n LIMIT 1\n \"\"\").format(mcc_mnc_table=sql.Identifier(self._mnc_mcc_new_tblname))" ]
[ "0.7604513", "0.7548185", "0.644608", "0.636173", "0.62809396", "0.6275104", "0.6264084", "0.62564576", "0.6220409", "0.60543776", "0.59246194", "0.5899583", "0.58373946", "0.57994354", "0.57862854", "0.5758391", "0.57509285", "0.5746539", "0.5732655", "0.56832737", "0.56728673", "0.56605136", "0.5642296", "0.5638014", "0.5635155", "0.56274277", "0.5627086", "0.56182265", "0.56171507", "0.55921674", "0.5580464", "0.5556395", "0.5556395", "0.55525225", "0.55108076", "0.54797035", "0.54699814", "0.5449978", "0.5443473", "0.5438601", "0.54230785", "0.5404633", "0.5396651", "0.53938365", "0.5360486", "0.5360486", "0.5353002", "0.53511333", "0.530837", "0.5287619", "0.52687234", "0.52405906", "0.52046376", "0.5184962", "0.5145373", "0.5145305", "0.514427", "0.5144041", "0.51403356", "0.51161593", "0.51134753", "0.5097546", "0.5096719", "0.5093685", "0.50855094", "0.5080906", "0.50791854", "0.507713", "0.5074626", "0.5054805", "0.505036", "0.5042957", "0.5032276", "0.5032155", "0.5027621", "0.50226766", "0.5022208", "0.5021962", "0.50170976", "0.5015985", "0.5010197", "0.5007537", "0.500705", "0.5006184", "0.5005327", "0.5003504", "0.5003504", "0.499767", "0.49963564", "0.49854136", "0.49776363", "0.49658793", "0.49551025", "0.49412635", "0.49359757", "0.49334675", "0.49305484", "0.49125668" ]
0.57847404
17
Sets the snowflake SQL query referenced by this source.
def query(self, query): self._query = query
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sql_query(self, new_query):\n self._project.sql_query = new_query", "def set_sa_query(self, query):\n self.sa_query = query", "def query(self):\n return self.snowflake_options.query", "def set_query(self, query):\n query = pylastica.query.Query.create(query)\n data = query.to_dict()\n return self.set_param('query', data['query'])", "def set_session_query_tag(self) -> None:\n if self.service_connection.queryTag:\n self.engine.execute(\n SNOWFLAKE_SESSION_TAG_QUERY.format(\n query_tag=self.service_connection.queryTag\n )\n )", "def set_query(self, query):\n return self.set_param(\"query\", query)", "def sql_query(self):\n return self._project.sql_query", "def run_query_target_snowflake(self, query):\n return db.run_query_snowflake(\n query,\n account=self.get_conn_env_var('TARGET_SNOWFLAKE', 'ACCOUNT'),\n database=self.get_conn_env_var('TARGET_SNOWFLAKE', 'DBNAME'),\n warehouse=self.get_conn_env_var('TARGET_SNOWFLAKE', 'WAREHOUSE'),\n user=self.get_conn_env_var('TARGET_SNOWFLAKE', 'USER'),\n password=self.get_conn_env_var('TARGET_SNOWFLAKE', 'PASSWORD'),\n )", "def query(self, query):\n\n self._query = query", "def query(self, query):\n\n self._query = query", "def query(self, query):\n\n self._query = query", "def setup(self, query: str, docache=False, recache=False) -> None:\n self.q = Query(query, docache, recache)", "def set_psql(tsql):\n global compiled_subs\n\n # Part of the string following the `SET` keyword\n parameters = tsql.split(maxsplit=1)[1]\n\n # See if there's a pattern that matches the parameters\n for regex, replacement in compiled_subs:\n # If so, apply the corresponding substitution\n if regex.match(parameters):\n return regex.sub(replacement, parameters)\n\n # By default, just comment out the line adding the note that it came from T-SQL\n return '-- (from T-SQL) SET ' + parameters.replace('\\n', '\\n-- ') + ';'", "def run_new_sql(self):\n\n pass", "def queries(self, queries):\n\n self._queries = queries", "def setQuery(self, tester=None):\n if self.testing:\n self.user = tester\n self.r3.user = self.user\n self.query, self.artist = self.user.search()\n self.case = tester.case\n else:\n self.user = User()\n self.r3.user = self.user\n self.query, self.artist = self.user.search()\n self.case = case(caseFromQuery(self))\n self.DB.open_connection()", "def setQ(self,Q):\n self.Q = Q", "def determine_query():\n return query if query is not None \\\n else f\"SELECT * FROM '{table}';\"", "def construct_query(self):\n reader = QueryReader(filepath=self.filepath, filename=self.filename, raw_sql=self.raw_sql, params=self.params)\n return reader.sql", "def set_ast(self, ast_sq_rename_dict, query):\n\n ast, sq_rename_dict = ast_sq_rename_dict\n #print \"QUERY PLAN (before optimization):\"\n #print ast.dump()\n new_query = query.copy()\n\n # Final rename\n if sq_rename_dict:\n ast.rename(sq_rename_dict)\n\n removed_fields = set(self.foreign_key_fields.keys())\n additional_fields = reduce(lambda x, y: x | y, self.foreign_key_fields.values(), set())\n\n new_query.fields |= additional_fields\n new_query.fields -= removed_fields\n\n if query.get_action() == ACTION_CREATE:\n ast.inject_insert(query.get_params())\n self.inject_at(new_query)\n ast.optimize(new_query)\n self.ast = ast\n \n # Update the main query to add applicative information such as action and params\n # XXX THIS IS WRONG !! XXX\n # NOTE: I suppose params cannot have '.' 
inside\n if query.get_action() in [ACTION_UPDATE, ACTION_DELETE]:\n for from_node in self.froms:\n q = from_node.get_query()\n if q.get_from() == query.get_from():\n q.action = query.get_action()\n q.params = query.get_params()\n\n # For example \"UPDATE slice SET resource\", since we have a backwards relation, we need update in the children query\n # This should be done when the query is forwarded through the query plan (routerv2)\n # In the mean time... we can assume unique names and continue looking at FROms... that will break for sure...", "def set_dag_name(self, name: str) -> None:\n super().execute(f\"set hive.query.name={name}\")", "def query_id(self, query_id):\n\n self._query_id = query_id", "def set_queries(self, **kwargs):\n for k, v in kwargs.items():\n self._query_dict[k] = v", "def setup_query(self):\n super(SQLCopyToCompiler, self).setup_query()\n if self.query.copy_to_fields:\n self.select = []\n for field in self.query.copy_to_fields:\n # raises error if field is not available\n expression = self.query.resolve_ref(field)\n selection = (\n expression,\n self.compile(expression),\n field if field in self.query.annotations else None,\n )\n self.select.append(selection)", "def get_sql_connection(self):\n return self.sql", "def query_table_is(self, query_table):\n self.query_table = query_table\n logger.info(\"Query table is: {query_table}\".format(query_table=query_table))", "def sql_log(cls, sql_query, data=None):\n\t\t# if data exists , I replace them into `complete_sql_query`\n\t\tif data:\n\t\t\tfor key, value in data.items():\n\t\t\t\tsearch = ':{}'.format(key)\n\t\t\t\treplace = '`{}`'.format(value)\n\t\t\t\tsql_query = sql_query.replace(search, replace)\n\n\t\tprint('\\t{}'.format(sql_query))", "def sql_query(self, query_text, edges = False):\n q = QueryWrapper(self.graph, QueryString(query_text, 'sql'),\n edges = edges, debug = self._debug)\n return q", "def sql(self):\n return ';\\n'.join([x.sql() for x in self._statements]) + ';'", "def _query_frame_set(self, value):\n self._query_frame = self._prep_frame(value)", "def soql_query(self, query):\n self.builtin.log(\"Running SOQL Query: {}\".format(query))\n return self.cumulusci.sf.query_all(query)", "def query(self, value: str):\n self._query = value", "def query_runtime(self, query_runtime):\n\n self._query_runtime = query_runtime", "def _reset_query(self):\n self.query = pysnow.QueryBuilder()\n self.desired_response_fields = list()", "def setQuery(self, query):\n libxml2mod.xmlURISetQuery(self._o, query)", "def setupQuery(self, file):\n file.write(\"QUERY(FALSE);\\n\")\n file.write(\"COUNTEREXAMPLE;\\n\")\n return", "def generate_query(self):\n return", "def register_sql_proceedures(self):", "def query_str(self, new_query_str):\n self.query_buffer.text = new_query_str", "def tweak_q(self, q):\n self._q = q\n self.reset()", "def run_sql(self, sql):\n def mk_run_sql_q(sql):\n return {\n 'type' : 'run_sql',\n 'args': {\n 'sql' : sql\n }\n }\n return self.v1q(mk_run_sql_q(sql))", "def make_query(self):", "def __init__(__self__, *,\n query: Optional[str] = None):\n if query is not None:\n pulumi.set(__self__, \"query\", query)", "def get_source_query(self) -> QuerySet:\n raise NotImplementedError", "def sql(self, q):\r\n params = base.get_params(None, locals())\r\n url = '{0}/{1}'.format(self.get_url(), 'sql')\r\n\r\n return http.Request('POST', url, params), parsers.parse_json", "def do_add_query_field(self):\n raise NotImplementedError", "def execute_query(self, query):\n self.recordset = self.con.execute(query)\n 
return self", "def run_query_target_postgres(self, query: object) -> object:\n return db.run_query_postgres(\n query,\n host=self.get_conn_env_var('TARGET_POSTGRES', 'HOST'),\n port=self.get_conn_env_var('TARGET_POSTGRES', 'PORT'),\n user=self.get_conn_env_var('TARGET_POSTGRES', 'USER'),\n password=self.get_conn_env_var('TARGET_POSTGRES', 'PASSWORD'),\n database=self.get_conn_env_var('TARGET_POSTGRES', 'DB'),\n )", "def _make_query(self):\r\n raise NotImplementedError()", "def send_sql_query(self, query):\n # TODO: MAKE SQL SELECT and SQL INSERT modules\n self.cursor.execute(query)\n self.cnx.commit()", "def print_query(query: Query) -> str:\n regex = re.compile(r\":(?P<name>\\w+)\")\n params = query.statement.compile().params\n sql = regex.sub(r\"'{\\g<name>}'\", str(query.statement)).format(**params)\n from flexmeasures.data.config import db\n\n print(f\"\\nPrinting SQLAlchemy query to database {db.engine.url.database}:\\n\\n\")\n print(sql)\n return sql", "def _run_query(self):", "def open (self, sql_file):\n fd = open(sql_file, 'r')\n sql = fd.read()\n fd.close()\n self.sql = sql.replace(UTF_8_STR, \"\")", "def preQuery(self):\n self.request_url = self.url\n pass", "def sql(self):\n # First run validation for the query on all widgets\n self._validate()\n\n metrics = find_metrics_for_widgets(self._widgets)\n operations = find_operations_for_widgets(self._widgets)\n share_dimensions = find_share_dimensions(self._dimensions, operations)\n\n return make_slicer_query_with_totals_and_references(\n self.dataset.database,\n self.table,\n self.dataset.joins,\n self._dimensions,\n metrics,\n operations,\n self._filters,\n self._references,\n orders=self.orders,\n share_dimensions=share_dimensions,\n )", "def read_sql(self):\n pass", "def set_query(self, query, site_id=None, unique=False):\n return self.get_queryset().set_query(query, site_id, unique)", "def _send_query(self, query) -> None:\n Cli3App.instance().session.send_query(query)", "def query(self, query):", "def _jdbc_query_origin(self):\n self.origin_system = self.environments['database'].engine.dialect.name\n self._setup_origin_table()\n pipeline_builder = self.sdc_builder.get_pipeline_builder()\n jdbc_query_consumer = pipeline_builder.add_stage('JDBC Query Consumer', type='origin')\n jdbc_query_consumer.set_attributes(incremental_mode=False,\n sql_query=f'SELECT * FROM {self.dataset}')\n return jdbc_query_consumer, pipeline_builder", "def __prepare_query(self, query, stopwords=[], stemming_func=None):\n pass", "def data_source_name(self, data_source_name):\n\n self._data_source_name = data_source_name", "def gen_q_stmt(name, query):\n return \"query {} `{}`;\\n\".format(name, query)", "def to_sql(self):\n return self._grammar.compile_select(self)", "def _wrap_initial(initial, query):\n\t\trepl = query.replace('initial = ?', 'initial is ?')\n\t\treturn repl if initial is None else query", "def defer_query(self, query):\n\n if not isinstance(query, (sql.Insert, sql.Update, sql.Delete)):\n raise errors.InvalidQueryType()\n\n self._deferred_queries.append(query)\n\n return self", "def mogrify_sql_statement(self, content, mapset=None):\n if mapset is None:\n mapset = self.current_mapset\n\n mapset = decode(mapset)\n if mapset not in self.tgis_mapsets.keys():\n self.msgr.fatal(_(\"Unable to mogrify sql statement. 
\" +\n self._create_mapset_error_message(mapset)))\n\n return self.connections[mapset].mogrify_sql_statement(content)", "def __init__(self):\n self._masquarade('odbc')", "def query(self, *sql):\n self.cursor.execute(*sql)\n self.conn.commit()", "def set_source(self, source_name):\n self.source = source_name", "def __init__(self, sql_engine: sqlalchemy.engine.Engine) -> None:\n self._sql_engine = sql_engine", "def _read_query(self):\n try:\n # Open Google Drive and read the sql file\n self.query = GDrive().read_drive_file(self.input_source_id)\n except Exception as e:\n raise e", "def setQueryRaw(self, query_raw):\n libxml2mod.xmlURISetQueryRaw(self._o, query_raw)", "def send_sql_query2(self, query):\n # TODO: MAKE SQL SELECT and SQL INSERT modules\n self.cursor.execute(query)\n # self.cnx.commit()", "def build(self):\n return self._sql.strip()", "def visit_query(self, query):\n return query", "def to_sql(self):\n\n if not self._action:\n self.set_action(\"select\")\n for scope in self._global_scopes.get(self.owner, {}).get(self._action, []):\n if not scope:\n continue\n\n scope(self.owner, self)\n\n grammar = self.get_grammar()\n sql = grammar.compile(self._action).to_sql()\n self.boot()\n return sql", "def required_query_description(self, required_query_description):\n\n self._required_query_description = required_query_description", "def run_query_target_redshift(self, query):\n return db.run_query_redshift(\n query,\n host=self.get_conn_env_var('TARGET_REDSHIFT', 'HOST'),\n port=self.get_conn_env_var('TARGET_REDSHIFT', 'PORT'),\n user=self.get_conn_env_var('TARGET_REDSHIFT', 'USER'),\n password=self.get_conn_env_var('TARGET_REDSHIFT', 'PASSWORD'),\n database=self.get_conn_env_var('TARGET_REDSHIFT', 'DBNAME'),\n )", "def run_query(self):\n return _run_query(self.query)", "def run_eicu_query(query, conn):\n query_schema = \"set search_path to eicu_crd;\"\n query = query_schema + query\n return pd.read_sql_query(query, conn)", "def database(self):\n return self.snowflake_options.database", "def query(self, value):\n \n self._query = str(value) if value else None", "def get_table_query_string(self) -> str:\n if self.database and self.table:\n return f'\"{self.database}\".\"{self.schema}\".\"{self.table}\"'\n elif self.table:\n return f'\"{self.table}\"'\n else:\n return f\"({self.query})\"", "def set_query_output(self, path):\n\n file = f'sql_query_R{str(self.time_span).replace(\".\", \"_\")} ({str(self.date_time).replace(\":\",\"_\")}).csv'\n self.query_output_file = path_inc(path, file)", "def __init__(self, sql_config = {}, verbose = True):\n\n self.sql_config = sql_config\n self.verbose = verbose\n self._setup()", "def limit_path_query(self, limit_path_query):\n\n self._limit_path_query = limit_path_query", "def _multi_query_execution(self):\n multi_query_staging = self.query.split(';')\n for query in multi_query_staging:\n self.query = query\n self._execute_command()", "def limit_package_query(self, limit_package_query):\n\n self._limit_package_query = limit_package_query", "def __init__(self, query_string, parent = None):\n self.statement = query_string.strip()\n self.parent = parent\n # self.child is explicitly set by operators\n self.child = None", "def prepare_sql(self, sql):\n # Oracle doesn't like trailing semicolons. 
So remove them.\n # To do this properly we need to strip comments.\n # See issue5.\n sql = sqlparse.format(sql, strip_comments=True)\n sql = sql.strip()\n if sql.endswith(';'):\n sql = sql[:-1]\n return sql", "def filter(self, **kwargs):\n kwargs['query'] += ' FROM {0}'\n return kwargs", "def process_query(self, query):\n query._cache_region = self", "def query_save_data_frame(self, query):\n self.recordset_df = pd.read_sql_query(query, self.con)\n return self", "def graph_queries_table_name(self) -> str:\n return pulumi.get(self, \"graph_queries_table_name\")", "def add_instance_id_to_query(self, query: str) -> str:\n FIND_FROM_STATEMENT_REGEX_PATTERN = r'(?i)FROM `'\n query = re.sub(FIND_FROM_STATEMENT_REGEX_PATTERN, f'FROM `{self.instance_id}.', query)\n return query", "def setSourceName(self, instance, value):\n mapping = IAnnotations(instance).setdefault(\n 'collective.table',\n PersistentMapping()\n )\n mapping['source_name'] = value", "def __init__(self, db):\n self.table_name = \"query_latent_space\"\n self.db = db", "def run_cypher_query(self, query):\n with self._driver.session() as session:\n session.write_transaction(self.add_input_graph, query)", "def _query_start_set(self, value):\n self._query_start = self._prep_coord(value, \"query_end\", le)" ]
[ "0.72115916", "0.6802297", "0.6187164", "0.6164766", "0.60703343", "0.6025312", "0.5980297", "0.5747391", "0.57371366", "0.57371366", "0.57371366", "0.5711442", "0.55724436", "0.55319154", "0.5473519", "0.54341775", "0.54232275", "0.5422666", "0.5417376", "0.5378323", "0.5360879", "0.5340419", "0.533736", "0.5333954", "0.5288397", "0.5276507", "0.52709234", "0.52522105", "0.5235079", "0.52203", "0.52088016", "0.51862615", "0.5182718", "0.51766384", "0.51741755", "0.51428294", "0.51427007", "0.51421416", "0.511031", "0.5103824", "0.50603795", "0.5052019", "0.504148", "0.5040483", "0.5039997", "0.50393105", "0.5026361", "0.49639228", "0.49515682", "0.49495417", "0.49145865", "0.49053466", "0.48972082", "0.4893027", "0.48830694", "0.48814252", "0.48803115", "0.48752874", "0.48493943", "0.48371303", "0.4835023", "0.48341116", "0.4829614", "0.4824456", "0.4821401", "0.48208296", "0.48089352", "0.47751567", "0.47702345", "0.47665417", "0.47613055", "0.47596762", "0.47582808", "0.4740433", "0.47400588", "0.47322807", "0.47123677", "0.47071216", "0.46962744", "0.46871856", "0.46758664", "0.46755484", "0.46741286", "0.46718168", "0.46708995", "0.46702123", "0.46638656", "0.4658032", "0.46506312", "0.46474797", "0.46347705", "0.46252316", "0.45974734", "0.45967633", "0.4594736", "0.4589127", "0.45802528", "0.45793036", "0.45748815", "0.45707121" ]
0.59033763
7
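The positive document for this entry is a bare setter; as a point of reference, a minimal sketch (the class name, attribute, and usage below are illustrative assumptions, not taken from the dataset or from any specific library) of how such a setter is normally paired with a read-only @property in Python:

class SnowflakeSourceSketch:
    """Hypothetical holder for a Snowflake-backed source; the name is assumed for illustration."""

    def __init__(self, query: str = ""):
        self._query = query

    @property
    def query(self) -> str:
        # Getter counterpart: exposes the stored SQL text.
        return self._query

    @query.setter
    def query(self, query: str) -> None:
        # Same shape as the positive document above: store the SQL on a private attribute.
        self._query = query


src = SnowflakeSourceSketch()
src.query = "SELECT * FROM ORDERS"          # routed through the setter
assert src.query == "SELECT * FROM ORDERS"  # routed through the getter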
Returns the database name of this Snowflake table.
def database(self):
    return self._database
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def database_name(self) -> str:\n return pulumi.get(self, \"database_name\")", "def database_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"database_name\")", "def getDatabaseName(self):\n return self._base.getDatabaseName()", "def getDatabaseName(self):\n raise NotImplementedError", "def db_name(self):\n return self._db_name", "def database(self):\n return self.snowflake_options.database", "def database_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"database_name\")", "def get_name(self) -> str:\n return self.dbname", "def get_db_name(self):\n\t\treturn conf.db_name", "def getDatabaseName( self ):\n return self.mDbname", "def get_table_name(self):\n return self._table", "def database_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"database_name\")", "def dbname(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"dbname\")", "def table_name() -> str:\n pass", "def get_database_name(self, data: dict) -> str: # pylint: disable=arguments-differ\n if not data[\"database_name\"] and self.service_connection.database:\n return self.service_connection.database\n return data[\"database_name\"]", "def table_name(self) -> str:\n return self.model._meta.db_table", "def table(self):\n return self._table_name", "def table_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"table_name\")", "def table(self):\n return self.snowflake_options.table", "def getDatabaseName(self):\n return f\"n{self.name.capitalize()}\"", "def table_name(self):\n return self._new_table.name", "def database(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"database\")", "def table_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"table_name\")", "def table_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"table_name\")", "def table_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"table_name\")", "def database_name(self):\n try:\n return self._database_name\n except:\n pass\n\n if 'X-UnitTest' in self.request.headers:\n if self.request.headers['X-UnitTest'] == 'True':\n self._database_name = TEST_DATABASE\n return TEST_DATABASE\n default_database = self.application.databases['default']['NAME']\n self._database_name = default_database\n return default_database", "def database_id(self) -> str:\n return pulumi.get(self, \"database_id\")", "def db(self) -> str:\n return self._db", "def database(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"database\")", "def database_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"database_id\")", "def database(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"database\")", "def database(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"database\")", "def get_tablename(self):\n return self.ds_table", "def graph_queries_table_name(self) -> str:\n return pulumi.get(self, \"graph_queries_table_name\")", "def fulldbname(self):\n return 'myfls_'+self.user.username+'_'+self.dbname", "def backend_name(self) -> str:\n return self._db_data.backend", "def keyspace(self, name):\n if self.options.db_prefix:\n return \"{}_{}\".format(self.options.db_prefix, name)\n return name", "def table_name(self) -> str:\n return \"OLTP\"", "def database_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"database_id\")", "def get_database_filename() -> str:\n config_dir = get_config_dir()\n return os.path.join(config_dir, DATABASE_FILENAME)", "def dbName(self, code) -> str:\n return f'{code}{self.name}'", "def get_database(self):\n return 
self.database", "def table_id(self) -> str:\n return pulumi.get(self, \"table_id\")", "def db_proxy_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"db_proxy_name\")", "def database_version(self) -> str:\n return pulumi.get(self, \"database_version\")", "def database_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"database_id\")", "def table(cls):\n return cls.__name__", "def get_db_server_name(self):\n if self.db_config_file.key_exists(\"server_name\"):\n return self.db_config_file_value(\"server_name\").strip('\"')\n return self.get_system_id()", "def get_database_url(self):\n return self.config['dbase_path']", "def table_name(class_):\n try:\n return class_.__tablename__\n except AttributeError:\n return class_.__table__.name", "def get_database(self, instance, name):\n return instance.get_database(name)", "def mysql_database_name():\n return 'test'", "def destination_table(self) -> str:\n return pulumi.get(self, \"destination_table\")", "def database_key(database_name='mnddb01'):\n return ndb.Key('db', database_name)", "def schema(self):\n return self.snowflake_options.schema", "def _extract_db_name_from_db_path(self):\n return os.path.basename(self.db_path).rsplit('.', 1)[0]", "def name(self) -> str:\n return f\"lookup_table_{self.table_number}\"", "def get_database(self):\n if self._database is None:\n conn = self.get_connection()\n db = conn[self.database]\n self._database = db\n\n return self._database", "def get_database_names(self) -> Iterable[str]:\n custom_database_name = self.service_connection.__dict__.get(\"databaseName\")\n\n database_name = self.service_connection.__dict__.get(\n \"database\", custom_database_name or \"default\"\n )\n # By default, set the inspector on the created engine\n self.inspector = inspect(self.engine)\n yield database_name", "def __tablename__(cls):\n return get_table_name(cls.__name__)", "def delta_table_name(self):\n if len(self._old_table.name) < constant.MAX_TABLE_LENGTH - 10:\n return constant.DELTA_TABLE_PREFIX + self._old_table.name\n elif (\n len(self._old_table.name) >= constant.MAX_TABLE_LENGTH - 10\n and len(self._old_table.name) < constant.MAX_TABLE_LENGTH - 2\n ):\n return constant.SHORT_DELTA_TABLE_PREFIX + self._old_table.name\n else:\n return constant.DELTA_TABLE_PREFIX + constant.GENERIC_TABLE_NAME", "def tablename(entity) -> str:\n return entity.__tablename__", "def db_for_write(self, model, **hints):\n if hasattr(model, 'db_name'):\n return model.db_name\n return 'default'", "def table(self):\n if not self.exists:\n return None\n return self._get_table()", "def table(self, table_name):\n return self._get_storage().table(table_name)", "def getTable(self, name: str):\n query = f\"SELECT * FROM '{name}';\"\n result = sql.executeAndReadQuery(self.connection, query)\n return result", "def filename(self):\n return self._dbfile", "def database():\n return conf().database", "def db_for_write(self, model, **hints):\n\n return self.db_name", "def connection_name(self) -> str:\n return pulumi.get(self, \"connection_name\")", "def db_for_read(self, model, **hints):\n\n return self.db_name", "def schema(self):\n return self.table_info.schema", "def db_for_write(self, model, **hints):\n if model._meta.app_label == self.app_label:\n return self.db_name\n return None", "def get_context_table_name(self, table):\r\n return self.context_table_name or \"table\"", "def get_tgis_database_string():\n global tgis_database_string\n return tgis_database_string", "def create_db_statement(self):\n return 
Engine.create_db_statement(self).replace(\"DATABASE\", \"SCHEMA\")", "def database(self):\n\n return self._database", "def database(self):\n\n return self._database", "def db_for_read(self, model, **hints):\n if hasattr(model, 'db_name'):\n return model.db_name\n return 'default'", "def get_table_name(model_id: Text) -> Text:\n return model_id if not cfg.app.db.schema else cfg.app.db.schema + \".\" + model_id", "def name(self):\n if self.table:\n return \"{}.{}\".format(self.table, self.field_name)\n return self.field_name", "def database(self) -> str:\n\t\treturn os.getenv('APP_DATABASE', 'memory').lower()", "def dc_name(self):\n return self.container_name", "def current_db(self):\n return self._current_db", "def _make_database_name(model_version):\n short_version = _version_to_shorthand(model_version)\n return 'pedsnet_dcc_v{}'.format(short_version)", "def name(self):\n return self._config.backend_name", "def getTable(self):\n return self.db.table(self.entity)", "def database_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"database_version\")", "def mysql_database():\n return DATABASE", "def data_center_name(self) -> str:\n return pulumi.get(self, \"data_center_name\")", "def table(self):\n return self.reference.table", "def db_for_write(self, model, **hints):\n state_db = self._db_name(model)\n if state_db in settings.DATABASES:\n name = state_db\n else:\n name = 'default'\n logger.debug('db_for_write({}): {}'.format(state_db, name))\n return name", "def get_database_path():\n\treturn _paths[_DATABASE_PATH_KEY]", "def repo_name(conn):\n name = conn.repository.database_name\n conn.close()\n return name", "def getConnectionName(self):\n return self.system", "def getTable(self):\n return self.table", "def db_for_read(self, model, **hints):\n state_db = self._db_name(model)\n if state_db in settings.DATABASES:\n name = state_db\n else:\n name = 'default'\n logger.debug('db_for_read({}): {}'.format(state_db, name))\n return name" ]
[ "0.8153906", "0.79219604", "0.7853548", "0.77648026", "0.7695105", "0.76031363", "0.7507584", "0.74824643", "0.74470735", "0.7436588", "0.7392899", "0.73886544", "0.7282045", "0.7271273", "0.7229642", "0.72007596", "0.7197087", "0.7124021", "0.7117816", "0.7098378", "0.70015603", "0.6859517", "0.6817982", "0.6817982", "0.6817982", "0.67727274", "0.6748707", "0.67092466", "0.6676321", "0.6580918", "0.6504913", "0.6504913", "0.6419244", "0.6387462", "0.6278995", "0.6272918", "0.6212638", "0.6212301", "0.6196581", "0.6174631", "0.61302096", "0.61251605", "0.6117737", "0.6068227", "0.60537475", "0.60465145", "0.60420895", "0.6018027", "0.6003702", "0.5982767", "0.5971688", "0.59575915", "0.59437215", "0.5941065", "0.5926845", "0.59209865", "0.59186715", "0.5917395", "0.5894661", "0.58944863", "0.5871217", "0.5864599", "0.58457243", "0.5844622", "0.58399814", "0.5826103", "0.5802626", "0.5796504", "0.57800907", "0.576035", "0.5759704", "0.5758198", "0.5752608", "0.57472396", "0.5739447", "0.5739256", "0.5736009", "0.5736009", "0.57203335", "0.5715398", "0.56931597", "0.56866956", "0.5684879", "0.56848073", "0.56792617", "0.56783366", "0.56704056", "0.5666717", "0.566284", "0.5661535", "0.5659784", "0.56554663", "0.5654671", "0.5651654", "0.5649762", "0.56414104", "0.5636829" ]
0.5763545
71
Sets the database ref of this Snowflake table.
def database(self, database):
    self._database = database
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def table_ref(self, table_ref):\n self._table_ref = table_ref", "def set_db(self, db):\n self._db = db", "def db_name(self, db_name):\n\n self._db_name = db_name", "def setDB(dbname):\n global DBNAME\n DBNAME = dbname", "def change(cls, db):\n cls.configs['db'] = db\n\n if cls.conn and cls.conn.open:\n cls.conn.select_db(db)", "def reference(self, reference):\n\n self._reference = reference", "def reference(self, reference):\n\n self._reference = reference", "def set_ref(self, new_ref):\n self.__ref = new_ref", "def table_ref(self):\n return self._table_ref", "def change_db(cls, dbname):\n setattr(cls, 'db', staticmethod(lambda: Db(dbname)))", "def set_db(db):\n global db_run # Imports the DB from the simulator\n db_run=db", "def database(self):\n return self.snowflake_options.database", "def setTable(self, tabledef):\n if isinstance(tabledef, str):\n self._table = Table.Get ( tabledef )\n elif isinstance(tabledef, Table):\n self._table = tabledef\n else:\n raise ValueError (\"table - must be table name or Table instance.\" )", "def change_db(self):\n self.db = self.database.get()\n return self.db", "def setDb(self, db_file):\n self.db_file = db_file\n self.db = sqlite3.connect(self.db_file, isolation_level=None)\n self._initDb()", "def table(self, table):\n self._table = table", "def table(self, table):\n self._table = table", "def setPortRef(self, *args):\n return _libsbml.SBaseRef_setPortRef(self, *args)", "def set_tablename(self, name):\n self.ds_table = name", "def use_reference(self, ref):\n self._ref = ref\n self._ref_name = (\n get_pretty_var_names(\n target_vars=[ref],\n local_vars=inspect.currentframe().f_back.f_locals.items(),\n fallback_name_prefix=\"Ref\",\n )[0]\n if self._ref_name is None\n else self._ref_name\n )\n return self", "def reference_id(self, reference_id):\n\n self._reference_id = reference_id", "def switch(self, database, collection=None):\n self.connection.switchDatabase(database, collection if collection else self.coll_name)", "def table_reference(self) -> 'outputs.PreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReference':\n return pulumi.get(self, \"table_reference\")", "def table(self, table):\n self._table = table\n return self", "def set_reference(self, refobj, reference):\n refnodeattr = \"%s.referencenode\" % refobj\n if reference:\n cmds.connectAttr(\"%s.message\" % reference, refnodeattr, force=True)\n ns = cmds.referenceQuery(reference, namespace=True)\n cmds.setAttr(\"%s.namespace\" % refobj, ns, type=\"string\")\n else:\n conns = cmds.listConnections(refnodeattr, plugs=True)\n if not conns:\n return\n for c in conns:\n cmds.disconnectAttr(c, refnodeattr)", "def initRef(self, obj_pkgs):\n if self.comment and self.comment.startswith('@'):\n i = self.comment.index('.')\n name = self.comment[1:i]\n table = obj_pkgs.get(name, None)\n if table:\n self.ref = MySqlRef(self, obj_pkgs)\n self.query = True\n else:\n self.ref = None\n self.query = True\n else:\n self.ref = None\n self.query = False", "def set_reference_id(self, reference_id):\n self.reference_id = reference_id", "def set_object_database (self, file_name):\n try:\n self.object_database=file_name\n self.filepath_object_database = os.path.join(self.filepath, self.object_database)\n print(\"set object_database filename to\", file_name)\n except:\n print(\"setting object database failed\")\n self.object_database=\"Object_database.db\"\n self.filepath_object_database = os.path.join(self.filepath, self.object_database)\n print(\"set object database name to default:\", 
self.object_database)\n return", "def setPortRef(self, *args):\n return _libsbml.Port_setPortRef(self, *args)", "def set_feature_table(self, feature_table):\n\n if self.feature_table is not None:\n logger.warning(\n 'Feature table is already set, changing it now will not recompile '\n 'existing rules')\n self.feature_table = feature_table", "def set_reference(self, reference):\n\t\tif ((reference == 0) or (reference == 1)):\n\t\t\tself._reference = reference\n\t\telse:\n\t\t\tsys.stderr.write(\"\\nERROR : %s reference must be 0 (internal) or 1 (external) so it can't be %s !\\n\" % (self._target_id, reference))\n\t\t\tsys.exit(1)", "def set_output_database (self, file_name):\n try:\n self.object_database=file_name\n self.filepath_output_database = os.path.join(self.filepath, self.output_database)\n print(\"set output_database filename to\", file_name)\n except:\n print(\"setting object database failed\")\n self.output_database=\"Output_database.db\"\n self.filepath_output_database = os.path.join(self.filepath, self.output_database)\n print(\"set output database name to default:\", self.object_database)\n return", "def set_db(self, db_file=predicted_actions_db, cache_size=30):\n\n db = TinyDB(db_file)\n self.table = db.table(\"positions\", cache_size=cache_size)", "async def set_db_name_field(self, db_name_field):\n self.db_name_field = db_name_field", "def setRevertable(self, b):\n\n self.revertable = b", "def setSBaseRef(self, *args):\n return _libsbml.SBaseRef_setSBaseRef(self, *args)", "def refu(self, refu):\n\n self._refu = refu", "def set_db(self, dbid: str, account: Optional[str] = None) -> str:\n self._check_connection(check_db=False)\n\n if account is None:\n account = self._account\n\n return self.connect(\n account=account,\n db=dbid,\n remote_auth=self._remote_auth,\n key=self._key,\n user=self._user,\n branch=self._branch,\n ref=self._ref,\n repo=self._repo,\n )", "def set_db(self, db_file=predicted_actions_db, cache_size=None):\n\n db = TinyDB(db_file)\n self.table = db.table(\"scenarios\", cache_size=cache_size)", "def database(self, database):\n if database is not None and len(database) > 256:\n raise ValueError(\"Invalid value for `database`, length must be less than or equal to `256`\")\n if database is not None and len(database) < 0:\n raise ValueError(\"Invalid value for `database`, length must be greater than or equal to `0`\")\n if database is not None and not re.search('[\\\\w\\\\_]{0,}', database):\n raise ValueError(\"Invalid value for `database`, must be a follow pattern or equal to `/[\\\\w\\\\_]{0,}/`\")\n\n self._database = database", "def set_db_connexion(self, db_name='local_backup'):\r\n\r\n dbapi = pd.read_json(os.path.join(self.config_path, \"db_access_credentials.json\"), orient='index')\r\n\r\n # Let's create a connexion from the DBAPI variables\r\n connexion = 'mysql+mysqlconnector://{0}:{1}@{2}:{3}/{4}?auth_plugin={5}'\r\n connexion = connexion.format(dbapi['user'][db_name],\r\n dbapi['password'][db_name],\r\n dbapi['server_url'][db_name],\r\n dbapi['port'][db_name],\r\n dbapi['schema'][db_name],\r\n dbapi['authentication_plugin'][db_name])\r\n\r\n self.mariadb_engine = create_engine(connexion)", "def schema_ref(schema, table):\n return schema + '.' 
+ table", "def hmdb(self, hmdb):\n\n self._hmdb = hmdb", "def setup_database(self):\n self.db.setup_database()", "def table(self):\n return self.snowflake_options.table", "def reference(self, new_reference):\n\n # Check a type of 'new_reference' parametr\n if not isinstance(new_reference, basestring):\n raise TypeError('string type expected')\n self._reference = new_reference", "def close_database(self) -> None:\n self.api = None\n self.base = None\n self.table = None", "def setReference(self, *args):\n return _libsbml.Association_setReference(self, *args)", "def setReference(self,ref):\n rospy.wait_for_service('/myo/myo_muscle0_controller/set_ref')\n try:\n sDsp = rospy.ServiceProxy('/myo/myo_muscle0_controller/set_ref',myo_msgs.srv.SetReference)\n sDsp(ref)\n except(rospy.ServiceException, e):\n print(\" \")", "def setreferencevalue(self, *args, **kwargs):\n return _coordsys.coordsys_setreferencevalue(self, *args, **kwargs)", "def databases(self, databases):\n\n self._databases = databases", "def set_test_database(self):\n db_manager.db = SqliteDatabase(settings.UNITTEST_DATABASE_NAME)\n db_manager.Employee._meta.database = db_manager.db\n db_manager.LogEntry._meta.database = db_manager.db", "def set_designator(self, ref):\n self.ref = ref", "def set(self, name):\n self.rpc.call(MsfRpcMethod.DbSetWorkspace, [name])", "def __set__( self, client, value ):\n\t\tif isinstance( value, dbrow.DBRow):\n\t\t\t# we set the refered-to value, not the object itself\n\t\t\tconstraint = self.schema.foreign()\n\t\t\tfields = constraint.getForeignFields()\n\t\t\tassert len(fields) == 1, \"\"\"Attempt to set %r to %r, this is a multi-field constraint somehow?\"\"\"%(\n\t\t\t\tself.name, value,\n\t\t\t)\n\t\treturn super( ReferenceProperty, self ).__set__( client, value )", "def add_reference(self,ref):\n \n master = None #for the table that is referenced\n slave = None #for the table that uses the reference\n new_root = None\n \n for child in ref:\n local_tag = child.tag.split(\"}\")[1] #gets the local part of the tag\n if local_tag == 'connections':\n new_root = child\n break\n \n #new_root == None means the connection exists but is not properly\n #connected to any table in the diagram\n if new_root == None: ###\n self.err.print_error(\"dia:ref_not_closed\") ###\n e_code = self.err.exit_code[\"diagram\"] ###\n ###\n exit(e_code) ###\n \n \n for child in new_root: \n #master table\n if child.attrib['handle'] == \"0\":\n master_id = child.attrib['to'] #gets the master id\n master = self.table_dict[master_id] \n \n #slave table\n elif child.attrib['handle'] == \"1\":\n slave_id = child.attrib['to'] #gets the slave id\n slave = self.table_dict[slave_id] \n \n \n #error check if either table not found\n if master == None or slave == None: ###\n self.err.print_error(\"dia:ref_not_closed\") ###\n e_code = self.err.exit_code[\"diagram\"] ###\n ###\n exit(e_code) ###\n \n \n #updating both tables\n master.add_slave(slave)\n slave.add_foreign_key(master)\n \n return", "def set_inspector(self, database_name: str) -> None:\n logger.info(f\"Ingesting from database: {database_name}\")\n\n new_service_connection = deepcopy(self.service_connection)\n new_service_connection.database = database_name\n self.engine = get_connection(new_service_connection)\n self.inspector = inspect(self.engine)\n self._connection = None # Lazy init as well", "def set_render_database_name(self, file_name):\n try:\n self.render_database=file_name\n self.filepath_render_database = os.path.join(self.filepath, self.render_database)\n print(\"set 
render_database filename to\", file_name)\n except:\n print(\"setting render database failed\")\n self.render_database=\"Render_database.db\"\n self.filepath_object_database = os.path.join(self.filepath, self.render_database)\n print(\"set render database name to default:\", self.render_database)\n return", "def class_ref(self, class_ref):\n\n self._class_ref = class_ref", "def ref_name(self, name):\n self._ref_name = name if self._ref_name is None else self._ref_name\n return self", "def set_master_table(filepath):\n my_globals['master_table_path'] = filepath\n my_globals['master_table_data'] = None", "def set_reference(self, traj):\n self.ref_traj_aa = traj[0]\n self.ref_traj = self.mapping.map_traj(traj[0])", "def acceptDB(self, db):\n self._db = db", "def set_db_file():\n\n return os.path.join(db_path, db_file)", "def make_reference2(self):\n self.ref = Snapshot()", "def reference_number(self, reference_number):\n\n self._reference_number = reference_number", "def get_database_url(self):\n return self.config['dbase_path']", "def reference(self, name):\n pass", "def gtfsdb_main(ctx, database):\n ctx.obj = dict()\n if not database and os.path.exists(DEFAULT_CONFIG_FILE):\n conf = json.load(open(DEFAULT_CONFIG_FILE, 'r'))\n database = conf['database']\n ctx.obj.update(dict(conf=conf))\n else:\n click.echo(\"No database selected!!\")\n sys.exit(1)\n ctx.obj.update(dict(database=Database(url=database), db_url=database))", "def setRef(self,reference):\n (iMod,iObj) = reference\n self.rnam.setData(struct.pack('i',iObj)[:3] + struct.pack('B',iMod))\n self.setChanged()", "def table_reference(self) -> 'outputs.TableReferenceResponse':\n return pulumi.get(self, \"table_reference\")", "def _set_database_version(db, version):\n if not isinstance(version, int):\n raise TypeError(\"Version must be integer, not %s : %s\" % (\n version, type(version)))\n create_metadata = \\\n \"CREATE TABLE %s (version INT)\" % METADATA_COLUMN_NAME\n execute_sql(db, create_metadata)\n insert_version = \\\n \"INSERT INTO %s VALUES (%s)\" % (METADATA_COLUMN_NAME, version)\n execute_sql(db, insert_version)", "def set_parent_table(self, table):\n self.__parent_table = table", "def create_table_db(self):\n table_name = self.name_table.get()\n if len(table_name) > 0:\n self.table_db = table_name\n self.sql_database.db_name = self.db\n if self.sql_database.create_table(self.table_db):\n msg.showinfo(\n message=\"\".join([str(self.table_db), \" created\"]))\n self.name_table.delete(0, tk.END)\n self.show_table_combobox()\n else:\n msg.showinfo(message=\"Failed\")\n else:\n msg.showinfo(message=\"Write table name!\")", "def set_external_ref(self):\n ref_set_success = synth.set_ref_select(e_not_i=1)\n if not ref_set_success:\n raise RuntimeError('External reference request failed:\\nfunction %s, line no %s\\n' %(__name__, inspect.currentframe().f_lineno))\n return ref_set_success", "def references(self, references):\n\n self._references = references", "def XPLMSetDatab_f(inRefcon, inValue, inOffset, inLength):", "def __init__(self, database_name):\n self.conn = sqlite3.connect(\"output/%s.db\" % database_name)", "def table(self):\n return self.reference.table", "def db_name(self):\n return self._db_name", "def visit_table(self, sytable):\n self.current.update(sytable)", "def visit_table(self, sytable):\n self.current.update(sytable)", "def ref(self, refid: Optional[str] = None) -> str:\n if refid:\n self._ref = refid\n return self._ref", "def setdb():\n\n if not database_exists(DB_URL):\n print('Creating database.')\n 
create_database(DB_URL)\n\n print('Creating tables.')\n db.create_all()\n print('Shiny!')", "def sit(self, table):\n self.table = table", "def set_service(service_name, reference):\n Container.services[service_name] = reference", "def update_reference_value(self, reference_value: float):\n self.__reference_value = reference_value", "def setMetaIdRef(self, *args):\n return _libsbml.SBaseRef_setMetaIdRef(self, *args)", "def db(self) -> str:\n return self._db", "def setUnitRef(self, *args):\n return _libsbml.SBaseRef_setUnitRef(self, *args)", "def set_frame_of_reference(self, p, q):\n self.sw.set_frame_of_reference(p, q, 0.0, 180.0, 0.0)", "def setCompartmentReference(self, *args):\n return _libsbml.MultiSimpleSpeciesReferencePlugin_setCompartmentReference(self, *args)", "def database_name(self) -> str:\n return pulumi.get(self, \"database_name\")", "def set_db(self, dbtype=SQLITE, drop_table=True, **kwargs):\n\n from .dbmodels import proxy\n\n if dbtype == SQLITE:\n dbfile = kwargs.get(\"dbfile\", \":memory:\")\n\n self._db = SqliteDatabase(\n dbfile,\n pragmas={\n \"journal_mode\": \"wal\",\n \"journal_size_limit\": 1024,\n \"cache_size\": -1024 * 64, # 64MB\n \"foreign_keys\": 1,\n \"ignore_check_constraints\": 0,\n \"synchronous\": 0,\n },\n )\n\n elif dbtype == MYSQL:\n app = kwargs[\"app\"]\n del kwargs[\"app\"]\n self._db = MySQLDatabase(app, **kwargs)\n\n elif dbtype == POSTGRESQL:\n app = kwargs[\"app\"]\n del kwargs[\"app\"]\n self._db = PostgresqlDatabase(app, **kwargs)\n\n proxy.initialize(self._db)\n self._db_manager.set_db(self._db)\n self._db_manager.set_dropped(drop_table)", "def setCompartmentReference(self, *args):\n return _libsbml.SpeciesTypeInstance_setCompartmentReference(self, *args)", "def set_db_session():\n g.s = database.db_session()", "def reference(self, reference):\n if reference is not None and len(reference) > 100:\n raise ValueError(\"Invalid value for `reference`, length must be less than or equal to `100`\")\n if reference is not None and len(reference) < 3:\n raise ValueError(\"Invalid value for `reference`, length must be greater than or equal to `3`\")\n\n self._reference = reference", "def set_refclk(self, refclk):\n raise NotImplementedError", "def set(self, table):\n if table is None:\n return\n for name in table.dtype.names:\n self._set_column(name, table[name])", "def setup_db_conn():\n # TODO update so DB does not have to be hard coded\n # Currently DB is hardcoded" ]
[ "0.69317377", "0.6532754", "0.6392685", "0.5966977", "0.5806949", "0.5773162", "0.5773162", "0.57592016", "0.56473815", "0.56473684", "0.5647304", "0.55943197", "0.5568484", "0.54553974", "0.54033595", "0.53913724", "0.53913724", "0.5374103", "0.5366539", "0.5345408", "0.52705973", "0.52526855", "0.5252092", "0.52380913", "0.5234088", "0.52148235", "0.520435", "0.519425", "0.51662296", "0.51108766", "0.5109027", "0.51055586", "0.5065137", "0.5059115", "0.50503826", "0.5035013", "0.5029149", "0.50201017", "0.5006159", "0.49889523", "0.49853212", "0.49847496", "0.49818638", "0.49749476", "0.49704647", "0.4960461", "0.4953353", "0.49502736", "0.49392587", "0.49222144", "0.49103862", "0.4888385", "0.488695", "0.4883217", "0.48810163", "0.48768112", "0.48656237", "0.4862814", "0.48550302", "0.48529866", "0.47936127", "0.4792158", "0.478675", "0.47658274", "0.4758913", "0.47373495", "0.4718089", "0.47133192", "0.47091064", "0.4708968", "0.47086316", "0.47075275", "0.47015113", "0.46940592", "0.46924418", "0.46901584", "0.4685964", "0.4684769", "0.46806768", "0.467841", "0.46473244", "0.46473244", "0.4644114", "0.46304077", "0.46162656", "0.46161276", "0.46121046", "0.4612037", "0.46049064", "0.4584491", "0.458361", "0.45828044", "0.4580593", "0.45711023", "0.45701578", "0.45663312", "0.45658717", "0.4564532", "0.45605648", "0.45602885" ]
0.63701606
3
Returns the schema name of this Snowflake table.
def schema(self):
    return self._schema
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def schema(self):\n return self.table_info.schema", "def get_table_name(self):\n return self._table", "def schema(self):\n return self.snowflake_options.schema", "def schema_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"schema_name\")", "def table(self):\n return self.snowflake_options.table", "def table_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"table_name\")", "def table(self):\n return self._table_name", "def table_name() -> str:\n pass", "def table_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"table_name\")", "def table_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"table_name\")", "def table_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"table_name\")", "def table_name(self) -> str:\n return self.model._meta.db_table", "def schema(self) -> 'outputs.TableSchemaResponse':\n return pulumi.get(self, \"schema\")", "def get_tablename(self):\n return self.ds_table", "def table_name(self):\n return self._new_table.name", "def standardize_table_name(self, schema_name: str, table: str) -> str:\n return table", "def fetch_table_schema(self, table_name):\n ddl = self.query(sql.show_create_table(table_name))\n if ddl:\n try:\n return parse_create(ddl[0][\"Create Table\"])\n except ParseError as e:\n raise OSCError(\n \"TABLE_PARSING_ERROR\",\n {\"db\": self._current_db, \"table\": self.table_name, \"msg\": str(e)},\n )", "def graph_queries_table_name(self) -> str:\n return pulumi.get(self, \"graph_queries_table_name\")", "def get_tables_in_schema(self, conn, schema_name):\n return conn.get_tables(schema_name)['table_name']", "def tablename(entity) -> str:\n return entity.__tablename__", "def get_schema(self, repo, table):\n return self.user_con.get_schema(repo=repo, table=table)", "def schema(self) -> str:\n return parse_schema(self._spec[\"schema\"])", "def table_name(class_):\n try:\n return class_.__tablename__\n except AttributeError:\n return class_.__table__.name", "def get_table_schema(dataset_id, table_id):\n logging.info('getting table schema')\n bigquery_client = bigquery.Client()\n dataset_ref = bigquery_client.dataset(dataset_id)\n bg_tableref = bigquery.table.TableReference(dataset_ref, table_id)\n bg_table = bigquery_client.get_table(bg_tableref)\n return bg_table.schema", "def get_schema(): # noqa: WPS440\n return config.DEFAULT_SCHEMA", "def __tablename__(cls):\n return get_table_name(cls.__name__)", "def schema_ref(schema, table):\n return schema + '.' 
+ table", "def get_schema_name(schema_path):\n print(schema_path)\n path = os.path.normpath(schema_path)\n return os.path.sep.join(path.split(os.path.sep)[-3:])", "def get_schemas(self, conn):\n return conn.get_schemas()['table_schema']", "def get_table_name(model_id: Text) -> Text:\n return model_id if not cfg.app.db.schema else cfg.app.db.schema + \".\" + model_id", "def get_qualified_name(self):\r\n return self.__schema + \".\" + self.__name", "def table_name(self) -> str:\n return \"OLTP\"", "def get_schema_url(self):\n return self.NAME_TYPE_SCHEMAS.get(self.name_type, None)", "def getTableSchema(self,tableName):\n\tif not self.schemaDict.has_key(tableName):\n\t if self.dbType==\"sqlite\":\n\t query = \"SELECT * FROM sqlite_master WHERE name='%s'\"%tableName\n\t tup = self.fetchOne(query)\n\t schema= tup[4]\n\t else: # MySQL \n\t query = \"DESCRIBE %s\"%tableName\n\t tup = self.fetchAll(query)\n\t schema= \"CREATE TABLE %s (\"%tableName\n\t for item in tup:\n\t name = item[0]\n\t\t type = item[1]\n\t\t priKey = item[3]\n\t\t autoInc = item[5] \n\t schema+=name+' '+type+' '+priKey+' '+autoInc\n\t\t if item!=tup[-1]:\n\t\t schema+=','\n\t schema+=\" )\"\n\t return schema\n\telse:\n\t return self.schemaDict[tableName]", "def get_table_definition(jwt_payload: dict, schema_name: str, table_name: str):\n DJConnector.set_datajoint_config(jwt_payload)\n\n schema_virtual_module = dj.create_virtual_module(schema_name, schema_name)\n return getattr(schema_virtual_module, table_name).describe()", "def table(cls):\n return cls.__name__", "def get_schema(sid, did, scid):\n\n driver = get_driver(PG_DEFAULT_DRIVER)\n manager = driver.connection_manager(sid)\n conn = manager.connection(did=did)\n\n ver = manager.version\n server_type = manager.server_type\n\n # Fetch schema name\n status, schema_name = conn.execute_scalar(\n render_template(\n \"/\".join(['schemas',\n '{0}/#{1}#'.format(server_type, ver),\n 'sql/get_name.sql']),\n conn=conn, scid=scid\n )\n )\n\n return status, schema_name", "def get_schema(self):\r\n return self.__schema", "def get_schema(self):\n response = self.client.get(self._get_collection_url('schema'))\n\n return response.get('schema', {})", "def keyspace(self, name):\n if self.options.db_prefix:\n return \"{}_{}\".format(self.options.db_prefix, name)\n return name", "def name(self) -> str:\n return self.fqtable.replace(\".\", \"_\")", "def get_schema(self, engine_name):\n endpoint = \"engines/{}/schema\".format(engine_name)\n return self.swiftype_session.request('get', endpoint)", "def output_schema(self) -> Optional[str]:\n return pulumi.get(self, \"output_schema\")", "def table_id(self) -> str:\n return pulumi.get(self, \"table_id\")", "def name(self):\n if self.table:\n return \"{}.{}\".format(self.table, self.field_name)\n return self.field_name", "def delta_table_name(self):\n if len(self._old_table.name) < constant.MAX_TABLE_LENGTH - 10:\n return constant.DELTA_TABLE_PREFIX + self._old_table.name\n elif (\n len(self._old_table.name) >= constant.MAX_TABLE_LENGTH - 10\n and len(self._old_table.name) < constant.MAX_TABLE_LENGTH - 2\n ):\n return constant.SHORT_DELTA_TABLE_PREFIX + self._old_table.name\n else:\n return constant.DELTA_TABLE_PREFIX + constant.GENERIC_TABLE_NAME", "def table(self):\n if not self.exists:\n return None\n return self._get_table()", "def tablename(klass):\n if not hasattr(klass, 'TABLENAME'):\n inf = Inflector()\n klass.TABLENAME = inf.tableize(klass.__name__)\n return klass.TABLENAME", "def encodeTableName(self, schema, table):\r\n return 
'\"{}\".\"{}\"'.format(schema, table)", "def get_schema(self, get_stats=False):\n query = \"schema {}\"\n\n results = self.run_dgraph_query_raw(query)\n\n schema = {}\n\n for row in results[\"schema\"]:\n table_name = row[\"predicate\"]\n\n if table_name not in schema:\n schema[table_name] = {\"name\": table_name, \"columns\": []}\n\n return list(schema.values())", "def get_schema(cls):\n return cls.schema()", "def _get_table_schema(self):\n\n return {\n 'AttributeDefinitions': [\n {\n 'AttributeName': self._key_field.name,\n 'AttributeType': self._key_field.data_type\n }\n ],\n 'TableName': self.table_name,\n 'KeySchema': [\n {\n 'AttributeName': self._key_field.name,\n 'KeyType': 'HASH'\n }\n ],\n 'ProvisionedThroughput': {\n 'ReadCapacityUnits': self.read_capacity_units,\n 'WriteCapacityUnits': self.write_capacity_units\n }\n }", "def getTableByName(self, tablename):\n pass", "def get_context_table_name(self, table):\r\n return self.context_table_name or \"table\"", "def table_names(self, cursor=None):\r\n return [kind.key().name() for kind in Query(kind='__kind__').Run()]", "def schema(self, name):\n return model.Schema(self, name)", "def get_default_schema(self):\n schema = self._connection.settings.get(\"schema\")\n if schema:\n res = (\n self.sql(_SELECT_SCHEMA_NAME_QUERY.format(escape(schema)))\n .execute()\n .fetch_all()\n )\n try:\n if res[0][0] == schema:\n return Schema(self, schema)\n except IndexError:\n raise ProgrammingError(\n f\"Default schema '{schema}' does not exists\"\n ) from None\n return None", "def getDBSchema(self, desired=None):\n role = self.getRole(desired)\n schema = role[\"roleName\"]\n return schema", "def _get_stored_schema(self, table: str) -> Optional[TableSchema]:\n try:\n with open(self.schemas / (table + '.json'), 'r') as f:\n return json.load(f)\n except FileNotFoundError:\n return None", "def get_name(self) -> str:\n return self.dbname", "def __tablename__(cls) -> str:\n return inflection.underscore(cls.__name__)", "def get_schema(self) -> dict:\n return schemas.get_object_schema(self.schema)", "def database_name(self) -> str:\n return pulumi.get(self, \"database_name\")", "def get_type(self) -> str:\n return Tables.ESL.name", "def getTableNames(self):\n\n # The specific command depends on whether we are using mysql or sqlite\n if self.connector == 'mysql':\n sqlcmd = (\"SELECT table_name FROM INFORMATION_SCHEMA.TABLES \" +\n \"WHERE table_schema='\" + self.dbname + \"'\")\n else:\n sqlcmd = \"SELECT name FROM sqlite_master WHERE type='table'\"\n\n self._c.execute(sqlcmd)\n tbnames = [el[0] for el in self._c.fetchall()]\n\n return tbnames", "def schema(self) -> graphql.GraphQLSchema:\n return self._schema", "def schema(self):\n if self.key_type is None:\n return \"%s %s\" % (self.name, self.data_type)\n else:\n return \"%s %s %s KEY\" % (self.name, self.data_type, self.key_type)", "def getTable(self):\n return self.table", "def get_table_column_name(self, table):\n c = self.conn.cursor()\n c.execute(\"SELECT * FROM %s\" % table)\n names = list(map(lambda x: x[0], c.description))\n return names", "def get_schema(db, sourcename):\n try:\n schema = db[\"tables\"][sourcename]\n schema[\"type\"] = constants.TABLE\n except KeyError:\n try:\n schema = db[\"views\"][sourcename]\n schema[\"type\"] = constants.VIEW\n except KeyError:\n raise ValueError(\"no such table/view\")\n return schema", "def get_schema(self) -> ArchiveSchema:\n return self.schema", "def database(self):\n return self.snowflake_options.database", "def name(self) -> str:\n return 
f\"lookup_table_{self.table_number}\"", "def getTableSchema(self, lsstLevel, dbName, tableName):\n return self._doRequest(self.httpClient.getTableSchema, lsstLevel, dbName, tableName)", "def compute_tableprefix(cls):\n cls.dbtablename_prefix = cls.dbm().get_tablenameprefix(cls.dbschemaname)", "def getTable(self):\n return self.db.table(self.entity)", "def _TryGetCurrentSchema(dataset_id, table_id, project_id):\n client = GetApiClient()\n service = client.tables\n get_request_type = GetApiMessage('BigqueryTablesGetRequest')\n get_request = get_request_type(datasetId=dataset_id,\n tableId=table_id,\n projectId=project_id)\n try:\n table = service.Get(get_request)\n if not table or table.type != 'TABLE':\n raise SchemaUpdateError('Schema modifications only supported '\n 'on TABLE objects received [{}]'.format(\n table))\n except apitools_exceptions.HttpNotFoundError:\n raise SchemaUpdateError('Table with id [{}:{}:{}] not found.'.format(\n project_id, dataset_id, table_id))\n\n return table.schema", "def get_table_names(self):\n return self.engine.table_names()", "def renamed_table_name(self):\n if len(self._old_table.name) < constant.MAX_TABLE_LENGTH - 10:\n return constant.RENAMED_TABLE_PREFIX + self._old_table.name\n elif (\n len(self._old_table.name) >= constant.MAX_TABLE_LENGTH - 10\n and len(self._old_table.name) < constant.MAX_TABLE_LENGTH - 2\n ):\n return constant.SHORT_RENAMED_TABLE_PREFIX + self._old_table.name\n else:\n return constant.RENAMED_TABLE_PREFIX + constant.GENERIC_TABLE_NAME", "def reference_schema(self) -> pulumi.Input['ApplicationApplicationConfigurationSqlApplicationConfigurationReferenceDataSourceReferenceSchemaArgs']:\n return pulumi.get(self, \"reference_schema\")", "def database_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"database_name\")", "def destination_table(self) -> str:\n return pulumi.get(self, \"destination_table\")", "def schema(self):\n attrs = self.attrs.copy()\n parts = ['CREATE', 'TABLE', self.name, '(%s,' % self.hash_key.schema]\n del attrs[self.hash_key.name]\n if self.range_key:\n parts.append(self.range_key.schema + ',')\n del attrs[self.range_key.name]\n if attrs:\n attr_def = ', '.join([attr.schema for attr in six.itervalues(attrs)])\n parts.append(attr_def + ',')\n\n parts.append(\"THROUGHPUT (%d, %d))\" % (self.read_throughput,\n self.write_throughput))\n parts.extend([g.schema for g in six.itervalues(self.global_indexes)])\n return ' '.join(parts) + ';'", "def schema(self):\n return _parse_schema_resource(self._properties.get(\"schema\", {}))", "def get_db_schema_text(db_name: str) -> str:\n\n return str(subprocess.check_output(['sqlite3', db_name, '.schema']), 'utf-8')", "def schema(self):\n # type: () -> object\n return self._schema", "def table(self, table_name):\n return self._get_storage().table(table_name)", "def get_schema(self, name, namespace=None):\n avro_name = self.get_name(name=name, namespace=namespace)\n return self._names.get(avro_name.fullname, None)", "def schema(cls):\n return Schema.get_instance(cls)", "def get_schema(self):\n return ', '.join('%s:%s' % (col, self.schema[col]) for col in self.schema)", "def table(self):\n return self.reference.table", "def tablename(self):\n _, tail = os.path.split(self.url)\n return tail[:-4]", "def table(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"table\")", "def get_schema(self):\n return ', '.join(\n '%s:%s' % (col, self.schema[col]) for col in self.schema)", "def getDatabaseName(self):\n raise NotImplementedError", "def 
get_meta_schema(self):\n return self._tc_meta_schema", "def sensorsTableName(self):\n return 'sensors'", "def target_namespace(self) -> str:\n return self.schema.target_namespace if self.ref is None else self.ref.target_namespace", "def getDatabaseName(self):\n return self._base.getDatabaseName()", "def schema(self) -> Schema:\n return next(schema for schema in self.metadata.schemas if schema.schema_id == self.metadata.current_schema_id)" ]
[ "0.7543852", "0.7407979", "0.73744506", "0.72745496", "0.7205025", "0.7046401", "0.70127237", "0.7001271", "0.6922719", "0.6922719", "0.6922719", "0.69220996", "0.6912203", "0.68869877", "0.6849201", "0.6825561", "0.6613302", "0.6605695", "0.66030455", "0.6539277", "0.65093803", "0.6427138", "0.64187604", "0.64168423", "0.63465756", "0.6336008", "0.6285102", "0.6260712", "0.6221021", "0.6217453", "0.61811703", "0.6178804", "0.6072324", "0.6071494", "0.6067768", "0.60435665", "0.60051674", "0.5990146", "0.5976678", "0.5974792", "0.59746116", "0.59672594", "0.59417605", "0.59175724", "0.59038305", "0.5886784", "0.5870361", "0.5852105", "0.58386236", "0.5837593", "0.58303195", "0.58269775", "0.5820307", "0.5819499", "0.57611614", "0.57385486", "0.5728905", "0.5724161", "0.5708831", "0.5704986", "0.5704746", "0.5699949", "0.56995785", "0.56892735", "0.5682668", "0.56826323", "0.56734955", "0.5668219", "0.56573385", "0.56525344", "0.5636782", "0.5633835", "0.5628225", "0.56214094", "0.56161463", "0.5606364", "0.56018305", "0.5600644", "0.5597748", "0.5597188", "0.5592444", "0.5590787", "0.55887383", "0.55877864", "0.5551694", "0.5547232", "0.55421007", "0.55398905", "0.5524614", "0.5517709", "0.55067027", "0.5502445", "0.55012864", "0.5495448", "0.5493022", "0.5492096", "0.5484201", "0.5483673", "0.5483325", "0.5479319" ]
0.5733677
56
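The getters in the neighbouring entries return the database, schema, and table names separately; a small hedged helper (a sketch only — the function name and quoting convention are assumptions, not part of the dataset) shows how such values are commonly combined into a fully qualified Snowflake identifier:

def fully_qualified_name(database: str, schema: str, table: str) -> str:
    # Double-quote each identifier and join with dots, e.g. "ANALYTICS"."PUBLIC"."ORDERS".
    return f'"{database}"."{schema}"."{table}"'


print(fully_qualified_name("ANALYTICS", "PUBLIC", "ORDERS"))
# -> "ANALYTICS"."PUBLIC"."ORDERS"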
Sets the schema of this Snowflake table.
def schema(self, schema):
    self._schema = schema
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_schema(self, schema):\r\n self.__schema = schema", "def schema(self, schema):\n\n self._schema = schema", "def schema(self, schema):\n\n self._schema = schema", "def schema(self, schema):\n\n self._schema = schema", "def set_schema(self, schema, set_num_columns=True):\n update_dict = {self.SCHEMA: schema}\n\n if set_num_columns:\n update_dict.update({self.NUM_COLUMNS: len(schema.keys())})\n\n self.update(update_dict)", "def schema(self, schema):\n # type: (object) -> None\n\n if schema is not None:\n if not isinstance(schema, object):\n raise TypeError(\"Invalid type for `schema`, type has to be `object`\")\n\n self._schema = schema", "def set_schema_class(self, schema):\n self.schema_class = schema", "def _create_schema(self):\n self._conn.executescript(self._db_schema)", "def SetSchema(self, schema):\n if schema is None: schema__o = None\n else: schema__o = schema._o\n ret = libxml2mod.xmlTextReaderSetSchema(self._o, schema__o)\n return ret", "def update_schema(self, new_schema):\n return self.conn.update_schema(new_schema)", "def install(cls):\n return cls.interface.set_table(cls.schema)", "def schema(self):\n return self.snowflake_options.schema", "def setup_schema(self):\n models.Base.metadata.create_all(self.session.bind)", "def set_tablename(self, name):\n self.ds_table = name", "def set_schema():\n schema = StructType([\n StructField(\"cicid\",DoubleType(),True),\n StructField(\"arrdate\",DoubleType(),True),\n StructField(\"i94cit\",DoubleType(),True),\n StructField(\"i94res\",DoubleType(),True),\n StructField(\"i94port\",StringType(),True),\n StructField(\"i94mode\",DoubleType(),True),\n StructField(\"i94addr\",StringType(),True),\n StructField(\"depdate\",DoubleType(),True), \n StructField(\"i94bir\",DoubleType(),True),\n StructField(\"i94visa\",DoubleType(),True),\n StructField(\"gender\",StringType(),True),\n StructField(\"airline\",StringType(),True),\n StructField(\"visatype\",StringType(),True)])\n return schema", "def create_schema(self, schema: str):\n return", "def update(self, schema: 'Schema'):\n self._update(schema)", "def update_schema(self, engine_name, schema):\n endpoint = \"engines/{}/schema\".format(engine_name)\n data = json.dumps(schema)\n return self.swiftype_session.request('post', endpoint, data=data)", "def create_staging_schema(cursor,table_schema):\n create_schema = \"CREATE SCHEMA IF NOT EXISTS \" + table_schema + \";\"\n cursor.execute(create_schema)", "def add_schema(self, schema, db):\n self._dbs[schema.typename] = db\n return None", "def setTableComment(self, schema, table, comment):\r\n return self.runSql('COMMENT ON TABLE {} IS \\'{}\\''.format(self.encodeTableName(schema, table), self.encodeLiteral(comment)))", "def setTable(self, tabledef):\n if isinstance(tabledef, str):\n self._table = Table.Get ( tabledef )\n elif isinstance(tabledef, Table):\n self._table = tabledef\n else:\n raise ValueError (\"table - must be table name or Table instance.\" )", "def setupschema(sheet):\n\n wkst = sheet.sheet1\n wkst.update_title('SheetSQL')\n wkst.update_acell('A1', 'This worksheet holds all major settings for the DB. 
No Touching!')\n wkst.update_acell('A2', 'Spreadsheet ID: {}'.format(sheet.id)) #Writes ID to CELL A2", "def _add_table_schema(table_desc, table_name, schema):\n table_desc['TableName'] = table_name\n table_desc['AttributeDefinitions'] = [{\n 'AttributeName': item['name'],\n 'AttributeType': DynamoStubber._encode_type(item['type'])\n } for item in schema]\n table_desc['KeySchema'] = [{\n 'AttributeName': item['name'],\n 'KeyType': item['key_type']\n } for item in schema]", "def modify_bigquery_schema(self, schema, info_keys):\n # type: (bigquery.TableSchema, Set[str]) -> None", "def SetSchema(self, reader):\n if reader is None: reader__o = None\n else: reader__o = reader._o\n ret = libxml2mod.xmlTextReaderSetSchema(reader__o, self._o)\n return ret", "def create_schema(self, schema):\n sql = f'set role {self.write_role}; ' \\\n + f'CREATE SCHEMA IF NOT EXISTS {schema};'\n return sql", "async def upgradeSchema(self) -> None:", "def _schema_write(self, table: TableSchema) -> None:\n with open(self.schemas / (table['name'] + '.json'), 'w') as f:\n json.dump(table, f, indent=True)", "def create_table(self, schema: str, table: str, col_types: dict, non_null_columns: List[str]):\n return", "def schema(self):\n return self.table_info.schema", "def table(self, table):\n self._table = table", "def table(self, table):\n self._table = table", "def _create_schema(self, schema_name):\n try:\n if not self.__engine.dialect.has_schema(self.__engine, schema_name):\n self.__session.execute(CreateSchema(schema_name))\n self._commit()\n except Exception:\n self._rollback()\n self._reset_session()\n raise", "def set(self, table):\n if table is None:\n return\n for name in table.dtype.names:\n self._set_column(name, table[name])", "def from_schema(cls, sdict):\n\n table_schema = TableSchema()\n for name, dschema in sdict.items():\n\n schema = ColumnSchema(name=name, **dschema)\n table_schema.add_column(schema)\n\n return table_schema", "def _set_schema(self, schema_value):\n self._id = schema_value.id\n\n if type(self).__name__ != schema_value.type:\n # Make sure this object is the correct type.\n raise ValueError('Cannot convert a {} protocol to a {}.'\n .format(str(type(self)), schema_value.type))\n\n for input_full_path in schema_value.inputs:\n\n value = copy.deepcopy(schema_value.inputs[input_full_path])\n\n input_path = ProtocolPath.from_string(input_full_path)\n self.set_value(input_path, value)", "def create(self):\n c = self.cursor()\n byte_schema = pkgutil.get_data(__package__, 'schema.sql')\n c.executescript(byte_schema.decode('utf-8'))\n self.commit()", "def set_schemas(self, schemas, asset=None):\n dicts = [s.to_dict() for s in schemas]\n self._set_property('pc:schemas', dicts, asset)", "def initialize_schema(self, dry_run=False):\n if not dry_run:\n self.flush()", "def schema(self) -> 'outputs.TableSchemaResponse':\n return pulumi.get(self, \"schema\")", "def setup_keyspace(self, keyspace):\n if not self.session:\n raise StorageError(\"Session not availble. 
Call connect() first\")\n\n schema = pkg_resources.read_text(db, SCHEMA_FILE)\n\n # Replace keyspace name placeholder in CQL schema script\n schema = schema.replace(KEYPACE_PACEHOLDER, keyspace)\n\n statements = schema.split(';')\n for stmt in statements:\n if len(stmt) > 0:\n stmt = stmt + ';'\n self.session.execute(stmt)", "def set_keyspace(self, keyspace_name):\n self.get_session().set_keyspace(keyspace_name);", "def generate_sql_schema(self, schema, schema_name, psql_tables_path):\n psql_tables = open(psql_tables_path, 'w')\n psql_tables.write(\"SET client_min_messages TO WARNING;\\n\")\n psql_tables.write(\"DROP SCHEMA IF EXISTS %s CASCADE;\\n\" % schema_name)\n psql_tables.write(\"CREATE SCHEMA IF NOT EXISTS %s;\\n\" % schema_name)\n psql_tables.write(\"SET SCHEMA '%s';\\n\" % schema_name)\n psql_tables.write(\"CREATE EXTENSION \\\"unaccent\\\";\\n\\n\")\n\n for table_name, table_attr in schema['tables'].iteritems():\n psql_tables.write(\"\\n-- CREATE TABLE %s \\n %s \\n %s \\n\" % (\n table_attr['name'], self._get_sql_drop_table(table_attr),\n self._get_sql_create_table(table_attr)\n ))\n\n psql_tables.close()", "def create_schema(conn, schemapath):\n with open(schemapath, 'r') as f:\n sql = f.read()\n with conn.cursor() as curs:\n curs.execute(sql)", "def create_schema(self, schema, *, debug=False):\n c = self.conn.cursor()\n for line in schema.split(\";\"):\n line = line.strip()\n if len(line)>0:\n if self.debug or debug:\n print(f\"{line};\", file=sys.stderr)\n try:\n c.execute(line)\n except (sqlite3.Error, pymysql.MySQLError) as e:\n print(\"SQL:\", line, file=sys.stderr)\n print(\"Error:\", e, file=sys.stderr)\n exit(1)", "def visit_table(self, sytable):\n self.current.update(sytable)", "def visit_table(self, sytable):\n self.current.update(sytable)", "def settable(self) -> bool:\n return self._data_provider.settable", "def make_schema_changes(self, session, namespace='ns1'):\n debug(\"make_schema_changes() \" + str(namespace))\n session.execute('USE ks_%s' % namespace)\n # drop keyspace\n session.execute('DROP KEYSPACE ks2_%s' % namespace)\n wait(2)\n\n # create keyspace\n self.create_ks(session, \"ks3_%s\" % namespace, 2)\n session.execute('USE ks_%s' % namespace)\n\n wait(2)\n # drop column family\n session.execute(\"DROP COLUMNFAMILY cf2_%s\" % namespace)\n\n # create column family\n query = \"\"\"\n CREATE TABLE cf3_%s (\n col1 uuid PRIMARY KEY,\n col2 text,\n col3 text,\n col4 text\n );\n \"\"\" % (namespace)\n session.execute(query)\n\n # alter column family\n query = \"\"\"\n ALTER COLUMNFAMILY cf_%s\n ADD col4 text;\n \"\"\" % namespace\n session.execute(query)\n\n # add index\n session.execute(\"CREATE INDEX index2_%s ON cf_%s(col3)\"%(namespace, namespace))\n\n # remove an index\n session.execute(\"DROP INDEX index_%s\" % namespace)", "def schema(self):\n pass", "def add_schema_attribute(self):\n schema_id = self.file.options['schema_id_attr']\n if self.sdef['df'] and self.file.options['include_schema_id']:\n # Normal defined entity\n ns = self.sdef['ns']\n id = self.sdef['id']\n schema = ns + \":\" + id\n self.attributes[schema_id] = {'value': schema}\n elif self.file.options['flag_custom_nodes']:\n self.attributes[schema_id] = {'value': 'custom'}", "def table(self):\n return self.snowflake_options.table", "def create_table(self, schema, table):\n fields = \", \".join([\" \".join(t) for t in zip(self.schemas[schema][table][0], self.schemas[schema][table][1])])\n sql = f'set role {self.write_role}; ' \\\n + f'CREATE TABLE IF NOT EXISTS {schema}.{table} ( {fields} 
);'\n return sql", "def __init__(self, schema=None):\n self.schema = schema or {}", "def sit(self, table):\n self.table = table", "def initialise_schema(db_name: str, password: str):\n conn = psycopg2.connect(host='localhost', dbname=db_name, user='postgres', password=password)\n cursor = conn.cursor()\n cursor.execute(_query)\n conn.commit()\n conn.close()\n\n print('Database schema was created successfully!\\n')", "def build_schema(self, dframe, overwrite=False, set_num_columns=True):\n new_schema = self.schema.rebuild(dframe, overwrite)\n self.set_schema(new_schema,\n set_num_columns=(set_num_columns or overwrite))", "def create_schema(schema): \n\n query = \"CREATE SCHEMA IF NOT EXISTS {}\".format(schema)\n qdb.execute(query)", "def table(self, table):\n self._table = table\n return self", "def __init__(self, schema ):\n self.schema = schema", "def ensure_schema(client, table_name):\n query = ''.join([\n 'CREATE TABLE {cf} ',\n '(\"lockId\" ascii, \"claimId\" timeuuid, PRIMARY KEY(\"lockId\", \"claimId\"));'])\n\n def errback(failure):\n failure.trap(InvalidRequestException)\n\n return client.execute(query.format(cf=table_name),\n {}, ConsistencyLevel.QUORUM).addErrback(errback)", "def create_schema():\n schema = Schema(idx=ID(stored=True),\n data=STORED,\n body=TEXT(analyzer=StemmingAnalyzer()),\n )\n print(\"schema creation successful\")\n return schema", "def add_table(self, schema, query=None, create=True):\n if not constants.NAME_RX.match(schema[\"name\"]):\n raise ValueError(\"invalid table name\")\n if utils.name_in_nocase(schema[\"name\"], self.db[\"tables\"]):\n raise ValueError(\"name is already in use for a table\")\n if utils.name_in_nocase(schema[\"name\"], self.db[\"views\"]):\n raise ValueError(\"name is already in use for a view\")\n if query:\n sql = dbshare.query.get_sql_statement(query)\n sql = 'CREATE TABLE \"%s\" AS %s' % (schema[\"name\"], sql)\n utils.execute_timeout(self.dbcnx, sql)\n if not schema.get(\"description\"):\n schema[\"description\"] = sql\n sql = 'PRAGMA table_info(\"%s\")' % schema[\"name\"]\n schema[\"columns\"] = []\n for row in self.dbcnx.execute(sql):\n column = {\"name\": row[1]}\n if row[2] == \"INT\":\n column[\"type\"] = constants.INTEGER\n else:\n column[\"type\"] = row[2]\n schema[\"columns\"].append(column)\n elif create:\n sql = get_sql_create_table(schema)\n self.dbcnx.execute(sql)\n with self.dbcnx:\n sql = f\"INSERT INTO {constants.TABLES} (name,schema) VALUES (?,?)\"\n self.dbcnx.execute(sql, (schema[\"name\"], json.dumps(schema)))\n self.update_table(schema)\n self.db[\"tables\"][schema[\"name\"]] = schema", "def set_feature_table(self, feature_table):\n\n if self.feature_table is not None:\n logger.warning(\n 'Feature table is already set, changing it now will not recompile '\n 'existing rules')\n self.feature_table = feature_table", "def _get_table_schema(self):\n\n return {\n 'AttributeDefinitions': [\n {\n 'AttributeName': self._key_field.name,\n 'AttributeType': self._key_field.data_type\n }\n ],\n 'TableName': self.table_name,\n 'KeySchema': [\n {\n 'AttributeName': self._key_field.name,\n 'KeyType': 'HASH'\n }\n ],\n 'ProvisionedThroughput': {\n 'ReadCapacityUnits': self.read_capacity_units,\n 'WriteCapacityUnits': self.write_capacity_units\n }\n }", "def createschema(self):\n def closure(cur):\n cur.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS config (\n key varchar(1024) PRIMARY KEY,\n value text\n );\n CREATE TABLE IF NOT EXISTS rooms (\n id serial PRIMARY KEY,\n name text NOT NULL\n );\n CREATE TABLE IF NOT EXISTS slides 
(\n id serial PRIMARY KEY,\n -- The ordering index of the slide, set to NULL if slide should be hidden\n sequence_no integer NULL UNIQUE,\n -- The room that should be displayed on this slide, set to NULL for master slides aren't associated with a room\n room integer REFERENCES rooms NULL,\n -- The masters are numbered sequentially and defined in content.py\n master integer NOT NULL,\n -- Overrides the title (normally the room name will be used)\n title text NULL,\n -- If max_rows is NULL, use the config default\n max_rows integer NULL\n );\n CREATE TABLE IF NOT EXISTS events (\n id serial PRIMARY KEY,\n room integer REFERENCES rooms NOT NULL,\n begins timestamp NOT NULL,\n ends timestamp NOT NULL,\n name text NOT NULL\n );\n \"\"\")\n \n self.execute(closure)", "def _TableSetup(self):\n global _tablesetup\n global singlestarLocation\n if not _tablesetup:\n singlestar.star_setup(singlestarLocation)\n _tablesetup = True", "def schema() -> None:\n pass", "def init_db():\n with closing(connect_db()) as db:\n with app.open_resource('schema.sql') as fobj:\n db.cursor().executescript(fobj.read())\n db.commit()", "def __init__(self, table_name, cursor=None, schema=None):\n self.name = table_name\n self.tablespace_name = None\n self.table_type = None\n self.columns = {}\n self.indexes = {}\n self.constraints = {}\n self.triggers = {}\n if schema:\n self.schema = schema\n else:\n self.schema = None\n if cursor:\n self._get_table(cursor)", "def create_schema(db_name, schema_name):\n # 1. Connect to database\n conn = connect()\n cur = conn.cursor()\n conn.autocommit = True\n\n command_drop = \"\"\"DROP SCHEMA IF EXISTS {} CASCADE\"\"\".format(schema_name)\n command_create = \"\"\"\n CREATE SCHEMA {}\n\n CREATE TABLE cityjson (\n id serial PRIMARY KEY,\n name text,\n referenceSystem int,\n bbox geometry(POLYGON),\n datasetTitle text,\n metadata jsonb,\n meta_attr jsonb,\n transform jsonb\n )\n\n CREATE TABLE cityobject (\n id serial PRIMARY KEY,\n obj_id text,\n parents text[],\n children text[],\n bbox geometry(POLYGON),\n attributes jsonb,\n vertices jsonb,\n object jsonb,\n cityjson_id int REFERENCES cityjson (id) on delete cascade on update cascade\n )\n \"\"\".format(schema_name)\n\n commands = [command_drop, command_create]\n\n for command in commands:\n cur.execute(command)\n conn.commit()\n\n conn.close()\n print(\"\"\"The creation of schema \"{}\" in database \"{}\" is done\"\"\".format(schema_name, db_name))", "def setup_tables(self):\n try:\n self.cursor.execute('CREATE SCHEMA sandbox')\n self.cursor.execute(\"DROP TABLE sandbox.dvds_rdbhdb_super;\")\n except (db.ProgrammingError, db.OperationalError), e:\n # sandbox may not exist\n pass #raise\n\n try:\n self.cursor.execute(\n \"\"\"CREATE TABLE sandbox.dvds_rdbhdb_super(\n id SERIAL PRIMARY KEY,\n name varchar(40) NOT NULL,\n rating float,\n UNIQUE(name)\n );\n \"\"\" )\n except db.ProgrammingError, e:\n if e[0] != '42P07':\n raise", "def setup_schema(BaseDao, session):\n def setup_schema_fn():\n for class_ in BaseDao._decl_class_registry.values():\n if hasattr(class_, '__tablename__'):\n if class_.__name__.endswith('Schema'):\n raise ModelConversionError(\n \"For safety, setup_schema can not be used when a\"\n \"Model class ends with 'Schema'\"\n )\n\n class Meta(object):\n model = class_\n sqla_session = session\n dump_only = ('pkId', 'created', 'modified')\n\n schema_class_name = '%sSchema' % class_.__name__\n\n schema_class = type(\n schema_class_name,\n (ModelSchema,),\n {'Meta': Meta}\n )\n\n setattr(class_, '__marshmallow__', 
schema_class)\n\n return setup_schema_fn", "def init_test_schema(db_parameters) -> Generator[None, None, None]:\n ret = db_parameters\n with snowflake.connector.connect(\n user=ret[\"user\"],\n password=ret[\"password\"],\n host=ret[\"host\"],\n port=ret[\"port\"],\n database=ret[\"database\"],\n account=ret[\"account\"],\n protocol=ret[\"protocol\"],\n ) as con:\n con.cursor().execute(f\"CREATE SCHEMA IF NOT EXISTS {TEST_SCHEMA}\")\n yield\n con.cursor().execute(f\"DROP SCHEMA IF EXISTS {TEST_SCHEMA}\")", "def create_table(self):\n Engine.create_table(self)\n self.connection.commit()", "def schema(self):\n raise NotImplementedError", "def initdb():\n db = getdb()\n\n with open(os.path.join(config.BASE_DIRECTORY, 'schema.sql')) as f:\n db.executescript(f.read())", "def clean_table(self, a_schema, a_table):\n \n self._conn.execute(\"delete from %s.%s;\" %(a_schema, a_table))", "def init_db():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def init_db():\n with app.app_context():\n db = connect_db()\n with app.open_resource('schema.sql') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def init_db():\n db = get_db()\n with current_app.open_resource('schema.sql') as f:\n db.executescript(f.read().decode('utf8'))", "def init_db():\n db = get_db()\n\n with current_app.open_resource(\"schema.sql\") as f:\n db.executescript(f.read().decode(\"utf8\"))", "def _add_to_schema(self, new: dict):\n self._defaults.update(new)\n self._migrate()", "def schema(self) -> graphql.GraphQLSchema:\n return self._schema", "def truncateTable(self, schema, table):\r\n return self.runSql('TRUNCATE TABLE {}'.format(self.encodeTableName(schema, table)))", "def __init__(\n self,\n server_name,\n schema,\n database,\n staging_bucket_name,\n storage_integration_name,\n create_disposition,\n write_disposition,\n table_schema,\n user_data_mapper,\n username=None,\n password=None,\n private_key_path=None,\n raw_private_key=None,\n private_key_passphrase=None,\n o_auth_token=None,\n table=None,\n query=None,\n role=None,\n warehouse=None,\n expansion_service=None,\n ):\n # pylint: disable=line-too-long\n verify_credentials(\n username=username,\n password=password,\n private_key_path=private_key_path,\n raw_private_key=raw_private_key,\n o_auth_token=o_auth_token,\n )\n WriteDisposition.VerifyParam(write_disposition)\n CreateDisposition.VerifyParam(create_disposition)\n\n self.params = WriteToSnowflakeSchema(\n server_name=server_name,\n schema=schema,\n database=database,\n staging_bucket_name=staging_bucket_name,\n storage_integration_name=storage_integration_name,\n create_disposition=create_disposition,\n write_disposition=write_disposition,\n table_schema=table_schema,\n username=username,\n password=password,\n private_key_path=private_key_path,\n raw_private_key=raw_private_key,\n private_key_passphrase=private_key_passphrase,\n o_auth_token=o_auth_token,\n table=table,\n query=query,\n role=role,\n warehouse=warehouse,\n )\n self.user_data_mapper = user_data_mapper\n self.expansion_service = expansion_service or default_io_expansion_service()", "def _load_tdx_schema(self):\n tdx_schema: TDXSchema = TDXSchema(dict())\n if _sqliteinfotable.checkInfoTable(self.sqlEngine):\n info_keys = _sqliteinfotable.getInfoKeys(\n self.sqlEngine, [SCHEMA_KEY], self.session_maker)\n if info_keys: # lists are False is empty\n info_keys.setdefault(SCHEMA_KEY, dict())\n # dataset schema definition\n tdx_schema = info_keys[SCHEMA_KEY]\n # dataset 
data schema\n tdx_schema.setdefault(\"dataSchema\", dict())\n self.tdx_schema = tdx_schema\n self.tdx_data_schema = t.cast(\n schemaconverter.TDXDataSchema, tdx_schema[\"dataSchema\"])", "def set(self, name):\n self.rpc.call(MsfRpcMethod.DbSetWorkspace, [name])", "def refresh_tables(self):\n\n if self.key is None:\n raise AttributeError('Can not refresh tables on uninitialised db')\n\n self.tables = self.client.ssclient.GetWorksheetsFeed(self.key)", "def ProcessSchemaUpdate(ref, args, request):\n table = request.table\n relaxed_columns = args.relax_columns\n if not table.schema and not relaxed_columns: # if not updating schema,\n return request # then just return.\n\n original_schema = _TryGetCurrentSchema(ref.Parent().Name(),\n ref.Name(),\n ref.projectId)\n\n new_schema_columns = table.schema\n updated_fields = _GetUpdatedSchema(original_schema,\n new_schema_columns,\n relaxed_columns)\n\n table_schema_type = GetApiMessage('TableSchema')\n request.table.schema = table_schema_type(fields=updated_fields)\n\n return request", "def init_db():\n\twith closing(connect_db()) as db:\n\t\twith app.open_resource('schema.sql', mode='r') as f:\n\t\t\tdb.cursor().executescript(f.read())\n\t\tdb.commit()", "def add_column(self, schema):\n self[schema.name] = schema.copy()", "def set_schema_collection(db, schema_file, collection_name):\r\n current_dir = path.dirname(path.abspath(__file__))\r\n json_path = os.path.join(current_dir, schema_file)\r\n abs_json_path = os.path.abspath(json_path)\r\n with open(abs_json_path, 'r') as j:\r\n validator = json.loads(j.read())\r\n\r\n query = [('collMod', collection_name),\r\n ('validator', validator),\r\n ('validationLevel', 'moderate')]\r\n\r\n query = OrderedDict(query)\r\n db.command(query)", "def setup_schemaProperties(self):\n\n propname = 'xml_schema'\n curr = getattr(self, propname, '')\n try:\n self._delProperty(propname)\n except ValueError:\n pass\n try:\n delattr(self, propname)\n except:\n pass\n setattr(self, propname, curr)\n\n properties = list(self._properties)\n properties.append({'id': propname,\n 'type': 'selection',\n 'select_variable': 'get_schemaCandidates',\n 'mode': 'w'})\n\n self._properties = tuple(properties)", "def get_schema(self, repo, table):\n return self.user_con.get_schema(repo=repo, table=table)", "def fetch_table_schema(self, table_name):\n ddl = self.query(sql.show_create_table(table_name))\n if ddl:\n try:\n return parse_create(ddl[0][\"Create Table\"])\n except ParseError as e:\n raise OSCError(\n \"TABLE_PARSING_ERROR\",\n {\"db\": self._current_db, \"table\": self.table_name, \"msg\": str(e)},\n )", "def init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def drop_schema(self):\n connRepo = self.connect(self.cxRepo)\n sql = f\"\"\"drop schema if exists {self.schemaRepo} cascade\"\"\"\n with connRepo:\n with connRepo.cursor() as curs:\n curs.execute(sql)", "def get_schema(self):\r\n return self.__schema" ]
[ "0.7617374", "0.7328084", "0.7328084", "0.7328084", "0.7075173", "0.6481325", "0.63790625", "0.63008064", "0.6234697", "0.61715263", "0.61460024", "0.61385447", "0.6092166", "0.60661066", "0.60366577", "0.6025765", "0.5955309", "0.59028065", "0.5798888", "0.5796591", "0.5755517", "0.57495695", "0.57179797", "0.5708069", "0.55995846", "0.55919933", "0.5587192", "0.5577047", "0.55727684", "0.55640143", "0.5541447", "0.55070335", "0.55070335", "0.5478006", "0.54756665", "0.54714006", "0.5459882", "0.5434894", "0.5428338", "0.5413443", "0.54060644", "0.5404253", "0.53983337", "0.53820837", "0.5338433", "0.5337342", "0.5321555", "0.5321555", "0.5309147", "0.5307275", "0.5302147", "0.5296221", "0.528317", "0.52736175", "0.5266881", "0.5259156", "0.5248697", "0.52371025", "0.52043015", "0.5187532", "0.51868176", "0.5185799", "0.5183599", "0.5170605", "0.5170491", "0.5163657", "0.5150391", "0.51407385", "0.5122892", "0.51027083", "0.5101843", "0.50982124", "0.5087779", "0.50794786", "0.50754756", "0.50563353", "0.50561816", "0.5027497", "0.5023155", "0.5021989", "0.5017814", "0.5005781", "0.5005559", "0.5005425", "0.49950954", "0.49872607", "0.4978043", "0.49738586", "0.49578637", "0.49444577", "0.4943105", "0.49396768", "0.49360374", "0.4933409", "0.49258524", "0.49224424", "0.49209023", "0.49132887", "0.49093893", "0.49056512" ]
0.72960466
4
Returns the table name of this snowflake table.
def table(self):
        return self._table
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_table_name(self):\n return self._table", "def table_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"table_name\")", "def table_name() -> str:\n pass", "def table_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"table_name\")", "def table_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"table_name\")", "def table_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"table_name\")", "def table(self):\n return self._table_name", "def table_name(self):\n return self._new_table.name", "def get_tablename(self):\n return self.ds_table", "def table(self):\n return self.snowflake_options.table", "def table_name(self) -> str:\n return self.model._meta.db_table", "def table_name(self) -> str:\n return \"OLTP\"", "def __tablename__(cls):\n return get_table_name(cls.__name__)", "def tablename(entity) -> str:\n return entity.__tablename__", "def table_name(class_):\n try:\n return class_.__tablename__\n except AttributeError:\n return class_.__table__.name", "def table_id(self) -> str:\n return pulumi.get(self, \"table_id\")", "def table(cls):\n return cls.__name__", "def graph_queries_table_name(self) -> str:\n return pulumi.get(self, \"graph_queries_table_name\")", "def getTableByName(self, tablename):\n pass", "def table(self):\n if not self.exists:\n return None\n return self._get_table()", "def table(self, table_name):\n return self._get_storage().table(table_name)", "def tablename(klass):\n if not hasattr(klass, 'TABLENAME'):\n inf = Inflector()\n klass.TABLENAME = inf.tableize(klass.__name__)\n return klass.TABLENAME", "def get_context_table_name(self, table):\r\n return self.context_table_name or \"table\"", "def standardize_table_name(self, schema_name: str, table: str) -> str:\n return table", "def name(self) -> str:\n return f\"lookup_table_{self.table_number}\"", "def table(self):\n return self.reference.table", "def delta_table_name(self):\n if len(self._old_table.name) < constant.MAX_TABLE_LENGTH - 10:\n return constant.DELTA_TABLE_PREFIX + self._old_table.name\n elif (\n len(self._old_table.name) >= constant.MAX_TABLE_LENGTH - 10\n and len(self._old_table.name) < constant.MAX_TABLE_LENGTH - 2\n ):\n return constant.SHORT_DELTA_TABLE_PREFIX + self._old_table.name\n else:\n return constant.DELTA_TABLE_PREFIX + constant.GENERIC_TABLE_NAME", "def getTable(self):\n return self.table", "def tablename(self):\n _, tail = os.path.split(self.url)\n return tail[:-4]", "def table(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"table\")", "def name(self) -> str:\n return self.fqtable.replace(\".\", \"_\")", "def destination_table(self) -> str:\n return pulumi.get(self, \"destination_table\")", "def _table_name(self, name: AnyStr) -> bytes:\n name = ensure_bytes(name)\n if self.table_prefix is None:\n return name\n return self.table_prefix + self.table_prefix_separator + name", "def get_table_name(model_id: Text) -> Text:\n return model_id if not cfg.app.db.schema else cfg.app.db.schema + \".\" + model_id", "def getTable(self):\n return self.db.table(self.entity)", "def getTable(self, tablename):\n tablename = self.prefix + tablename\n if not tablename in self.tables:\n self.tables[tablename] = Table( tablename, self.metadata, \\\n autoload=True, autoload_with=self.conn )\n\n return self.tables[tablename]", "def getTableDefForTable(self, tableName):\n\t\tif not \".\" in tableName:\n\t\t\ttableName = \"public.\"+tableName\n\t\t\n\t\tfor row in self.readerConnection.queryToDicts(\n\t\t\t\t\"select sourcerd, tablename from 
dc.tablemeta where\"\n\t\t\t\t\" lower(tableName)=%(tableName)s\",\n\t\t\t\t{\"tableName\": tableName.lower()}):\n\t\t\tbreak\n\t\telse:\n\t\t\traise base.ui.logOldExc(\n\t\t\t\tbase.NotFoundError(tableName, \"table\", \"dc_tables\"))\n\n\t\treturn base.caches.getRD(row[\"sourcerd\"]\n\t\t\t).getById(row[\"tablename\"].split(\".\")[-1])", "def get_table_names(self):\n return self.engine.table_names()", "def getTable(self):\n\n raise NotImplementedError", "def renamed_table_name(self):\n if len(self._old_table.name) < constant.MAX_TABLE_LENGTH - 10:\n return constant.RENAMED_TABLE_PREFIX + self._old_table.name\n elif (\n len(self._old_table.name) >= constant.MAX_TABLE_LENGTH - 10\n and len(self._old_table.name) < constant.MAX_TABLE_LENGTH - 2\n ):\n return constant.SHORT_RENAMED_TABLE_PREFIX + self._old_table.name\n else:\n return constant.RENAMED_TABLE_PREFIX + constant.GENERIC_TABLE_NAME", "def _get_table(self):\n\t\treturn self._table", "def name(self):\n if self.table:\n return \"{}.{}\".format(self.table, self.field_name)\n return self.field_name", "def __tablename__(cls) -> str:\n return inflection.underscore(cls.__name__)", "def get_table_column_name(self, table):\n c = self.conn.cursor()\n c.execute(\"SELECT * FROM %s\" % table)\n names = list(map(lambda x: x[0], c.description))\n return names", "def getTableNames(self):\n\n # The specific command depends on whether we are using mysql or sqlite\n if self.connector == 'mysql':\n sqlcmd = (\"SELECT table_name FROM INFORMATION_SCHEMA.TABLES \" +\n \"WHERE table_schema='\" + self.dbname + \"'\")\n else:\n sqlcmd = \"SELECT name FROM sqlite_master WHERE type='table'\"\n\n self._c.execute(sqlcmd)\n tbnames = [el[0] for el in self._c.fetchall()]\n\n return tbnames", "def getTable(self, name: str):\n query = f\"SELECT * FROM '{name}';\"\n result = sql.executeAndReadQuery(self.connection, query)\n return result", "def get_table_name(query: str) -> str:\n find_table_name_from_query = r'(FROM `)(\\w+.\\w+)(`)'\n search_result = re.search(find_table_name_from_query, query)\n if search_result:\n return search_result.group(2)\n return \"Unrecognized table name\"", "def table_reference(self) -> 'outputs.PreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReference':\n return pulumi.get(self, \"table_reference\")", "def tableName():\n return \"people\"", "def table_names(self, cursor=None):\r\n return [kind.key().name() for kind in Query(kind='__kind__').Run()]", "def new_table_name(self):\n if len(self._old_table.name) < constant.MAX_TABLE_LENGTH - 10:\n return constant.NEW_TABLE_PREFIX + self.table_name\n elif (\n len(self._old_table.name) >= constant.MAX_TABLE_LENGTH - 10\n and len(self._old_table.name) < constant.MAX_TABLE_LENGTH - 2\n ):\n return constant.SHORT_NEW_TABLE_PREFIX + self.table_name\n else:\n return constant.NEW_TABLE_PREFIX + constant.GENERIC_TABLE_NAME", "def table(self, name: str) -> ir.TableExpr:\n qualified_name = self._qualify(name)\n return self.client.table(qualified_name, self.name)", "def table(self, name):\r\n if name in self._tables:\r\n return _tables[name]\r\n\r\n table = Table(name, self._storage)", "def get_table_name_from_model(model):\n return \"{0};{1}\".format(model._meta.app_label, model._meta.model_name)", "def table(entity) -> sa.Table:\n return entity.__table__", "def _table_path(self):\n return self._switch.path_on_odl + \"flow-node-inventory:table/%d/\" % self._table_id", "def get_sandbox_table_name(dataset_id, rule_name):\n return '{dataset_id}_{rule_name}'.format(dataset_id=dataset_id,\n 
rule_name=re.sub(\n r'\\W', '_', rule_name))", "def _table_name(cls, suffix, relative=False):\n mname = inspect.getmodule(cls).__name__ + '_' + suffix\n if relative:\n mname = mname.split('.')[-1]\n return mname", "def construct_bq_table_path(table_name: str) -> str:\n if not re.match(r'^\\w+$', table_name):\n raise ValueError(\n f'{table_name} should contain only letters, numbers and underscore.')\n\n return '{}.{}.{}'.format(\n get_airflow_variable('dest_project'),\n get_airflow_variable('dest_dataset'), table_name)", "def test_table_name(self):\n obs = SampleTemplate._table_name(self.test_study.id)\n self.assertEqual(obs, \"sample_1\")", "def table(self):\r\n return self._table", "def encodeTableName(self, schema, table):\r\n return '\"{}\".\"{}\"'.format(schema, table)", "def get_table_byname(self, aTable):\n if aTable in self._tablesObjects.keys():\n oTable = self._tablesObjects[aTable]\n else:\n oTable = None\n return oTable", "def __getTable(self):\n\n if not self.__table:\n tableConnectionParams = parseConnectionString(\n self.tableConnString);\n\n self.__table = Table(\n tableConnectionParams['name'],\n connection = getDbConnection(tableConnectionParams));\n\n return self.__table;", "def get_type(self) -> str:\n return Tables.ESL.name", "def sensorsTableName(self):\n return 'sensors'", "def table(self) -> 'outputs.PreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTable':\n return pulumi.get(self, \"table\")", "def table(self, name):\r\n return NamedTable(self.name, name)", "def table_ref(self):\n return self._table_ref", "def symbol_table(self) -> str:\n return self._symbol_table", "def table(self, table_name):\n return self._create_table(table_name)", "def set_tablename(self, name):\n self.ds_table = name", "def rename_table(base, tablename: str, table: Table) -> str:\n return snake_to_camel(tablename, upper=True)", "def _table_id(project: str, table: FeatureView) -> str:\n return f\"{project}_{table.name}\"", "def get_table_pk_name(table):\n return '_'.join([table._meta.model_name, table ._meta.pk.name])", "def get_table_query_string(self) -> str:\n if self.database and self.table:\n return f'\"{self.database}\".\"{self.schema}\".\"{self.table}\"'\n elif self.table:\n return f'\"{self.table}\"'\n else:\n return f\"({self.query})\"", "def uniq_table_class_name(self):\n class_name = self.__class__.__name__\n prefix = self.prefix\n return \"{pref}{cls}\".format(pref=prefix, cls=class_name)", "def __tablename__(self):\n return sub(r\"(?<!^)(?=[A-Z])\", \"_\", self.__name__).lower()", "def parent_table(self):\n for suffix in self.SUFFIXES:\n if self.table.endswith(suffix):\n return self.table[:-len(suffix)]\n raise ValueError(\"{} does not match any of the known suffixes for a data table\".format(self.table))", "def table_reference(self) -> 'outputs.TableReferenceResponse':\n return pulumi.get(self, \"table_reference\")", "def Fetch_All_Table_Names(self, d_params=None):\n ctx = self.__Connect_To_Snowflake(d_params)\n all_tables = ctx.cursor().execute(\"show tables\")\n ctx.close()\n return [x[1] for x in all_tables]", "def schema(self):\n return self.table_info.schema", "def table(self, name: str, database: str | None = None) -> ir.Table:\n alch_table = self._get_sqla_table(name, schema=database)\n node = self.table_class(source=self, sqla_table=alch_table)\n return self.table_expr_class(node)", "def autoname(self):\n ret = \"%(table)s_%(reftable)s_fkey\"%dict(\n table=self.table.name,\n reftable=self.reftable.name,\n )\n return ret", "def table(self) -> 
'outputs.PreventionJobTriggerInspectJobActionSaveFindingsOutputConfigTable':\n return pulumi.get(self, \"table\")", "def shootersTableName(self):\n return 'shooters'", "def test_table_name(self):\n obs = PrepTemplate._table_name(1)\n self.assertEqual(obs, \"prep_1\")", "def get_table_definition(jwt_payload: dict, schema_name: str, table_name: str):\n DJConnector.set_datajoint_config(jwt_payload)\n\n schema_virtual_module = dj.create_virtual_module(schema_name, schema_name)\n return getattr(schema_virtual_module, table_name).describe()", "def compute_tableprefix(cls):\n cls.dbtablename_prefix = cls.dbm().get_tablenameprefix(cls.dbschemaname)", "def _get_table_name(url):\n try:\n return urlparse(url).path.strip('/').split('/')[1]\n except IndexError:\n return None", "def fetch_table_schema(self, table_name):\n ddl = self.query(sql.show_create_table(table_name))\n if ddl:\n try:\n return parse_create(ddl[0][\"Create Table\"])\n except ParseError as e:\n raise OSCError(\n \"TABLE_PARSING_ERROR\",\n {\"db\": self._current_db, \"table\": self.table_name, \"msg\": str(e)},\n )", "def list_tables(self) -> List[str]:\n return self.dynamodb_client.list_tables()[\"TableNames\"]", "def _extract_ks_tab(name):\n sp = name.split(\".\")\n if len(sp) == 2:\n ksp = sp[0]\n table = sp[1]\n else:\n ksp = config.execution_name\n table = name\n return ksp.lower().encode('UTF8'), table.lower().encode('UTF8')", "def _find_table(name):\n tables = Base.metadata.tables\n table = tables.get(name, None)\n if table is not None:\n return table\n else:\n raise NameError('Unable to locate table: %s' % name)", "def create_table(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"create_table\")", "def create_table(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"create_table\")", "def get_name(tablename):\n\n return tablename[tablename.find(\"_\") + 1:].replace(\"_\", \" \").capitalize()", "def get_table_class(self):\r\n if self.table_class:\r\n return self.table_class\r\n raise ImproperlyConfigured(\"A table class was not specified. Define \"\r\n \"%(cls)s.table_class\"\r\n % {\"cls\": type(self).__name__})", "def get_tables_in_schema(self, conn, schema_name):\n return conn.get_tables(schema_name)['table_name']" ]
[ "0.872357", "0.8587216", "0.8301262", "0.82909656", "0.82909656", "0.82909656", "0.82124346", "0.8067328", "0.80299294", "0.8022681", "0.79793465", "0.758069", "0.74290794", "0.7428557", "0.7422888", "0.7326025", "0.723662", "0.71609664", "0.71264887", "0.7048128", "0.70468855", "0.70271957", "0.70244193", "0.7016829", "0.7011169", "0.6924068", "0.69057417", "0.69015485", "0.68992615", "0.6824658", "0.6783915", "0.67713666", "0.67336106", "0.66821414", "0.6669012", "0.6644566", "0.65462095", "0.65420175", "0.6527802", "0.65164804", "0.650062", "0.64988947", "0.6496613", "0.64733464", "0.64731455", "0.64705884", "0.6447456", "0.6421707", "0.6408549", "0.6403498", "0.63940364", "0.6389927", "0.6369068", "0.6340589", "0.63239974", "0.6306309", "0.6294118", "0.62903994", "0.6237854", "0.6232782", "0.62152255", "0.62129223", "0.6209189", "0.61943203", "0.61783314", "0.61727196", "0.6164783", "0.61516017", "0.6139309", "0.61386615", "0.6137397", "0.61280316", "0.6127305", "0.6115822", "0.6110451", "0.6096392", "0.6057668", "0.6056062", "0.6027385", "0.6014862", "0.60088736", "0.6004277", "0.60023516", "0.5999834", "0.59904146", "0.59669703", "0.5959004", "0.5955293", "0.59524965", "0.5945748", "0.5936772", "0.5898416", "0.5892476", "0.5889468", "0.58634216", "0.58634216", "0.5852981", "0.58392537", "0.58379084" ]
0.64299667
48
Sets the table ref of this snowflake table.
def table(self, table):
        self._table = table
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def table_ref(self, table_ref):\n self._table_ref = table_ref", "def setTable(self, tabledef):\n if isinstance(tabledef, str):\n self._table = Table.Get ( tabledef )\n elif isinstance(tabledef, Table):\n self._table = tabledef\n else:\n raise ValueError (\"table - must be table name or Table instance.\" )", "def table(self, table):\n self._table = table\n return self", "def set_tablename(self, name):\n self.ds_table = name", "def table_ref(self):\n return self._table_ref", "def set_feature_table(self, feature_table):\n\n if self.feature_table is not None:\n logger.warning(\n 'Feature table is already set, changing it now will not recompile '\n 'existing rules')\n self.feature_table = feature_table", "def into(self, table):\n self._tables.set(table)\n return self", "def table_reference(self) -> 'outputs.PreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReference':\n return pulumi.get(self, \"table_reference\")", "def sit(self, table):\n self.table = table", "def set_parent_table(self, table):\n self.__parent_table = table", "def table_reference(self) -> 'outputs.TableReferenceResponse':\n return pulumi.get(self, \"table_reference\")", "def setRevertable(self, b):\n\n self.revertable = b", "def set(self, table):\n if table is None:\n return\n for name in table.dtype.names:\n self._set_column(name, table[name])", "def set_ref(self, new_ref):\n self.__ref = new_ref", "def schema_ref(schema, table):\n return schema + '.' + table", "def table(self):\n return self.reference.table", "def table(self):\n return self.snowflake_options.table", "def reference(self, reference):\n\n self._reference = reference", "def reference(self, reference):\n\n self._reference = reference", "def route_table_id(self, route_table_id):\n self._route_table_id = route_table_id", "def AddTable(self, table):\n self.tables.append(table)", "def use_reference(self, ref):\n self._ref = ref\n self._ref_name = (\n get_pretty_var_names(\n target_vars=[ref],\n local_vars=inspect.currentframe().f_back.f_locals.items(),\n fallback_name_prefix=\"Ref\",\n )[0]\n if self._ref_name is None\n else self._ref_name\n )\n return self", "def set_data(self, indexer_table):\n\n self.indexer_table = indexer_table", "def setReference(self,ref):\n rospy.wait_for_service('/myo/myo_muscle0_controller/set_ref')\n try:\n sDsp = rospy.ServiceProxy('/myo/myo_muscle0_controller/set_ref',myo_msgs.srv.SetReference)\n sDsp(ref)\n except(rospy.ServiceException, e):\n print(\" \")", "def set_reference(self, traj):\n self.ref_traj_aa = traj[0]\n self.ref_traj = self.mapping.map_traj(traj[0])", "def setColorTable(table='rainbow'):\n colortabledict = {'small':'SMALL','vga':'VGA','rainbow':'RAIN',\n 'violet':'SPEC', 'greyscale':'GREY', 'reverse rainbow':'RRAIN', \n 'reverse violet':'RSPEC', 'reverse grey':'RGREY'} \n dislin.setvlt(colortabledict[table])", "def visit_table(self, sytable):\n self.current.update(sytable)", "def visit_table(self, sytable):\n self.current.update(sytable)", "def setPortRef(self, *args):\n return _libsbml.SBaseRef_setPortRef(self, *args)", "def _seek_to_table(self, table):\n\n self.stream.seek(self.table_pointers[table])", "def init_table_obj(self):\n # Check the existence of original table\n if not self.table_exists(self.table_name):\n raise OSCError(\n \"TABLE_NOT_EXIST\", {\"db\": self._current_db, \"table\": self.table_name}\n )\n self._old_table = self.fetch_table_schema(self.table_name)\n self.partitions[self.table_name] = self.fetch_partitions(self.table_name)\n # The table after swap will have the same partition 
layout as current\n # table\n self.partitions[self.renamed_table_name] = self.partitions[self.table_name]\n # Preserve the auto_inc value from old table, so that we don't revert\n # back to a smaller value after OSC\n if self._old_table.auto_increment:\n self._new_table.auto_increment = self._old_table.auto_increment\n # We don't change the storage engine in OSC, so just use\n # the fetched instance storage engine\n self._new_table.engine = self._old_table.engine\n # Populate both old and new tables with explicit charset/collate\n self.populate_charset_collation(self._old_table)\n self.populate_charset_collation(self._new_table)", "def add_table(self, name):\n self.puml_tables[name] = {\n 'default': OrderedDict(),\n 'foreign': OrderedDict(),\n 'primary': OrderedDict()\n }\n # Set current table name.\n self.current_table = name", "def refu(self, refu):\n\n self._refu = refu", "def use_table(self):\n connection = self._get_connection()\n cursor = connection.cursor()\n cursor.execute(\n 'select exists(select * from information_schema.tables where table_name=%s)',\n (self.table,),\n )\n if cursor.fetchone()[0]:\n self.logger.info('Using existing table')\n else:\n try:\n cursor.execute(\n f'CREATE TABLE {self.table} ( \\\n ID VARCHAR PRIMARY KEY, \\\n DOC BYTEA);'\n )\n self.logger.info('Successfully created table')\n except (Exception, psycopg2.Error) as error:\n self.logger.error('Error while creating table!')\n connection.commit()\n self._close_connection(connection)", "def set_table_status(table_id: int, status: int) -> Table:\n table = Table.query.filter_by(id=table_id).first()\n table.status = status\n\n db.session.commit()\n\n return table", "def reference_id(self, reference_id):\n\n self._reference_id = reference_id", "def table_name(self):\n return self._new_table.name", "def setUnitRef(self, *args):\n return _libsbml.SBaseRef_setUnitRef(self, *args)", "def set_reference_id(self, reference_id):\n self.reference_id = reference_id", "def setSBaseRef(self, *args):\n return _libsbml.SBaseRef_setSBaseRef(self, *args)", "def setup_table(table_name = None, reconstruct = False):\n \n if table_name is None:\n table_name = config[\"default-table\"]\n\n conn, tunnel = create_db_conn()\n try:\n cur = conn.cursor()\n cur.execute(\"\"\"\n USE %s\n \"\"\"%(config['db'], ))\n\n if reconstruct:\n cur.execute(\"\"\"\n DROP TABLE IF EXISTS `%s`;\n \"\"\"%(table_name,))\n cur.execute(\"\"\"CREATE TABLE `%s` (\n `id` INT UNSIGNED AUTO_INCREMENT,\n `actual value` INT UNSIGNED,\n `predict value` INT UNSIGNED,\n PRIMARY KEY(`id`)\n )\n ;\"\"\"%(table_name,))\n conn.commit()\n\n cur.execute(\"\"\"\n SHOW TABLES;\n \"\"\")\n conn.commit()\n \n all_tables = cur.fetchall()\n assert((table_name,) in all_tables)\n print(\"setup_table PASSED\")\n except Exception as e:\n print(\"setup_table FAILED\")\n print(e)\n\n conn.close()\n tunnel.close()", "def sethead(self, ref):\n log.debug('[%s] Setting to ref %s', self.name, ref)\n try:\n ref = self.repo.rev_parse(ref)\n except gitdb.exc.BadObject:\n # Probably means we don't have it cached yet.\n # So maybe we can fetch it.\n ref = self.fetchref(ref)\n log.debug('[%s] Setting head to %s', self.name, ref)\n self.repo.head.reset(ref, working_tree=True)\n log.debug('[%s] Head object: %s', self.name, self.currenthead)", "def setrawtable(self, rawtable):\n\n # Store the \"raw\" table data\n self.__rawtable = rawtable", "def initRef(self, obj_pkgs):\n if self.comment and self.comment.startswith('@'):\n i = self.comment.index('.')\n name = self.comment[1:i]\n table = 
obj_pkgs.get(name, None)\n if table:\n self.ref = MySqlRef(self, obj_pkgs)\n self.query = True\n else:\n self.ref = None\n self.query = True\n else:\n self.ref = None\n self.query = False", "def chart_data_table(self, chart_data_table):\n\n self.container['chart_data_table'] = chart_data_table", "def class_ref(self, class_ref):\n\n self._class_ref = class_ref", "def table(self, table_name):\n return self._create_table(table_name)", "def append_table(self, table):\n\n self._db_manager.register_table(table)", "def set_master_table(filepath):\n my_globals['master_table_path'] = filepath\n my_globals['master_table_data'] = None", "def setTableComment(self, schema, table, comment):\r\n return self.runSql('COMMENT ON TABLE {} IS \\'{}\\''.format(self.encodeTableName(schema, table), self.encodeLiteral(comment)))", "def set_reference(self, refobj, reference):\n refnodeattr = \"%s.referencenode\" % refobj\n if reference:\n cmds.connectAttr(\"%s.message\" % reference, refnodeattr, force=True)\n ns = cmds.referenceQuery(reference, namespace=True)\n cmds.setAttr(\"%s.namespace\" % refobj, ns, type=\"string\")\n else:\n conns = cmds.listConnections(refnodeattr, plugs=True)\n if not conns:\n return\n for c in conns:\n cmds.disconnectAttr(c, refnodeattr)", "def getTableByName(self, tablename):\n pass", "def table(self):\n return self._table_name", "def create_table_url(self, table_id):\n return self.base_url + \"/table?table=\" + str(table_id)", "def labeltable(self, labeltable):\n if not isinstance(labeltable, GiftiLabelTable):\n raise TypeError(\"Not a valid GiftiLabelTable instance\")\n self._labeltable = labeltable", "def set_designator(self, ref):\n self.ref = ref", "def table(self, table_id):\n return Table(table_id, self)", "def __init__(self, tableConnString):\n\n self.tableConnString = tableConnString;\n self.__table = None", "def setPortRef(self, *args):\n return _libsbml.Port_setPortRef(self, *args)", "def settable(self) -> bool:\n return self._data_provider.settable", "def test_table_reference(self):\n networktables_mock = unittest.mock.Mock()\n table_mock = unittest.mock.Mock()\n # When table is gotten from first network, table will be None,\n # from second network will return table_mock\n networktables_mock.getTable.side_effect = [None, table_mock]\n\n network_instance = network.Network(networktables_mock, None, None)\n\n # Test initial Network.table value\n self.assertTrue(network_instance.table is None)\n\n network_instance.change_server(\"localhost\")\n\n # Test final value of Network.table\n self.assertTrue(network_instance.table == table_mock)", "def _set_interface_ref(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_interface_ref_openconfig_qos_interfaces__qos_interfaces_interface_interface_ref, is_container='container', yang_name=\"interface-ref\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"interface_ref must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_interface_ref_openconfig_qos_interfaces__qos_interfaces_interface_interface_ref, is_container='container', yang_name=\"interface-ref\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, 
extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__interface_ref = t\n if hasattr(self, '_set'):\n self._set()", "def _set_interface_ref(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_interface_ref_openconfig_qos__qos_interfaces_interface_interface_ref, is_container='container', yang_name=\"interface-ref\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"interface_ref must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_interface_ref_openconfig_qos__qos_interfaces_interface_interface_ref, is_container='container', yang_name=\"interface-ref\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__interface_ref = t\n if hasattr(self, '_set'):\n self._set()", "def setTable(self, signalNumber, table,\n numbering=xctypes.XCLOC_C_NUMBERING):\n fname = '%s::%s'%(self.__class__.__name__, self.setTable.__name__)\n if (not self.linit):\n print(\"%s: Module not initialized\"%fname)\n i = signalNumber\n if (numbering == xctypes.XCLOC_C_NUMBERING): \n i = i + 1\n it = c_int(1)\n ierr = c_int(1)\n self.lib.xcloc_signalToTableIndex(i, it, ierr) \n it = it.value\n ngrd = table.size\n if (ierr.value != 0):\n print(\"%s: Could not find signal %d\"%(fname, signalNumber)) \n return -1\n ngrd = table.size\n table = table.flatten(order='C') \n if (table.dtype == float32):\n table = ascontiguousarray(table, float32)\n tablePtr = table.ctypes.data_as(POINTER(c_float))\n self.lib.xcloc_setTable32f(it, ngrd, tablePtr, ierr)\n if (ierr.value != 0):\n print(\"%s: Failed to call setTable32f\"%fname)\n elif (table.dtype == float64):\n table = ascontiguousarray(table, float64)\n tablePtr = table.ctypes.data_as(POINTER(c_double))\n self.lib.xcloc_setTable64f(it, ngrd, tablePtr, ierr)\n if (ierr.value != 0):\n print(\"%s: Failed to call setTable64f\"%fname)\n else:\n print(\"%s: Precision must be float32 or float64\"%fname)\n return -1\n ierr = ierr.value\n return ierr", "def from_(self, table):\n self.from__ = table\n\n return self", "def define_table(self, cls):\n\n self.append_table(cls)\n\n return cls", "def _init_table(self, table: \"Table\"):\n if not self.columns:\n self.columns = table.columns\n self._data = table.data", "def get_table_name(self):\n return self._table", "def table_save_data_frame(self, table_name):\n self.recordset_df = pd.read_sql_table(table_name, self.con)\n return self", "def logs_table(self, logs_table):\n\n self._logs_table = logs_table", "def set_reference(self, reference):\n\t\tif ((reference == 0) or (reference == 1)):\n\t\t\tself._reference = reference\n\t\telse:\n\t\t\tsys.stderr.write(\"\\nERROR : %s reference must be 0 (internal) or 1 (external) so it can't be %s !\\n\" % (self._target_id, reference))\n\t\t\tsys.exit(1)", "def setReference(self, *args):\n return _libsbml.Association_setReference(self, *args)", "def _set_interface_ref(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = 
v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_interface_ref_openconfig_qos_elements__qos_interfaces_interface_interface_ref, is_container='container', yang_name=\"interface-ref\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"interface_ref must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_interface_ref_openconfig_qos_elements__qos_interfaces_interface_interface_ref, is_container='container', yang_name=\"interface-ref\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__interface_ref = t\n if hasattr(self, '_set'):\n self._set()", "def table(self, table_name):\n return self._get_storage().table(table_name)", "def upload_table_data(client: bigquery.Client, tableRef: bigquery.Table, fusionFile: str) -> bigquery.LoadJob:\n with open(fusionFile, mode='rb') as file:\n job = client.load_table_from_file(file, tableRef)\n return job", "def setRef(self,reference):\n (iMod,iObj) = reference\n self.rnam.setData(struct.pack('i',iObj)[:3] + struct.pack('B',iMod))\n self.setChanged()", "def add_table(self, table, data_frame, alias=None):\n if alias is not None:\n for name in alias:\n self.alias_map[name] = table\n self.alias_map[table] = table\n self.data_frames[table] = data_frame", "def table(self, *tables):\n self._tables.append(', '.join(tables))\n return self", "def for_table(cls, table_doc):\n model_doc = {\n 'schemas': {\n table_doc['schema_name']: {\n 'tables': {\n table_doc['table_name']: table_doc\n }\n }\n }\n }\n return cls(model_doc)", "def __init__(self, tablename):\n dynamodb = boto3.resource(\"dynamodb\")\n self._table = dynamodb.Table(table_name)\n super().__init__()", "def symbol_table(self, value: str):\n self._symbol_table = value", "def new_table(self):\n self.c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS {table} (\n id integer primary key,\n {event} integer,\n {desc} text,\n {date} text,\n {link} text)\n \"\"\".format(\n table=TABLE,\n event=EVENT,\n desc=DESC,\n date=DATE,\n link=LINK,\n )\n )", "def create_reference_ids_table(self, vals, table_name='_ref'):\n # remove existing\n self._drop_tables([table_name])\n cursor = self.conn.cursor()\n cursor.execute('CREATE TEMP TABLE %s (id INTEGER)' % table_name)\n for i, v in enumerate(vals):\n cursor.execute('INSERT INTO %s VALUES(%d)' % (table_name, v))", "def _TableSetup(self):\n global _tablesetup\n global singlestarLocation\n if not _tablesetup:\n singlestar.star_setup(singlestarLocation)\n _tablesetup = True", "def table_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"table_name\")", "def table_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"table_name\")", "def table_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"table_name\")", "def table(self, name):\r\n if name in self._tables:\r\n return _tables[name]\r\n\r\n table = Table(name, self._storage)", "def do_set_table(tpath, comment):\n content = sys.stdin.read()\n runs = run.split('-')\n if len(runs) == 1:\n runs.append(ccdb.INFINITE_RUN)\n ass = provider.create_assignment([[content]], tpath, \n runs[0], 
runs[1],\n var, comment)", "def add_table(self, table: pd.DataFrame, table_name: str, table_description: str) -> None:\n\n columns = table.columns\n\n columns_lists = []\n for i in range(0, len(columns), self.table_columns_count):\n columns_list = []\n for j in range(i, len(columns)):\n columns_list.append(columns[j])\n if(len(columns_list) == self.table_columns_count):\n break\n columns_lists.append(columns_list)\n\n tag = r\"\"\n for i in range(len(columns_lists)):\n columns_list = columns_lists[i]\n\n if(len(columns_lists) == 1):\n table_num = r''\n else:\n table_num = r' [' + str(i + 1) + r'/' + str(len(columns_lists)) + r']'\n\n if(len(table) < 30):\n tag = tag + r'''\n \\begin{table}[H]\n \\center\n \\caption{''' + table_description + table_num + '''}\n \\label{tab:''' + table_name + str(len(self.tables)) + r'''}\n \\begin{tabular}{c''' + ' c' * len(columns_list) + '''}\n '''\n else:\n tag = tag + r'''\n \\begin{longtable}{''' + 'c ' + ' c' * len(columns_list) + '''}\n \\caption{''' + table_description + table_num + '''\\label{tab:''' + table_name + str(len(self.tables)) + r'''}}\\\\\n '''\n\n cell = str(columns_list[0])\n\n for column in columns_list[1:]:\n cell = cell + r' & ' + str(column)\n tag = tag + cell + r''' \\\\\n\n \\hline\n '''\n\n for j in range(len(table)):\n cell = str(table[columns_list[0]].values[j])\n\n for column in columns_list[1:]:\n cell = cell + r' & ' + str(table[column].values[j])\n\n tag = tag + cell + r''' \\\\\n '''\n\n if(len(table) < 30):\n tag = tag + r'''\n \\hline\n \\end{tabular}\n \\end{table}\n '''\n else:\n tag = tag + r'''\n \\hline\n \\end{longtable}\n '''\n\n self.tables[len(self.tables)] = [table_name, table_description]\n\n tag = tag.replace('%', '\\%').replace('_', '\\_').replace('#', '\\#')\n\n self.doc = self.doc + tag", "def table_id(self) -> str:\n return pulumi.get(self, \"table_id\")", "def on(self, table):\n self._last_foreign.on(table)\n return self", "def set_content(reftable, variable):\n assert isinstance(reftable, rt.ReferenceTable)\n assert IsEUDVariable(variable)\n\n _reftable_epd << EPD(reftable)\n _reftable_sz << f_dwread_epd(_reftable_epd)\n _value << variable\n _result_epd << EPD(variable.getValueAddr())", "def getTable(self, tablename):\n tablename = self.prefix + tablename\n if not tablename in self.tables:\n self.tables[tablename] = Table( tablename, self.metadata, \\\n autoload=True, autoload_with=self.conn )\n\n return self.tables[tablename]", "def add_reference(self,ref):\n \n master = None #for the table that is referenced\n slave = None #for the table that uses the reference\n new_root = None\n \n for child in ref:\n local_tag = child.tag.split(\"}\")[1] #gets the local part of the tag\n if local_tag == 'connections':\n new_root = child\n break\n \n #new_root == None means the connection exists but is not properly\n #connected to any table in the diagram\n if new_root == None: ###\n self.err.print_error(\"dia:ref_not_closed\") ###\n e_code = self.err.exit_code[\"diagram\"] ###\n ###\n exit(e_code) ###\n \n \n for child in new_root: \n #master table\n if child.attrib['handle'] == \"0\":\n master_id = child.attrib['to'] #gets the master id\n master = self.table_dict[master_id] \n \n #slave table\n elif child.attrib['handle'] == \"1\":\n slave_id = child.attrib['to'] #gets the slave id\n slave = self.table_dict[slave_id] \n \n \n #error check if either table not found\n if master == None or slave == None: ###\n self.err.print_error(\"dia:ref_not_closed\") ###\n e_code = self.err.exit_code[\"diagram\"] ###\n ###\n 
exit(e_code) ###\n \n \n #updating both tables\n master.add_slave(slave)\n slave.add_foreign_key(master)\n \n return", "def assign_ref_id(self: _EntityT, ref_id: EntityId) -> _EntityT:\n return dataclasses.replace(self, ref_id=ref_id)", "def setIdRef(self, *args):\n return _libsbml.SBaseRef_setIdRef(self, *args)", "def setModelRef(self, *args):\n return _libsbml.ExternalModelDefinition_setModelRef(self, *args)", "def set_service(service_name, reference):\n Container.services[service_name] = reference" ]
[ "0.840069", "0.7259923", "0.68128806", "0.6655275", "0.6641437", "0.62347263", "0.61731863", "0.6130862", "0.60809314", "0.6049684", "0.60375315", "0.5879953", "0.58563435", "0.5758894", "0.5728879", "0.5722117", "0.5694347", "0.5653783", "0.5653783", "0.5530361", "0.5422365", "0.534846", "0.53335243", "0.53321475", "0.5330522", "0.53290904", "0.53054357", "0.53054357", "0.5266486", "0.52524304", "0.52396685", "0.52388614", "0.5235997", "0.5227938", "0.521754", "0.5212905", "0.5202962", "0.51997733", "0.51922554", "0.5178704", "0.51784956", "0.51692903", "0.51549464", "0.51518506", "0.5140881", "0.5135589", "0.51219773", "0.5113227", "0.51131755", "0.5101158", "0.5099151", "0.50775075", "0.50671774", "0.50544983", "0.50472665", "0.5029499", "0.5026231", "0.50021976", "0.49856615", "0.49763334", "0.49717698", "0.49475738", "0.4941906", "0.4939616", "0.49360353", "0.4922869", "0.4922052", "0.49219242", "0.4920315", "0.48950967", "0.48937842", "0.48776034", "0.48713925", "0.48632768", "0.4861684", "0.48538294", "0.48451504", "0.4840564", "0.48401007", "0.48352638", "0.48339647", "0.48287052", "0.4798199", "0.4793844", "0.47916365", "0.47916365", "0.47916365", "0.4789586", "0.47789544", "0.4767255", "0.47646606", "0.47642815", "0.4754874", "0.47503012", "0.4750094", "0.4749701", "0.47400185", "0.4738774", "0.47286794" ]
0.6948597
3
Creates a SnowflakeOptions from a protobuf representation of a snowflake option.
def from_proto(cls, snowflake_options_proto: DataSourceProto.SnowflakeOptions): snowflake_options = cls( database=snowflake_options_proto.database, schema=snowflake_options_proto.schema, table=snowflake_options_proto.table, query=snowflake_options_proto.query, ) return snowflake_options
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_proto(self) -> DataSourceProto.SnowflakeOptions:\n snowflake_options_proto = DataSourceProto.SnowflakeOptions(\n database=self.database,\n schema=self.schema,\n table=self.table,\n query=self.query,\n )\n\n return snowflake_options_proto", "def from_proto(cls, hive_options_proto: Any):\n\n pass", "def _pacbio_legacy_option_from_dict(d):\n warnings.warn(\n \"This is obsolete and will disappear soon\",\n DeprecationWarning)\n\n opt_id = d['pb_option']['option_id']\n name = d['pb_option']['name']\n default = d['pb_option']['default']\n desc = d['pb_option']['description']\n option_type_id = to_ascii(d['pb_option']['type'])\n\n # Hack to support \"number\"\n if option_type_id == \"number\":\n option_type_id = \"float\"\n\n return __simple_option_by_type(opt_id, name, default, desc, option_type_id)", "def to_python(self, value):\n if value is None:\n return value\n value = super(BitOptionsField, self).to_python(value)\n return BitOptions(self.options.flags, value)", "def ParseOptions(cls, options, config_object):", "def create_from_pb2(cls, pb2_obj: _BaseOptionsProto) -> 'BaseOptions':\n return BaseOptions(\n model_asset_path=pb2_obj.model_asset.file_name,\n model_asset_buffer=pb2_obj.model_asset.file_content)", "def from_proto(data_source: DataSourceProto):\n return SnowflakeSource(\n field_mapping=dict(data_source.field_mapping),\n database=data_source.snowflake_options.database,\n schema=data_source.snowflake_options.schema,\n table=data_source.snowflake_options.table,\n event_timestamp_column=data_source.event_timestamp_column,\n created_timestamp_column=data_source.created_timestamp_column,\n date_partition_column=data_source.date_partition_column,\n query=data_source.snowflake_options.query,\n )", "def from_json(cls, options_json: Text) -> 'StatsOptions':\n options_dict = json.loads(options_json)\n type_name = options_dict.pop(_TYPE_NAME_KEY, None)\n if type_name is not None and type_name != 'StatsOptions':\n raise ValueError('JSON does not encode a StatsOptions')\n if _SCHEMA_JSON_KEY in options_dict:\n options_dict['_schema'] = json_format.Parse(\n options_dict[_SCHEMA_JSON_KEY], schema_pb2.Schema())\n del options_dict[_SCHEMA_JSON_KEY]\n if _SLICING_CONFIG_JSON_KEY in options_dict:\n options_dict['_slicing_config'] = json_format.Parse(\n options_dict[_SLICING_CONFIG_JSON_KEY],\n slicing_spec_pb2.SlicingConfig())\n del options_dict[_SLICING_CONFIG_JSON_KEY]\n per_feature_weight_override_json = options_dict.get(\n _PER_FEATURE_WEIGHT_OVERRIDE_JSON_KEY)\n if per_feature_weight_override_json is not None:\n options_dict['_per_feature_weight_override'] = {\n types.FeaturePath.from_json(k): v\n for k, v in per_feature_weight_override_json.items()\n }\n del options_dict[_PER_FEATURE_WEIGHT_OVERRIDE_JSON_KEY]\n options = cls()\n options.__dict__ = options_dict\n return options", "def to_pb2(self) -> _FaceDetectorGraphOptionsProto:\n base_options_proto = self.base_options.to_pb2()\n base_options_proto.use_stream_mode = (\n False if self.running_mode == _RunningMode.IMAGE else True\n )\n return _FaceDetectorGraphOptionsProto(\n base_options=base_options_proto,\n min_detection_confidence=self.min_detection_confidence,\n min_suppression_threshold=self.min_suppression_threshold,\n )", "def initialize_options(self):\n self.proto_path = \"oef-core-protocol\"", "def from_internal_dict(cls, params):\n options = cls({}) # basic default options\n opt_dict = options.__dict__\n\n for key, val in opt_dict.items():\n options.__dict__[key] = params.get(key, val)\n\n return options", "def 
loads(text):\n values = {}\n for line in text.splitlines():\n line = line.strip()\n if not line or line.startswith(\"#\"):\n continue\n name, value = line.split(\"=\", 1)\n values[name] = value\n return Options(options_values=values)", "def tcp_pkt_parse_options(options: bytes)->dict:\n i = 0\n options_dict = dict()\n opts_rev_mapping = enum_value_to_enum(TCPOptions)\n while i < len(options):\n prefix = options[i]\n i += 1\n if prefix == dpkt.tcp.TCP_OPT_EOL or prefix == dpkt.tcp.TCP_OPT_NOP:\n options_dict[opts_rev_mapping[prefix]] = (0, None)\n else:\n if i < len(options):\n opt_len = options[i]\n i += 1\n if prefix in opts_rev_mapping.keys():\n options_dict[opts_rev_mapping[prefix]] = (opt_len, options[i:i + opt_len - 2])\n else:\n options_dict[UNKNOWN_OPTION_PREFIX + str(prefix)] = (opt_len, options[i:i + opt_len - 2])\n i += opt_len - 2\n else:\n break\n return options_dict", "def from_caffe_solver_protoxt(cls, caffe_solver_prototxt_file: Path):\n solver_param = caffe_pb2.SolverParameter()\n with open(caffe_solver_prototxt_file, 'rt') as f:\n pb2.text_format.Merge(f.read(), solver_param)\n dictionary = {'lr_policy': solver_param.lr_policy,\n 'base_lr': solver_param.base_lr,\n 'gamma': solver_param.gamma,\n 'momentum': solver_param.momentum,\n 'max_iter': solver_param.max_iter,\n 'stepsize': solver_param.stepsize,\n 'stepvalues': solver_param.stepvalue,\n 'weight_decay': solver_param.weight_decay,\n 'iter_size': solver_param.iter_size,\n 'from_prototxt': caffe_solver_prototxt_file}\n return cls(**dictionary)", "def _create_options(self):\n self._OPTIONS = {}", "def from_json(cls, value: str, options: Set[Option] = None):\n doc_raw: dict = json.loads(value)\n return cls.deserialize(doc_raw, options)", "def deconstruct(self):\n name, path, args, kwargs = super(SimpleBitOptionsField,\n self).deconstruct()\n if kwargs['default'] == self.options.maximum_value:\n del kwargs['default']\n kwargs['options'] = self.options.flags\n return name, path, args, kwargs", "def __init__(self):\n super(t_var_size_Options, self).__init__()\n self.options = {\n t_var_size_Options.BOARD_ID : {'value' : '', 'name' : 'board_id' },\n t_var_size_Options.CURRENT_STATE : {'value' : '', 'name' : 'state' },\n t_var_size_Options.PATTERN_WAVE : {'value' : '', 'name' : 'pat_wav' }\n }", "def decode_option(as_bytes: typing.List[int], inner_cl_type: CLType):\n is_defined, rem_bytes = bool(as_bytes[0]), as_bytes[1:]\n\n return decode(inner_cl_type, rem_bytes) if is_defined else None", "def from_dict(cls, dikt) -> 'EavDataAttributeOptionInterface':\n return deserialize_model(dikt, cls)", "def from_json(cls, json_text: Text):\n options = SampleOptions(**json.loads(json_text))\n # JSON parsing produces lists rather than tuples for JSON arrays. 
Convert\n # these elements into tuples for immutability\n for k, v in options._asdict().items():\n if isinstance(v, list):\n options = options._replace(**{k: tuple(v)})\n return options", "def clean_and_validate_options(self):\n options = self.options\n\n id = options.get('id', None)\n assert(isinstance(id, str) or id is None)\n options['id'] = id\n\n name = options.get('name', None)\n assert(isinstance(name, str) or name is None)\n options['name'] = name\n\n version = options.get('version', None)\n assert(isinstance(version, str) or version is None)\n options['version'] = version", "def options(self, **kwds):\n opts = dict(self.opts)\n for k in kwds:\n try:\n # Ensure that the key exists because we want to change\n # existing options, not add new ones.\n _ = opts[k]\n except KeyError:\n raise ValueError(\"invalid option {!r}\".format(k))\n opts[k] = kwds[k]\n return type(self)(self.cls, opts, self.kwargs)", "def pacbio_option_from_dict(d):\n # This should probably be pushed into pbcommand/pb_io/* for consistency\n # Extensions are supported by adding a dispatch method by looking for\n # required key(s) in the dict.\n if \"choices\" in d and d.get('choices') is not None:\n # the None check is for the TCs that are non-choice based models, but\n # were written with \"choices\" key\n return _pacbio_choice_option_from_dict(d)\n else:\n return _pacbio_option_from_dict(d)", "def parse_options(self, options):\n pass", "def optionxform(self, optionstr):\r\n return optionstr", "def optionxform(self, optionstr):\r\n return optionstr", "def __init__(self, optv):\n self.__p4optv = optv\n # Treat '-g' like '-G' except the marshal'ed Python dicts\n # will be unmarshal'ed.\n if '-g' in self.__p4optv:\n self.__p4optv[self.__p4optv.index('-g')] = '-G'\n self.__unmarshal = 1\n else:\n self.__unmarshal = 0\n # Drop '-s'. 
'p4' implements this on the client side and so\n # should 'px' (XXX though it does not yet), so the option should\n # not be passed to the server.\n if '-s' in self.__p4optv:\n self.__p4optv.remove('-s')\n log.warn(\"dropping '-s' option, px cannot yet handle it\")\n _ListCmd.__init__(self)", "def FromProto(cls, proto_obj):\n source = GameSource()\n source.type = proto_obj.type\n if proto_obj.update_time_utc_str:\n source.update_date_time = datetime.strptime(\n proto_obj.update_time_utc_str, tweets.DATE_PARSE_FMT_STR)\n else:\n source.update_date_time = datetime.now()\n if proto_obj.twitter_account:\n source.account_id = long(proto_obj.twitter_account.id_str)\n source.tweet_text = proto_obj.tweet_text\n if proto_obj.score_reporter_url:\n source.score_reporter_url = proto_obj.score_reporter_url\n if not (source.account_id or source.score_reporter_url):\n raise GameModelError('Converting GameSource from malformed proto')\n return source", "def set_options(self, packet, field, value):\n base, option = field.split(\"-\")\n assert base == \"options\", \"Must use an options field with set_options\"\n\n option_type = self.option_str_to_int(option)\n if type(value) == str:\n # Prepare the value for storage in the packet\n value = binascii.unhexlify(value)\n\n # Scapy requires these options to be a tuple - since evaling this\n # is not yet supported, for now, SAck will always be an empty tuple\n if option in [\"sack\"]:\n value = ()\n # These options must be set as integers - if they didn't exist, they can\n # be added like this\n if option in [\"timestamp\", \"mss\", \"wscale\", \"altchksum\", \"uto\"] and not value:\n value = 0\n i = 0\n # First, check if the option is already present in the packet\n for option in self.layer.options:\n # Scapy may try to be helpful and return the string of the option\n next_option = self.option_str_to_int(option[0])\n\n if option_type == next_option:\n packet[\"TCP\"].options[i] = self.format_option(option_type, value)\n break\n i += 1\n # If we didn't break, the option doesn't exist in the packet currently.\n else:\n old_options_array = packet[\"TCP\"].options\n old_options_array.append(self.format_option(option_type, value))\n packet[\"TCP\"].options = old_options_array\n\n # Let scapy recalculate the required values\n del self.layer.chksum\n del self.layer.dataofs\n if packet.haslayer(\"IP\"):\n del packet[\"IP\"].chksum\n del packet[\"IP\"].len\n return True", "def to_proto(self) -> DataSourceProto:\n data_source_proto = DataSourceProto(\n type=DataSourceProto.BATCH_SNOWFLAKE,\n field_mapping=self.field_mapping,\n snowflake_options=self.snowflake_options.to_proto(),\n )\n\n data_source_proto.event_timestamp_column = self.event_timestamp_column\n data_source_proto.created_timestamp_column = self.created_timestamp_column\n data_source_proto.date_partition_column = self.date_partition_column\n\n return data_source_proto", "def to_pb2(self) -> _BaseOptionsProto:\n if self.model_asset_path is not None:\n full_path = os.path.abspath(self.model_asset_path)\n else:\n full_path = None\n\n return _BaseOptionsProto(\n model_asset=_ExternalFileProto(\n file_name=full_path, file_content=self.model_asset_buffer))", "def __init__(self, _id, _name, _value, _default_value=None, _option_type=None):\n self.type = 'option'\n self.id = _id\n self.name = _name.strip()\n self.value = _value.strip()\n self.defaultValue = _default_value\n self.optionType = _option_type", "def load_engine_options_simple(engine_options_file: IO[AnyStr]) -> engine.ConfigMapping:\n options: 
engine.ConfigMapping = {}\n name_counts: Dict[str, int] = {}\n for line_in_file in engine_options_file.read().splitlines():\n line = typing.cast(str, line_in_file.strip())\n if not line or line.startswith('#'):\n continue\n line = line.split('#')[0].rstrip()\n if '=' not in line:\n raise ValueError(f'Missing \\'=\\' on line \\'{line_in_file}\\'')\n if line.startswith('='):\n raise ValueError(f'Missing option name on line \\'{line_in_file}\\'')\n (name, value) = line.split('=', maxsplit=1)\n if name in options:\n raise ValueError(f'Duplicate engine option \\'{name}\\'')\n lower_case_value = value.lower()\n if lower_case_value in ('true', 'false'):\n value = lower_case_value == 'true'\n else:\n try:\n value = int(value)\n except ValueError:\n pass\n options[name] = value\n # Count duplicates\n if name not in name_counts:\n name_counts[name] = 0\n name_counts[name] = name_counts[name] + 1\n _raise_if_duplicates(name_counts)\n return options", "def options():\n mappings = {key: key for key in Schema.option_columns}\n return Schema(mappings)", "def extract_kwargs_from_options(options):\n return modulation_utils.extract_kwargs_from_options(gfsk_mod.__init__,\n ('self',), options)\n extract_kwargs_from_options=staticmethod(extract_kwargs_from_options)", "def create_options(options_hash=None):\r\n opts = options_hash or {}\r\n if not isinstance(opts, dict):\r\n raise ValueError('The given options_hash must be a dict, got: %s' % options_hash)\r\n\r\n class Options(object):\r\n def __init__(self):\r\n self.__dict__ = opts\r\n return Options()", "def extract_kwargs_from_options(options):\n return modulation_utils.extract_kwargs_from_options(dqpsk_mod.__init__,\n ('self',), options)", "def extract_kwargs_from_options(options):\n return modulation_utils.extract_kwargs_from_options(\n dqpsk_demod.__init__, ('self',), options)", "def __init__(self, s=None, unchecked=False):\r\n # if not Options.defaults: # this is different from self.defaults!!!\r\n # Options.defaults = fmin([],[])\r\n if s is None:\r\n super(Options, self).__init__(Options.defaults())\r\n # self = Options.defaults()\r\n elif type(s) is str:\r\n super(Options, self).__init__(Options().match(s))\r\n # we could return here\r\n else:\r\n super(Options, self).__init__(s)\r\n\r\n if not unchecked:\r\n for key in list(self.keys()):\r\n if key not in Options.defaults():\r\n print('Warning in cma.Options.__init__(): invalid key ``' + str(key) + '`` popped')\r\n self.pop(key)\r\n # self.evaluated = False # would become an option entry\r", "def config(\n self,\n key: Optional[str] = None,\n value: Optional[Any] = None,\n *,\n map: Optional[Dict[str, \"OptionalPrimitiveType\"]] = None,\n ) -> \"SparkSession.Builder\":\n with self._lock:\n if map is not None:\n for k, v in map.items():\n self._options[k] = to_str(v)\n else:\n self._options[cast(str, key)] = to_str(value)\n return self", "def set_options(self, options):\n self.options = options", "def initialize_petsc_options():\n petsc_options = PETSc.Options()\n petsc_options.clear()\n for k in petsc_options.getAll(): petsc_options.delValue(k)\n petsc_options.setValue('ksp_type', 'fgmres')\n petsc_options.setValue('ksp_rtol', 1.0e-8)\n petsc_options.setValue('ksp_atol', 1.0e-8)\n petsc_options.setValue('ksp_gmres_restart', 300)\n petsc_options.setValue('ksp_gmres_modifiedgramschmidt', 1)\n petsc_options.setValue('ksp_pc_side','right')\n petsc_options.setValue('pc_type', 'fieldsplit')\n petsc_options.setValue('pc_fieldsplit_type', 'schur')\n petsc_options.setValue('pc_fieldsplit_schur_fact_type', 
'upper')\n petsc_options.setValue('pc_fieldsplit_schur_precondition', 'user')\n # Velocity block options\n petsc_options.setValue('fieldsplit_velocity_ksp_type', 'gmres')\n petsc_options.setValue('fieldsplit_velocity_ksp_gmres_modifiedgramschmidt', 1)\n petsc_options.setValue('fieldsplit_velocity_ksp_atol', 1e-5)\n petsc_options.setValue('fieldsplit_velocity_ksp_rtol', 1e-5)\n petsc_options.setValue('fieldsplit_velocity_ksp_pc_side', 'right')\n petsc_options.setValue('fieldsplit_velocity_fieldsplit_u_ksp_type', 'preonly')\n petsc_options.setValue('fieldsplit_velocity_fieldsplit_u_pc_type', 'hypre')\n petsc_options.setValue('fieldsplit_velocity_fieldsplit_u_pc_hypre_type', 'boomeramg')\n petsc_options.setValue('fieldsplit_velocity_fieldsplit_u_pc_hypre_boomeramg_coarsen_type', 'HMIS')\n petsc_options.setValue('fieldsplit_velocity_fieldsplit_v_ksp_type', 'preonly')\n petsc_options.setValue('fieldsplit_velocity_fieldsplit_v_pc_type', 'hypre')\n petsc_options.setValue('fieldsplit_velocity_fieldsplit_v_pc_hypre_type', 'boomeramg')\n petsc_options.setValue('fieldsplit_velocity_fieldsplit_v_pc_hypre_boomeramg_coarsen_type', 'HMIS')\n petsc_options.setValue('fieldsplit_velocity_fieldsplit_w_ksp_type', 'preonly')\n petsc_options.setValue('fieldsplit_velocity_fieldsplit_w_pc_type', 'hypre')\n petsc_options.setValue('fieldsplit_velocity_fieldsplit_w_pc_hypre_type', 'boomeramg')\n petsc_options.setValue('fieldsplit_velocity_fieldsplit_w_pc_hypre_boomeramg_coarsen_type', 'HMIS')\n #PCD Schur Complement options\n petsc_options.setValue('fieldsplit_pressure_ksp_type', 'preonly')\n petsc_options.setValue('innerTPPCDsolver_Qp_visc_ksp_type', 'preonly')\n petsc_options.setValue('innerTPPCDsolver_Qp_visc_pc_type', 'lu')\n petsc_options.setValue('innerTPPCDsolver_Qp_visc_pc_factor_mat_solver_type', 'superlu_dist')\n petsc_options.setValue('innerTPPCDsolver_Qp_dens_ksp_type', 'preonly')\n petsc_options.setValue('innerTPPCDsolver_Qp_dens_pc_type', 'lu')\n petsc_options.setValue('innerTPPCDsolver_Qp_dens_pc_factor_mat_solver_type', 'superlu_dist')\n petsc_options.setValue('innerTPPCDsolver_Ap_rho_ksp_type', 'richardson')\n petsc_options.setValue('innerTPPCDsolver_Ap_rho_ksp_max_it', 1)\n #petsc_options.setValue('innerTPPCDsolver_Ap_rho_ksp_constant_null_space',1)\n petsc_options.setValue('innerTPPCDsolver_Ap_rho_pc_type', 'hypre')\n petsc_options.setValue('innerTPPCDsolver_Ap_rho_pc_hypre_type', 'boomeramg')\n petsc_options.setValue('innerTPPCDsolver_Ap_rho_pc_hypre_boomeramg_strong_threshold', 0.5)\n petsc_options.setValue('innerTPPCDsolver_Ap_rho_pc_hypre_boomeramg_interp_type', 'ext+i-cc')\n petsc_options.setValue('innerTPPCDsolver_Ap_rho_pc_hypre_boomeramg_coarsen_type', 'HMIS')\n petsc_options.setValue('innerTPPCDsolver_Ap_rho_pc_hypre_boomeramg_agg_nl', 2)\n return petsc_options", "def test_protoDefaults(self):\n options = Options()\n options.parseOptions([])\n\n self.assertEqual(len(options[\"pop3\"]), 1)\n self.assertIsInstance(options[\"pop3\"][0], endpoints.TCP4ServerEndpoint)\n\n self.assertEqual(len(options[\"smtp\"]), 1)\n self.assertIsInstance(options[\"smtp\"][0], endpoints.TCP4ServerEndpoint)", "def initFromOptions(cls, ns, name=None):\n instance = cls(ns)\n if name is not None:\n instance.name = name\n instance.preprocessOptions()\n return cls._runloop(instance)", "def from_pb(cls, instance_pb, client):\n match = _INSTANCE_NAME_RE.match(instance_pb.name)\n if match is None:\n raise ValueError('Instance protobuf name was not in the '\n 'expected format.', instance_pb.name)\n if 
match.group('project') != client.project:\n raise ValueError('Project ID on instance does not match the '\n 'project ID on the client')\n instance_id = match.group('instance_id')\n\n result = cls(instance_id, client, _EXISTING_INSTANCE_LOCATION_ID)\n result._update_from_pb(instance_pb)\n return result", "def _restore_options(self):\r\n fn = self._opt_file_name()\r\n if os.path.exists(fn):\r\n try:\r\n opt = pickle.load(open(fn, \"r\"))\r\n self.tb_pragma.SetValue(opt[\"topic\"])\r\n self.tb_package.SetValue(opt[\"package\"])\r\n self.tb_file_header.SetValue(opt[\"header\"])\r\n self.tb_target_folder.SetValue(opt[\"target_folder\"])\r\n self.tb_encoding.SetValue(opt[\"encoding\"])\r\n except Exception as ex:\r\n print(\"Error durring restore default options\")\r\n print(ex)", "def selector_from_proto(\n s: schema_pb2.KeyDistributionOptions\n) -> reverb_types.SelectorType:\n if s.fifo:\n return reverb.selectors.Fifo()\n elif s.uniform:\n return reverb.selectors.Uniform()\n elif s.lifo:\n return reverb.selectors.Lifo()\n elif s.WhichOneof('distribution') == 'heap':\n if s.heap.min_heap:\n return reverb.selectors.MinHeap()\n else:\n return reverb.selectors.MaxHeap()\n elif s.WhichOneof('distribution') == 'prioritized':\n return reverb.selectors.Prioritized(\n s.prioritized.priority_exponent)\n else:\n simple_booleans_options = ('fifo', 'lifo', 'uniform')\n if s.WhichOneof('distribution') in simple_booleans_options:\n raise ValueError(f'distribution={s.WhichOneof(\"distribution\")}'\n ' but the associated boolean value is false.')\n else:\n raise NotImplementedError(\n f'distribution={s.WhichOneof(\"distribution\")}')", "def modify_options(parser, is_train):\n return parser", "def _GetOptionDecoderConstructions(cls):\n result = super()._GetOptionDecoderConstructions()\n result.update({\n 'service_type': (\n option_decoders.EnumDecoder,\n {\n 'valid_values': [\n DEFAULT_SPANNER_TYPE,\n ],\n 'default': DEFAULT_SPANNER_TYPE\n }),\n 'name': (option_decoders.StringDecoder, _NONE_OK),\n 'database': (option_decoders.StringDecoder, _NONE_OK),\n 'description': (option_decoders.StringDecoder, _NONE_OK),\n 'ddl': (option_decoders.StringDecoder, _NONE_OK),\n 'config': (option_decoders.StringDecoder, _NONE_OK),\n 'nodes': (option_decoders.IntDecoder, _NONE_OK),\n 'project': (option_decoders.StringDecoder, _NONE_OK),\n })\n return result", "def from_configuration(cls, config, options):\n return cls(**options)", "def FromWireFormat(cls, value):\n return _GetFactory(cls).FromWireFormat(value)", "def construct_option(arg, hooks, parser):\n option_args = construct_args(arg, hooks, parser, add_descriptions=False)\n if len(arg.option_strings) > 1:\n name = [str(name) for name in arg.option_strings]\n else:\n name = arg.option_strings[0]\n\n option = {\n \"name\": name,\n **get_base_suggestion(arg)\n }\n\n if hasattr(arg, \"action\") and arg.action in REPEATABLE_ACTIONS:\n option[\"isRepeatable\"] = True\n if option_args:\n option[\"args\"] = option_args\n if arg.required:\n option[\"isRequired\"] = True\n\n option_hook = hooks.get(\"option\")\n if option_hook:\n option_hook(option, parser)\n\n return option", "def parse_option_custom_entry( # noqa: PLR0912\n entry: Custom,\n options: FavaOptions,\n) -> None:\n key = entry.values[0].value.replace(\"-\", \"_\")\n if key not in All_OPTS:\n raise ValueError(f\"unknown option `{key}`\")\n\n if key == \"default_file\":\n options.default_file = entry.meta[\"filename\"]\n return\n\n value = entry.values[1].value\n if not isinstance(value, str):\n raise 
TypeError(f\"expected string value for option `{key}`\")\n\n if key == \"insert_entry\":\n try:\n pattern = re.compile(value)\n except re.error as err:\n raise TypeError(\n f\"Should be a regular expression: '{value}'.\",\n ) from err\n opt = InsertEntryOption(\n entry.date,\n pattern,\n entry.meta[\"filename\"],\n entry.meta[\"lineno\"],\n )\n options.insert_entry.append(opt)\n elif key == \"collapse_pattern\":\n try:\n pattern = re.compile(value)\n except re.error as err:\n raise TypeError(\n f\"Should be a regular expression: '{value}'.\",\n ) from err\n options.collapse_pattern.append(pattern)\n elif key == \"locale\":\n try:\n Locale.parse(value)\n options.locale = value\n except UnknownLocaleError as err:\n raise ValueError(f\"Unknown locale: '{value}'.\") from err\n elif key == \"fiscal_year_end\":\n fye = parse_fye_string(value)\n if fye is None:\n raise ValueError(\"Invalid 'fiscal_year_end' option.\")\n options.fiscal_year_end = fye\n elif key in STR_OPTS:\n setattr(options, key, value)\n elif key in BOOL_OPTS:\n setattr(options, key, value.lower() == \"true\")\n elif key in INT_OPTS:\n setattr(options, key, int(value))\n else: # key in TUPLE_OPTS\n setattr(options, key, tuple(value.strip().split(\" \")))", "def define_options(self):\n return {\n 'basename': OptionDef(required=True, default_value='keycloak', allowed_types=[str]),\n 'namespace': OptionDef(required=True, default_value='default', allowed_types=[str]),\n 'config': {\n 'service_port': OptionDef(required=True, default_value=8080, allowed_types=[int]),\n 'realm_import': OptionDef(format=OptionDefFormat.KDATA_VOLUME, allowed_types=[str, bytes, KData_Secret]),\n 'proxy_address_forwarding': OptionDef(format=OptionDefFormat.KDATA_ENV,\n allowed_types=[bool, *KDataHelper_Env.allowed_kdata()]),\n 'frontend_url': OptionDef(allowed_types=[str]),\n 'admin': {\n 'user': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'password': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, KData_Secret]),\n },\n 'db': {\n 'vendor': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'addr': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'port': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[int, *KDataHelper_Env.allowed_kdata()]),\n 'database': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'schema': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'user': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'password': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, KData_Secret]),\n },\n },\n 'container': {\n 'keycloak': OptionDef(required=True, default_value='quay.io/keycloak/keycloak:11.0.2', allowed_types=[str]),\n },\n 'kubernetes': {\n 'resources': {\n 'deployment': OptionDef(allowed_types=[Mapping]),\n }\n },\n }", "def FromPb(pb):\n if isinstance(pb, str):\n real_pb = entity_pb.EntityProto()\n real_pb.ParseFromString(pb)\n pb = real_pb\n\n return Entity._FromPb(pb, require_valid_key=False)", "def __init__(self, **options):\n self.options = options", "def from_protobuf(cls, msg):\n if not isinstance(msg, cls._protobuf_cls):\n raise TypeError(\"Expected message of type \"\n \"%r\" % cls._protobuf_cls.__name__)\n kwargs = {k: getattr(msg, k) for k in cls._get_params()}\n return 
cls(**kwargs)", "def _restore_salesforce_options_json(self, value):\r\n\r\n if not isinstance(value, dict):\r\n raise SDKException('Subclient', '101')\r\n\r\n self._salesforce_restore_option_json = {\r\n \"instanceType\": 'SALESFORCE',\r\n \"salesforceRestoreOptions\": {\r\n \"restoreToFileSystem\": value.get(\"to_fs\", True),\r\n \"pathToStoreCsv\": value.get(\"staging_path\", '/tmp/'),\r\n \"dependentRestoreLevel\": value.get(\"dependent_level\", 0),\r\n \"isMetadataRestore\": value.get(\"is_metadata\", False),\r\n \"restoreToSalesforce\": value.get(\"to_cloud\", False),\r\n \"restoreFromDatabase\": value.get(\"from_database\", False),\r\n \"overrideTable\": value.get(\"override_table\", True),\r\n \"syncDatabase\": {\r\n \"dbEnabled\": value.get(\"db_enabled\", False),\r\n \"dbType\": value.get(\"db_type\", 'SQLSERVER'),\r\n \"dbHost\": value.get(\"db_host_name\", ''),\r\n \"dbPort\": value.get(\"db_port\", '1433'),\r\n \"dbName\": value.get(\"db_name\", ''),\r\n \"dbInstance\": value.get(\"db_instance\", ''),\r\n \"dbUserPassword\": {\r\n \"userName\": value.get(\"db_user_name\", ''),\r\n \"password\": value.get(\"db_user_password\", '')\r\n },\r\n\r\n }\r\n\r\n }\r\n }", "def to_protobuf(self):\n self._validate()\n kwargs = {k: _convert(getattr(self, k), 'to_protobuf')\n for k in self._get_params()}\n return self._protobuf_cls(**kwargs)", "def initialize_options(self):", "def __init__(self, value: str):\n self.options = [\n \"v1.0\"\n ]", "def from_options(**kwargs):\n return Molecule(Molecule.default_options().set_values(kwargs))", "def from_opt(cls, corpora, transforms, vocabs, opt, task, copy, stride=1, offset=0):\n corpora_info = {}\n batch_size = (\n opt.valid_batch_size if (task == CorpusTask.VALID) else opt.batch_size\n )\n if task != CorpusTask.INFER:\n if opt.batch_size_multiple is not None:\n batch_size_multiple = opt.batch_size_multiple\n else:\n batch_size_multiple = 8 if opt.model_dtype == \"fp16\" else 1\n corpora_info = opt.data\n bucket_size = opt.bucket_size\n bucket_size_init = opt.bucket_size_init\n bucket_size_increment = opt.bucket_size_increment\n skip_empty_level = opt.skip_empty_level\n else:\n batch_size_multiple = 1\n corpora_info[CorpusTask.INFER] = {\"transforms\": opt.transforms}\n corpora_info[CorpusTask.INFER][\"weight\"] = 1\n # bucket_size = batch_size\n bucket_size = 16384\n bucket_size_init = -1\n bucket_size_increment = 0\n skip_empty_level = \"warning\"\n return cls(\n corpora,\n corpora_info,\n transforms,\n vocabs,\n task,\n opt.batch_type,\n batch_size,\n batch_size_multiple,\n data_type=opt.data_type,\n bucket_size=bucket_size,\n bucket_size_init=bucket_size_init,\n bucket_size_increment=bucket_size_increment,\n copy=copy,\n skip_empty_level=skip_empty_level,\n stride=stride,\n offset=offset,\n )", "def initialize_options(self):\n pass", "def _ParseStringOption(cls, options, argument_name, default_value=None):\n argument_value = getattr(options, argument_name, None)\n if not argument_value:\n return default_value\n\n if isinstance(argument_value, py2to3.BYTES_TYPE):\n encoding = sys.stdin.encoding\n\n # Note that sys.stdin.encoding can be None.\n if not encoding:\n encoding = locale.getpreferredencoding()\n if not encoding:\n encoding = cls._PREFERRED_ENCODING\n\n try:\n argument_value = argument_value.decode(encoding)\n except UnicodeDecodeError as exception:\n raise errors.BadConfigOption((\n u'Unable to convert option: {0:s} to Unicode with error: '\n u'{1:s}.').format(argument_name, exception))\n\n elif not isinstance(argument_value, 
py2to3.UNICODE_TYPE):\n raise errors.BadConfigOption(\n u'Unsupported option: {0:s} string type required.'.format(\n argument_name))\n\n return argument_value", "def from_bytes(buf: bytes) -> 'ProposalInfo':\n proposal_info_in_dict: dict = json_loads(buf.decode())\n proposal_info_in_dict[\"id\"] = bytes.fromhex(proposal_info_in_dict[\"id\"])\n proposal_info_in_dict[\"proposer\"] = Address.from_string(proposal_info_in_dict[\"proposer\"])\n return ProposalInfo(**proposal_info_in_dict)", "def load_options():\n try:\n with open(config, \"rU\") as f:\n options = serializer.load(f)\n check(options)\n if options[\"version\"] < version:\n options[\"version\"] = version.int\n options = get_config(options)\n save_options(options)\n except IOError:\n options = get_config()\n save_options(options)\n except Exception:\n print (\"Options could not be loaded:\")\n import traceback\n traceback.print_exc()\n options = get_config()\n save_options(options)\n else:\n o_o = options\n options = get_config(options)\n if o_o != options:\n save_options(options)\n del(o_o)\n globals()[\"clientoptions\"] = options\n return options", "def from_pb(cls, cell_pb):\n timestamp = _datetime_from_microseconds(cell_pb.timestamp_micros)\n if cell_pb.labels:\n return cls(cell_pb.value, timestamp, labels=cell_pb.labels)\n else:\n return cls(cell_pb.value, timestamp)", "def __init__(self, options, is_training=False):\n super(MessagePassing, self).__init__(options, is_training)\n\n if not isinstance(options, graph_network_pb2.MessagePassing):\n raise ValueError('Options has to be an MessagePassing proto.')\n\n self.use_reverse_edges = options.use_reverse_edges\n self.add_bi_directional_edges = options.add_bi_directional_edges\n self.add_self_loop_edges = True", "def get_options():\n user_options = {}\n user_options['surface'] = {'label': 'Surface',\n 'type': 'stringList',\n 'default': 'bcc100',\n 'values': surface_selections}\n\n user_options['metal'] = {'label': 'Metal',\n 'type': 'string',\n 'default': 'Au'}\n\n user_options['a'] = {'label': 'Lattice Constant',\n 'type': 'float',\n 'precision': 3,\n 'suffix': 'Å'}\n\n user_options['size-x'] = {'label': 'Size X',\n 'type': 'integer',\n 'default': 5}\n\n user_options['size-y'] = {'label': 'Size Y',\n 'type': 'integer',\n 'default': 5}\n\n user_options['size-z'] = {'label': 'Size Z',\n 'type': 'integer',\n 'default': 3}\n\n user_options['vacuum'] = {'label': 'Vacuum distance',\n 'type': 'float',\n 'precision': 1,\n 'suffix': 'Å'}\n\n user_options['orthogonal'] = {'label': 'Orthogonal',\n 'type': 'stringList',\n 'default': 'True',\n 'values': ['True', 'False']}\n\n return {'userOptions': user_options }", "def from_legacy(cls, name=\"default\"):\n with open(CONFIG_DIR / \"user.nsc\") as f:\n data = json.load(f)\n\n c = cls(name)\n values = list(data.values())\n order = (3, 4, 2, 0, 1, 5)\n\n for i, k in enumerate(c.data.keys()):\n v = values[order[i]]\n\n if k == \"starting_version\":\n if v != \"CALVER\" and not VERSION_PATTERN.match(v):\n v = (\n input(f\"🎤 Starting version [0.1.0]: \").strip()\n or \"0.1.0\"\n )\n v = c._validate_option(k, v)\n\n elif k == \"preferred_license\":\n v = c._resolve_license(v)\n if not v:\n v = (\n input(f\"🎤 Preferred license [unlicense]: \").strip()\n or \"unlicense\"\n )\n v = c._validate_option(k, v)\n\n c.data[k] = v\n\n return c", "def SoapOptions(self) -> SoapOption:", "def __init__(self, options_file: str,\n use_character_inputs=True,\n embedding_weight_file=None,\n max_batch_size=128, *args, **kwargs):\n super().__init__(*args, **kwargs)\n 
with open(options_file, 'r') as fin:\n options = json.load(fin)\n\n if not use_character_inputs:\n if embedding_weight_file is None:\n raise ValueError(\n \"embedding_weight_file is required input with \"\n \"not use_character_inputs\"\n )\n self._options = options\n self._embedding_weight_file = embedding_weight_file\n self._use_character_inputs = use_character_inputs\n self._max_batch_size = max_batch_size\n\n self._ops = {}\n self.lm_graph = BidirectionalLanguageModelGraph(self._options, name='bilm',\n embedding_weight_file=self._embedding_weight_file,\n use_character_inputs=self._use_character_inputs,\n max_batch_size=self._max_batch_size)", "def from_proto(cls, feature_set_proto: FeatureSetProto):\n\n feature_set = cls(\n name=feature_set_proto.spec.name,\n features=[\n Feature.from_proto(feature)\n for feature in feature_set_proto.spec.features\n ],\n entities=[\n Entity.from_proto(entity) for entity in feature_set_proto.spec.entities\n ],\n max_age=(\n None\n if feature_set_proto.spec.max_age.seconds == 0\n and feature_set_proto.spec.max_age.nanos == 0\n else feature_set_proto.spec.max_age\n ),\n labels=feature_set_proto.spec.labels,\n source=(\n None\n if feature_set_proto.spec.source.type == 0\n else Source.from_proto(feature_set_proto.spec.source)\n ),\n project=None\n if len(feature_set_proto.spec.project) == 0\n else feature_set_proto.spec.project,\n )\n feature_set._status = feature_set_proto.meta.status # type: ignore\n feature_set._created_timestamp = feature_set_proto.meta.created_timestamp\n return feature_set", "def __init__(self, value: str):\n self.options = [\n \"K\"\n ]", "def translate_options(cls, options):\n used_options = cls.default_options()\n if options is None:\n options = {}\n for key, val in options.items():\n if key not in used_options:\n raise KeyError(f\"Cannot handle key {key}.\")\n used_options[key] = val\n return used_options", "def parse_options(parser):\n TensorflowModel.parse_options(parser)\n parser.add_argument('--input-dim', type=int, default=160)\n parser.add_argument('--input-len', type=int, default=7501)\n parser.add_argument('--output-len', type=int, default=7501)\n parser.add_argument('--conv-layer-num', type=int, default=2)\n parser.add_argument('--conv-kernel-num', type=int, default=1)\n parser.add_argument('--conv-kernel-len', type=int, default=512)", "def __validate_options__(cls, options):\n pass", "def test__ApplicationCommandOptionMetadataNested__copy_with_keyword_parameters__1():\n old_options = [\n ApplicationCommandOption('nue', 'nue', ApplicationCommandOptionType.string),\n ApplicationCommandOption('seija', 'seija', ApplicationCommandOptionType.integer),\n ]\n \n new_options = [\n ApplicationCommandOption('aya', 'ayaya', ApplicationCommandOptionType.float),\n ApplicationCommandOption('momiji', 'awoo', ApplicationCommandOptionType.user),\n ]\n \n option_metadata = ApplicationCommandOptionMetadataNested(\n options = old_options,\n )\n \n copy = option_metadata.copy_with_keyword_parameters({\n 'options': new_options,\n })\n \n _asert_fields_set(copy)\n vampytest.assert_is_not(option_metadata, copy)\n \n vampytest.assert_eq(copy.options, tuple(new_options))", "def update_option(**options):\n api_options = default_options\n\n for k,v in option.iteritems():\n if k in default_options:\n api_options[k] = v\n\n return api_options", "def test_creation(self):\n \n from pystarlab.starlab import Option\n opt = Option(parameter=\"n\",\n long_name=\"specify number of particles\",\n is_required=True,\n default_value=None)\n \n 
self.assertIsInstance(opt, Option)\n self.assertEquals(opt.parameter, \"n\")\n self.assertTrue(opt.is_required)\n self.assertEquals(opt.long_name, \"specify number of particles\")\n self.assertIsNone(opt.default_value)\n self.assertIsNone(opt.value)", "def _generate_options(self, **kwargs: Any) -> dict:\n raise NotImplementedError", "def _pacbio_choice_option_from_dict(d):\n choices = d['choices']\n default_value = d['default']\n # this will immediately raise\n option_type_id = TaskOptionTypes.from_choice_str(d['optionTypeId'])\n\n opt_id = d['id']\n name = d['name']\n desc = to_utf8(d['description'])\n\n klass_map = {TaskOptionTypes.CHOICE_STR: PacBioStringChoiceOption,\n TaskOptionTypes.CHOICE_FLOAT: PacBioFloatChoiceOption,\n TaskOptionTypes.CHOICE_INT: PacBioIntChoiceOption}\n\n k = klass_map[option_type_id]\n\n # Sanitize Unicode hack\n if k is PacBioStringChoiceOption:\n default_value = to_ascii(default_value)\n choices = [to_ascii(i) for i in choices]\n\n opt = k(opt_id, name, default_value, desc, choices)\n\n return opt", "def read_options(config, options):\n inference_options = config.get(\"inference_options\")\n if inference_options is None:\n logger.warning(\n \"This model does not accept inference options, \"\n \"but the request includes the following option(s): %s\",\n \", \".join(options.keys()),\n )\n return {}\n try:\n jsonschema.validate(options, inference_options[\"json_schema\"])\n except jsonschema.ValidationError as e:\n raise ValueError(\"Options validation error: %s\" % e.message)\n v2_config = is_v2_config(config)\n operators_options = collections.defaultdict(dict)\n config_override = {}\n for mapping in inference_options[\"options\"]:\n try:\n option_value = index_config(options, mapping[\"option_path\"])\n except ValueError:\n continue # Option not passed for this request.\n config_path = mapping[\"config_path\"]\n if isinstance(config_path, str):\n config_path = [config_path]\n if v2_config:\n for cp in config_path:\n dst_config, dst_key = index_config(config, cp, index_structure=False)\n operators_options[dst_config[\"name\"]].update({dst_key: option_value})\n else:\n for cp in config_path:\n merge_config(\n config_override,\n build_override(config, cp, option_value),\n )\n if v2_config:\n return operators_options\n return config_override", "def initialize(self, options):", "def __init__(self, value: str):\n self.options = [\n \"s\",\n ]", "def set_options(self, options):\n self.options = options", "def __init__(self, **options):\n self.__dict__.update(\n (k, v) for (k, v) in options.items() if not k.startswith('__'))", "def initialize_options(self):\n pass", "def initialize_options(self):\n pass", "def _GetOptionDecoderConstructions(cls):\n result = super()._GetOptionDecoderConstructions()\n result.update({\n 'enable_freeze_restore': (option_decoders.BooleanDecoder, {\n 'default': False,\n 'none_ok': True\n }),\n 'delete_on_freeze_error': (option_decoders.BooleanDecoder, {\n 'default': False,\n 'none_ok': True\n }),\n 'create_on_restore_error': (option_decoders.BooleanDecoder, {\n 'default': False,\n 'none_ok': True\n }),\n })\n return result", "def set(cls,options):\n cls.instance = Options(options)", "def from_dict(doc: Dict, validate: Optional[bool] = True) -> Select:\n if validate:\n util.validate_doc(\n doc,\n mandatory=pd.MANDATORY + ['values'],\n optional=pd.OPTIONAL,\n exception=err.InvalidParameterError\n )\n for val in doc['values']:\n util.validate_doc(\n val,\n mandatory=['name', 'value'],\n optional=['isDefault'],\n 
exception=err.InvalidParameterError\n )\n if doc[pd.TYPE] != PARA_SELECT:\n raise ValueError(\"invalid type '{}'\".format(doc[pd.TYPE]))\n return Select(\n name=doc[pd.NAME],\n index=doc[pd.INDEX],\n label=doc.get(pd.LABEL),\n help=doc.get(pd.HELP),\n default=doc.get(pd.DEFAULT),\n required=doc[pd.REQUIRED],\n group=doc.get(pd.GROUP),\n values=doc['values']\n )", "def __init__(\n self,\n *,\n name: str,\n label: str,\n options: Union[List[Option], List[OptionGroup]],\n optional: bool = False,\n value: Union[Option, str] = None,\n placeholder: str = None,\n ):\n super().__init__(\n name=name,\n label=label,\n optional=optional,\n value=value,\n placeholder=placeholder,\n )\n self.options = options", "def options(self, a: str) -> typing.Any:", "def __init__(self, options=None):\n if options is None:\n options = {}\n # do we need any options for import behavior, such as 'create references', 'use PK', 'resolve DRS', etc?\n defaults = {}\n self.options = {**defaults, **options}", "def create(self, **kwargs):\n return OptionValue.objects.create(name=kwargs['name'], option=kwargs['option'])", "def FromProto(cls, proto_obj):\n if not proto_obj.last_update_source:\n raise GameModelError('No update source specified in Game creation.')\n # TODO(P2): refactor all constructors into one base function like in tweets.\n return Game(id_str=proto_obj.id_str,\n teams=[Team.FromProto(tm) for tm in proto_obj.teams],\n scores=proto_obj.scores,\n name=proto_obj.name,\n tournament_id=proto_obj.tournament_id_str,\n tournament_name=proto_obj.tournament_name,\n game_status=proto_obj.game_status,\n division=proto_obj.division,\n league=proto_obj.league,\n age_bracket=proto_obj.age_bracket,\n sources=[GameSource.FromProto(proto_obj.last_update_source)],\n key=game_key(proto_obj))", "def fromKVForm(cls, kvform_string):\n return cls.fromOpenIDArgs(kvform.kvToDict(kvform_string))" ]
[ "0.72627205", "0.67112297", "0.6254082", "0.5537946", "0.5431298", "0.54273605", "0.5401342", "0.53541476", "0.53435814", "0.5294465", "0.523745", "0.5237094", "0.5193706", "0.5100491", "0.5098164", "0.50886667", "0.50617826", "0.5013392", "0.50065786", "0.49748728", "0.49595988", "0.4942827", "0.4937329", "0.4932765", "0.49071205", "0.49011543", "0.49011543", "0.4896274", "0.48960942", "0.4892244", "0.4888161", "0.48796466", "0.48736262", "0.48628634", "0.48576662", "0.48504373", "0.484726", "0.4798589", "0.47946504", "0.47707096", "0.47511122", "0.4740537", "0.472582", "0.47055548", "0.47007418", "0.46853575", "0.46649745", "0.46623152", "0.4659261", "0.465896", "0.46547642", "0.4644416", "0.46409965", "0.4627808", "0.4619819", "0.4614636", "0.46128112", "0.46057603", "0.45813352", "0.4578943", "0.4546953", "0.45468277", "0.45369312", "0.45359182", "0.45281377", "0.4526041", "0.4515546", "0.45151162", "0.45142347", "0.45104256", "0.45083037", "0.45041373", "0.45014977", "0.44962385", "0.44942418", "0.44826624", "0.4481116", "0.44801712", "0.44767532", "0.44738734", "0.44713315", "0.44569448", "0.44567937", "0.44474408", "0.44462094", "0.443995", "0.44383106", "0.44235745", "0.4420481", "0.44174397", "0.44174397", "0.4414367", "0.44086504", "0.44070643", "0.43916306", "0.4390926", "0.4388396", "0.43870893", "0.43864807", "0.4379427" ]
0.8055073
0
Converts a SnowflakeOptionsProto object to its protobuf representation.
def to_proto(self) -> DataSourceProto.SnowflakeOptions: snowflake_options_proto = DataSourceProto.SnowflakeOptions( database=self.database, schema=self.schema, table=self.table, query=self.query, ) return snowflake_options_proto
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_proto(cls, snowflake_options_proto: DataSourceProto.SnowflakeOptions):\n snowflake_options = cls(\n database=snowflake_options_proto.database,\n schema=snowflake_options_proto.schema,\n table=snowflake_options_proto.table,\n query=snowflake_options_proto.query,\n )\n\n return snowflake_options", "def to_pb2(self) -> _FaceDetectorGraphOptionsProto:\n base_options_proto = self.base_options.to_pb2()\n base_options_proto.use_stream_mode = (\n False if self.running_mode == _RunningMode.IMAGE else True\n )\n return _FaceDetectorGraphOptionsProto(\n base_options=base_options_proto,\n min_detection_confidence=self.min_detection_confidence,\n min_suppression_threshold=self.min_suppression_threshold,\n )", "def to_proto(self):\n prototxt = str()\n opts = self.options('solver')\n for opt in opts:\n val = self.get('solver',opt)\n prototxt += opt + ': ' + val + '\\n'\n return prototxt", "def to_proto(self) -> None:\n\n pass", "def to_protobuf(self):\n self._validate()\n kwargs = {k: _convert(getattr(self, k), 'to_protobuf')\n for k in self._get_params()}\n return self._protobuf_cls(**kwargs)", "def to_pb2(self) -> _BaseOptionsProto:\n if self.model_asset_path is not None:\n full_path = os.path.abspath(self.model_asset_path)\n else:\n full_path = None\n\n return _BaseOptionsProto(\n model_asset=_ExternalFileProto(\n file_name=full_path, file_content=self.model_asset_buffer))", "def from_proto(cls, hive_options_proto: Any):\n\n pass", "def to_python(self, value):\n if value is None:\n return value\n value = super(BitOptionsField, self).to_python(value)\n return BitOptions(self.options.flags, value)", "def _proto_to_string(self, p: google.protobuf.message.Message) -> str:\n return text_format.MessageToString(p, as_one_line=True)", "def setConfigProtoBytes(self, v):\n return self._set(configProtoBytes=v)", "def _stringify_proto(obj):\n return obj.SerializeToString()", "def to_proto(self) -> DataSourceProto:\n data_source_proto = DataSourceProto(\n type=DataSourceProto.BATCH_SNOWFLAKE,\n field_mapping=self.field_mapping,\n snowflake_options=self.snowflake_options.to_proto(),\n )\n\n data_source_proto.event_timestamp_column = self.event_timestamp_column\n data_source_proto.created_timestamp_column = self.created_timestamp_column\n data_source_proto.date_partition_column = self.date_partition_column\n\n return data_source_proto", "def initialize_options(self):\n self.proto_path = \"oef-core-protocol\"", "def to_proto(self):\n proto = bounding_box_pb2.BoundingBox()\n proto.start.CopyFrom(geom_utils.ToVector3j(self.start))\n proto.size.CopyFrom(geom_utils.ToVector3j(self.size))\n return proto", "def _object2proto(self) -> UpdateGroupMessage_PB:\n return UpdateGroupMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def to_proto(self) -> FeatureSetReferenceProto:\n return self.proto", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def 
setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def toStr(self, protoObj):\n return text_format.MessageToString(protoObj)", "def toStr(self, protoObj):\n return text_format.MessageToString(protoObj)", "def _object2proto(self) -> GetGroupsMessage_PB:\n return GetGroupsMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def _object2proto(self) -> DeleteGroupMessage_PB:\n return DeleteGroupMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def _object2proto(self) -> GetGroupMessage_PB:\n return GetGroupMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def test_protoDefaults(self):\n options = Options()\n options.parseOptions([])\n\n self.assertEqual(len(options[\"pop3\"]), 1)\n 
self.assertIsInstance(options[\"pop3\"][0], endpoints.TCP4ServerEndpoint)\n\n self.assertEqual(len(options[\"smtp\"]), 1)\n self.assertIsInstance(options[\"smtp\"][0], endpoints.TCP4ServerEndpoint)", "def _object2proto(self) -> CreateGroupMessage_PB:\n return CreateGroupMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def _pacbio_legacy_option_from_dict(d):\n warnings.warn(\n \"This is obsolete and will disappear soon\",\n DeprecationWarning)\n\n opt_id = d['pb_option']['option_id']\n name = d['pb_option']['name']\n default = d['pb_option']['default']\n desc = d['pb_option']['description']\n option_type_id = to_ascii(d['pb_option']['type'])\n\n # Hack to support \"number\"\n if option_type_id == \"number\":\n option_type_id = \"float\"\n\n return __simple_option_by_type(opt_id, name, default, desc, option_type_id)", "def ToProto(self):\n game = scores_messages.Game()\n game.id_str = self.id_str\n game.teams = [team.ToProto() for team in self.teams]\n game.scores = self.scores\n game.name = self.name\n game.tournament_id_str = self.tournament_id\n game.tournament_name = self.tournament_name\n game.game_status = self.game_status\n game.division = self.division\n game.league = self.league\n game.age_bracket = self.age_bracket\n if self.sources:\n game.last_update_source = self.sources[0].ToProto()\n return game", "def to_json(self) -> Text:\n options_dict = copy.copy(self.__dict__)\n options_dict[_TYPE_NAME_KEY] = 'StatsOptions'\n if options_dict['_slice_functions'] is not None:\n raise ValueError(\n 'StatsOptions cannot be converted with experimental_slice_functions.'\n )\n if options_dict['_generators'] is not None:\n raise ValueError(\n 'StatsOptions cannot be converted with generators.'\n )\n if self.schema is not None:\n del options_dict['_schema']\n options_dict[_SCHEMA_JSON_KEY] = json_format.MessageToJson(self.schema)\n if self.slicing_config is not None:\n del options_dict['_slicing_config']\n options_dict[_SLICING_CONFIG_JSON_KEY] = json_format.MessageToJson(\n self.slicing_config)\n if self._per_feature_weight_override is not None:\n del options_dict['_per_feature_weight_override']\n options_dict[_PER_FEATURE_WEIGHT_OVERRIDE_JSON_KEY] = {\n k.to_json(): v for k, v in self._per_feature_weight_override.items()\n }\n return json.dumps(options_dict)", "def to_proto(self):\n filename_tensor = array_ops.placeholder(\n shape=[], dtype=dtypes.string, name=\"saver_filename\")\n save_tensor = self._traced_save(filename_tensor)\n restore_op = self._traced_restore(filename_tensor).op\n return saver_pb2.SaverDef(\n filename_tensor_name=filename_tensor.name,\n save_tensor_name=save_tensor.name,\n restore_op_name=restore_op.name,\n version=saver_pb2.SaverDef.V2)", "def _object2proto(self) -> Metadata_PB:\n return Metadata_PB(\n name=self.name, id=serialize(self.id), node=serialize(self.node)\n )", "def _stringify_proto(obj):\n if isinstance(obj, str): return obj\n elif isinstance(obj, Message): return obj.SerializeToString()\n else: raise TypeError('Object can not be serialized as a string.')", "def _object2proto(self) -> UpdateGroupResponse_PB:\n return UpdateGroupResponse_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n status_code=self.status_code,\n content=json.dumps(self.content),\n )", "def to_proto(self) -> FeatureSetProto:\n\n meta = FeatureSetMetaProto(\n created_timestamp=self.created_timestamp, status=self.status\n )\n\n spec = FeatureSetSpecProto(\n 
name=self.name,\n project=self.project,\n max_age=self.max_age,\n labels=self.labels,\n source=self.source.to_proto() if self.source is not None else None,\n features=[\n field.to_proto()\n for field in self._fields.values()\n if type(field) == Feature\n ],\n entities=[\n field.to_proto()\n for field in self._fields.values()\n if type(field) == Entity\n ],\n )\n\n return FeatureSetProto(spec=spec, meta=meta)", "def save_options(options):\n with open(config, \"wt\") as f:\n f.write(serializer.dumps(options, indent = 4, sort_keys = True))", "def to_pb2(self) -> _DetectionProto:\n labels = []\n label_ids = []\n scores = []\n display_names = []\n relative_keypoints = []\n\n for category in self.categories:\n scores.append(category.score)\n if category.index:\n label_ids.append(category.index)\n if category.category_name:\n labels.append(category.category_name)\n if category.display_name:\n display_names.append(category.display_name)\n\n if self.keypoints:\n for keypoint in self.keypoints:\n relative_keypoint_proto = _LocationDataProto.RelativeKeypoint()\n if keypoint.x:\n relative_keypoint_proto.x = keypoint.x\n if keypoint.y:\n relative_keypoint_proto.y = keypoint.y\n if keypoint.label:\n relative_keypoint_proto.keypoint_label = keypoint.label\n if keypoint.score:\n relative_keypoint_proto.score = keypoint.score\n relative_keypoints.append(relative_keypoint_proto)\n\n return _DetectionProto(\n label=labels,\n label_id=label_ids,\n score=scores,\n display_name=display_names,\n location_data=_LocationDataProto(\n format=_LocationDataProto.Format.BOUNDING_BOX,\n bounding_box=self.bounding_box.to_pb2(),\n relative_keypoints=relative_keypoints,\n ),\n )", "def get_protobuf_schema() -> GeneratedProtocolMessageType:\n return StorableObject_PB", "def encode_options(options):\n last_number = 0\n packed = []\n for opt in sorted_options(options):\n delta = opt.number - last_number\n last_number = opt.number\n pvalue = opt.packed_value\n (od, odx) = _optionint_helper.option_encoding(delta)\n (ol, olx) = _optionint_helper.option_encoding(len(pvalue))\n encoded = struct.pack(str('B'), (od << 4) | ol)\n encoded += odx + olx + pvalue\n packed.append(encoded)\n return b''.join(packed)", "def _to_google_protobuf_value(value):\n lv = struct_pb2.ListValue()\n lv.append(value)\n return lv.values[0]", "def create_from_pb2(cls, pb2_obj: _BaseOptionsProto) -> 'BaseOptions':\n return BaseOptions(\n model_asset_path=pb2_obj.model_asset.file_name,\n model_asset_buffer=pb2_obj.model_asset.file_content)", "def __init__(self, options, is_training=False):\n super(MessagePassing, self).__init__(options, is_training)\n\n if not isinstance(options, graph_network_pb2.MessagePassing):\n raise ValueError('Options has to be an MessagePassing proto.')\n\n self.use_reverse_edges = options.use_reverse_edges\n self.add_bi_directional_edges = options.add_bi_directional_edges\n self.add_self_loop_edges = True", "def proto(self) -> Proto:\n # Create a \"context-naïve\" proto.\n # This has everything but is ignorant of naming collisions in the\n # ultimate file that will be written.\n naive = Proto(\n all_enums=self.proto_enums,\n all_messages=self.proto_messages,\n file_pb2=self.file_descriptor,\n file_to_generate=self.file_to_generate,\n services=self.proto_services,\n meta=metadata.Metadata(\n address=self.address,\n ),\n )\n\n # If this is not a file being generated, we do not need to\n # do anything else.\n if not self.file_to_generate:\n return naive\n\n # Return a context-aware proto object.\n return dataclasses.replace(\n 
naive,\n all_enums=collections.OrderedDict(\n (k, v.with_context(collisions=naive.names))\n for k, v in naive.all_enums.items()\n ),\n all_messages=collections.OrderedDict(\n (k, v.with_context(collisions=naive.names))\n for k, v in naive.all_messages.items()\n ),\n services=collections.OrderedDict(\n # Note: services bind to themselves because services get their\n # own output files.\n (k, v.with_context(collisions=v.names))\n for k, v in naive.services.items()\n ),\n meta=naive.meta.with_context(collisions=naive.names),\n )", "def ToProto(self):\n team = scores_messages.Team()\n if self.twitter_id:\n account = scores_messages.TwitterAccount()\n account.id_str = str(self.twitter_id)\n team.twitter_account = account\n if self.score_reporter_id:\n account = scores_messages.ScoreReporterAccount()\n account.id = self.score_reporter_id\n team.score_reporter_account = account\n return team", "def _object2proto(self) -> DeleteGroupResponse_PB:\n return DeleteGroupResponse_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n status_code=self.status_code,\n content=json.dumps(self.content),\n )", "def _proto2object(\n proto: UpdateGroupMessage_PB,\n ) -> \"UpdateGroupMessage\":\n\n return UpdateGroupMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def deconstruct(self):\n name, path, args, kwargs = super(SimpleBitOptionsField,\n self).deconstruct()\n if kwargs['default'] == self.options.maximum_value:\n del kwargs['default']\n kwargs['options'] = self.options.flags\n return name, path, args, kwargs", "def dict_to_protobuf(pb_klass_or_instance, values, type_callable_map=REVERSE_TYPE_CALLABLE_MAP, \\\n strict=True):\n if isinstance(pb_klass_or_instance, Message):\n instance = pb_klass_or_instance\n else:\n instance = pb_klass_or_instance()\n return _dict_to_protobuf(instance, values, type_callable_map, strict)", "def from_proto(data_source: DataSourceProto):\n return SnowflakeSource(\n field_mapping=dict(data_source.field_mapping),\n database=data_source.snowflake_options.database,\n schema=data_source.snowflake_options.schema,\n table=data_source.snowflake_options.table,\n event_timestamp_column=data_source.event_timestamp_column,\n created_timestamp_column=data_source.created_timestamp_column,\n date_partition_column=data_source.date_partition_column,\n query=data_source.snowflake_options.query,\n )", "def get_protobuf_schema() -> GeneratedProtocolMessageType:\n\n return UpdateGroupMessage_PB", "def tcp_pkt_parse_options(options: bytes)->dict:\n i = 0\n options_dict = dict()\n opts_rev_mapping = enum_value_to_enum(TCPOptions)\n while i < len(options):\n prefix = options[i]\n i += 1\n if prefix == dpkt.tcp.TCP_OPT_EOL or prefix == dpkt.tcp.TCP_OPT_NOP:\n options_dict[opts_rev_mapping[prefix]] = (0, None)\n else:\n if i < len(options):\n opt_len = options[i]\n i += 1\n if prefix in opts_rev_mapping.keys():\n options_dict[opts_rev_mapping[prefix]] = (opt_len, options[i:i + opt_len - 2])\n else:\n options_dict[UNKNOWN_OPTION_PREFIX + str(prefix)] = (opt_len, options[i:i + opt_len - 2])\n i += opt_len - 2\n else:\n break\n return options_dict", "def _object2proto(self) -> GetGroupsResponse_PB:\n return GetGroupsResponse_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n status_code=self.status_code,\n content=json.dumps(self.content),\n )", "def _object2proto(self) -> GetGroupResponse_PB:\n return GetGroupResponse_PB(\n 
msg_id=serialize(self.id),\n address=serialize(self.address),\n status_code=self.status_code,\n content=json.dumps(self.content),\n )", "def test_convert_proto_plus_to_protobuf(self):\n proto_plus = ProtoPlusFixture()\n converted = util.convert_proto_plus_to_protobuf(proto_plus)\n # Assert that the converted proto is an instance of the protobuf\n # protobuf message class.\n self.assertIsInstance(converted, ProtobufMessageType)", "def _proto2object(\n proto: DeleteGroupMessage_PB,\n ) -> \"DeleteGroupMessage\":\n\n return DeleteGroupMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def ToPb(self):\n return self._ToPb(False)", "def selector_from_proto(\n s: schema_pb2.KeyDistributionOptions\n) -> reverb_types.SelectorType:\n if s.fifo:\n return reverb.selectors.Fifo()\n elif s.uniform:\n return reverb.selectors.Uniform()\n elif s.lifo:\n return reverb.selectors.Lifo()\n elif s.WhichOneof('distribution') == 'heap':\n if s.heap.min_heap:\n return reverb.selectors.MinHeap()\n else:\n return reverb.selectors.MaxHeap()\n elif s.WhichOneof('distribution') == 'prioritized':\n return reverb.selectors.Prioritized(\n s.prioritized.priority_exponent)\n else:\n simple_booleans_options = ('fifo', 'lifo', 'uniform')\n if s.WhichOneof('distribution') in simple_booleans_options:\n raise ValueError(f'distribution={s.WhichOneof(\"distribution\")}'\n ' but the associated boolean value is false.')\n else:\n raise NotImplementedError(\n f'distribution={s.WhichOneof(\"distribution\")}')", "def from_caffe_solver_protoxt(cls, caffe_solver_prototxt_file: Path):\n solver_param = caffe_pb2.SolverParameter()\n with open(caffe_solver_prototxt_file, 'rt') as f:\n pb2.text_format.Merge(f.read(), solver_param)\n dictionary = {'lr_policy': solver_param.lr_policy,\n 'base_lr': solver_param.base_lr,\n 'gamma': solver_param.gamma,\n 'momentum': solver_param.momentum,\n 'max_iter': solver_param.max_iter,\n 'stepsize': solver_param.stepsize,\n 'stepvalues': solver_param.stepvalue,\n 'weight_decay': solver_param.weight_decay,\n 'iter_size': solver_param.iter_size,\n 'from_prototxt': caffe_solver_prototxt_file}\n return cls(**dictionary)", "def _proto2object(\n proto: GetGroupsMessage_PB,\n ) -> \"GetGroupsMessage\":\n\n return GetGroupsMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def _proto2object(\n proto: GetGroupMessage_PB,\n ) -> \"GetGroupMessage\":\n\n return GetGroupMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def decode_proto(self):\n # Create output directory it does not exist\n if not os.path.exists(PROTO_CACHE):\n os.makedirs(PROTO_CACHE)\n\n # Compile proto (TODO: Assumes protoc is in PATH)\n cmd = \"protoc -I {} --python_out={} {}\".format(\n os.path.dirname(self.proto_file_path),\n PROTO_CACHE,\n self.proto_file_path)\n subprocess.check_call(cmd, shell=True)\n\n # Append compiled python module to Python's system path\n sys.path.insert(0, PROTO_CACHE)\n globals()[\"ProtoDefinition\"] = __import__(\"u_s_s_r_proto_pb2\")", "def _object2proto(self) -> CreateGroupResponse_PB:\n return CreateGroupResponse_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n status_code=self.status_code,\n 
content=json.dumps(self.content),\n )" ]
[ "0.7185293", "0.6591346", "0.63469636", "0.6190279", "0.609588", "0.6037695", "0.59587693", "0.57750213", "0.574301", "0.5736975", "0.5731854", "0.5646835", "0.5621101", "0.55236673", "0.5520179", "0.54465526", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.5359293", "0.5359293", "0.53536105", "0.5324065", "0.53190064", "0.53051645", "0.5283954", "0.5242074", "0.52331936", "0.52267313", "0.5130916", "0.5130073", "0.50909406", "0.5076463", "0.50562423", "0.50404936", "0.50191045", "0.50073624", "0.4990783", "0.49791184", "0.49634853", "0.495542", "0.49247608", "0.49154454", "0.48985267", "0.48960373", "0.48832193", "0.48692435", "0.48469085", "0.48250732", "0.48202375", "0.4818375", "0.48138314", "0.4809193", "0.47994277", "0.47955447", "0.47869205", "0.47819108", "0.47765085", "0.47724795", "0.47692502", "0.47634622" ]
0.7964391
0
Given a dict of lang->names, return a default one
def primary_name(names): langs = names.keys() if 'en' in langs: return names['en'] return names[langs[0]]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def localizedWithFallback(field, allowEmpty=True):\n for lang in [''] + FallbackLanguages():\n t = field[lang]\n if allowEmpty:\n if isinstance(t, basestring):\n return t\n elif t:\n return t\n return u\"\"", "def fallback_trans(x):\r\n t = _(x)\r\n if t == x:\r\n l = h.get_lang()\r\n h.set_lang('en', graceful_fail = True)\r\n t = _(x)\r\n if l and l[0] != 'en':\r\n h.set_lang(l[0])\r\n return t", "def get_language(lang_code) -> str:\n langs = defaultdict(lambda: \"en\", {\"ru\": \"ru\"})\n return langs[lang_code.split(\"-\")[0]] if lang_code else \"en\"", "def test_default_translations(self):\n\t\t\n\t\tself.assertTrue(data.get_default_translation('Catholicism', 3) == 'DRA')\n\t\tself.assertTrue(data.get_default_translation('Christianity', 3) == 'ESV')", "def get_default_lang_slug(instance):\n try:\n default_language = settings.LANGUAGES[0][0]\n slug_name = 'slug_%s' % default_language\n return getattr(instance, slug_name, '')\n\n except Exception:\n return ''", "def find(lang):\n try:\n return as_dict(pycountry.languages.lookup(lang))\n except LookupError:\n return {}", "def get_lang(ix):\n\tlang = None\n\tif ix == 0:\n\t\tlang = setting.TLA_ENG\n\telif ix == 1:\n\t\tlang = setting.TLA_JP\n\telse:\n\t\tlang = setting.TLA_VN\n\n\tf = open (f\"lang\\\\{lang}.json\", encoding=setting.TLA_UTF8)\n\tglobal data_json\n\tdata_json = json.load(f)\n\n\treturn lang", "def get_dictionary_default(path):\n if path in defaults_dict.keys():\n return defaults_dict[path]\n else:\n return ''", "def get_default_variant(variants):\n for variant in variants:\n if variant.default:\n return variant", "def test_defaultdict_config():\n lang_configs = defaultdict(lambda: dict(processors=\"tokenize\"))\n run_multilingual_pipeline(en_has_dependencies=False, fr_has_dependencies=False, lang_configs=lang_configs)\n\n lang_configs = defaultdict(lambda: dict(processors=\"tokenize\"))\n lang_configs[\"en\"] = {\"processors\": \"tokenize,pos,lemma,depparse\"}\n run_multilingual_pipeline(en_has_dependencies=True, fr_has_dependencies=False, lang_configs=lang_configs)", "def init_translations():\n if \"@lang\" in input.load_input():\n lang = input.get_lang()\n try:\n trad = gettext.GNUTranslations(open(\"../course/common_student/$i18n/\" + lang + \".mo\", \"rb\"))\n except FileNotFoundError:\n trad = gettext.NullTranslations()\n trad.install()\n return lang\n trad = gettext.NullTranslations()\n trad.install()\n return \"en\"", "def default_locale(category: str | None = None, aliases: Mapping[str, str] = LOCALE_ALIASES) -> str | None:\n varnames = (category, 'LANGUAGE', 'LC_ALL', 'LC_CTYPE', 'LANG')\n for name in filter(None, varnames):\n locale = os.getenv(name)\n if locale:\n if name == 'LANGUAGE' and ':' in locale:\n # the LANGUAGE variable may contain a colon-separated list of\n # language codes; we just pick the language on the list\n locale = locale.split(':')[0]\n if locale.split('.')[0] in ('C', 'POSIX'):\n locale = 'en_US_POSIX'\n elif aliases and locale in aliases:\n locale = aliases[locale]\n try:\n return get_locale_identifier(parse_locale(locale))\n except ValueError:\n pass\n return None", "def default(cls, category: str | None = None, aliases: Mapping[str, str] = LOCALE_ALIASES) -> Locale:\n # XXX: use likely subtag expansion here instead of the\n # aliases dictionary.\n locale_string = default_locale(category, aliases=aliases)\n return cls.parse(locale_string)", "def _get_lang(self, *args, **kwargs):\n if \"lang\" in kwargs:\n if kwargs[\"lang\"] in self._available_languages:\n self.lang = kwargs[\"lang\"]", 
"def get_key_recursive(lang_map, lang_code, key_name, default=None):\n key_val = lang_map.get(lang_code, {}).get(key_name, sentinel)\n\n if key_val is not sentinel:\n return key_val\n\n parts = lang_code.split('_')\n parts.pop()\n if not parts:\n return default\n\n _lang_code = '_'.join(parts)\n return get_key_recursive(lang_map, _lang_code, key_name, default)", "def get_default(self, name):\n rargs = [_ for _ in reversed(self.args)]\n rdefaults = [_ for _ in reversed(self.defaults)]\n return rdefaults[rargs.index(name)]", "def defaultLanguage(self, lang=None):\n if(lang is not None):\n self.lang = lang\n return self.lang", "def to_language(arg: str) -> Tuple[Union[str, None], str]: \n if (low:= arg.lower()) in LANGUAGES:\n return arg\n else:\n return LANGCODES.get(low, None)", "def get_localized_name(name):\n locale = \"{}_{}\".format(\n name[\"preferredLocale\"][\"language\"],\n name[\"preferredLocale\"][\"country\"]\n )\n return name['localized'].get(locale, '')", "def get_full_dict(lang):\n\tif not lang:\n\t\treturn {}\n\t# found in local, return!\n\tif getattr(frappe.local, 'lang_full_dict', None) and frappe.local.lang_full_dict.get(lang, None):\n\t\treturn frappe.local.lang_full_dict\n\n\tfrappe.local.lang_full_dict = load_lang(lang)\n\n\treturn frappe.local.lang_full_dict", "def _try_to_get_an_english_value(self, localized_values):\n if not localized_values:\n return None\n\n for localized_value in localized_values:\n if localized_value.language in self.ENGLISH_LANGUAGE_CODES:\n return localized_value.value\n\n return first_or_default(localized_values).value", "def i18n_to_eng(string, map):\r\n\r\n return map.get(string, None)", "def get_word(key: str, language: str):\n if key not in word_keys:\n return \"LOCALIZATION KEY {} NOT FOUND FOR LANGUAGE {}\".format(key, language)\n words = word_keys[key]\n\n # If the word doesn't exist, just show word in English\n if language not in words or words[language] == \"\":\n return words[EN]\n else:\n return words[language]", "def get_language(self, word, lang=None):\n lang = lang or self.cfg.get('lang', 'en')\n # let's retrieve the word from configuration dict.\n try:\n return self.cfg['words_' + lang][word]\n except StandardError:\n return 'Do not know how to \"{}\" in \"{}\"'.format(word, lang)", "def getorelse(self, name, default=None):\n try:\n return self._defaults[name]\n except KeyError:\n return default", "def get_default_language():\n return getattr(thread_locals, 'DEFAULT_LANGUAGE',\n settings.DEFAULT_LANGUAGE)", "def guess_language(lang_list=None):\n\tlang_codes = frappe.request.accept_languages.values()\n\tif not lang_codes:\n\t\treturn frappe.local.lang\n\n\tguess = None\n\tif not lang_list:\n\t\tlang_list = get_all_languages() or []\n\n\tfor l in lang_codes:\n\t\tcode = l.strip()\n\t\tif not isinstance(code, text_type):\n\t\t\tcode = text_type(code, 'utf-8')\n\t\tif code in lang_list or code == \"en\":\n\t\t\tguess = code\n\t\t\tbreak\n\n\t\t# check if parent language (pt) is setup, if variant (pt-BR)\n\t\tif \"-\" in code:\n\t\t\tcode = code.split(\"-\")[0]\n\t\t\tif code in lang_list:\n\t\t\t\tguess = code\n\t\t\t\tbreak\n\n\treturn guess or frappe.local.lang", "def test_find_default(self):\n mute_map = MutableMap(**VALUE)\n\n assert mute_map.find('NOT_VALID', 'default_val') == \\\n 'default_val', 'default should be used'\n assert mute_map.find('str_val', 'default_val') == \\\n VALUE['str_val'], 'default should be ignored'", "def gpwDefaultLanguage(self):\n parent = self.getFolderWhenPortalFactory()\n if hasattr(parent, 
'getRawLanguage') and parent.getRawLanguage():\n return parent.getRawLanguage()\n tool = getToolByName(self, 'portal_languages', None)\n if tool is not None:\n return tool.getDefaultLanguage()\n return config.LANGUAGE_DEFAULT", "def getDefault():", "def lang_init():\n _locale, _encoding = locale.getdefaultlocale() # Default system values\n path = os.path.join(os.path.dirname(sys.argv[0]), 'localization/lang')\n if os.path.exists(path):\n lang = gettext.translation('UnrulyPuzzlePython', path, [_locale],\n fallback=True)\n else:\n lang = gettext.translation('UnrulyPuzzlePython', path,\n fallback=True)\n return lang.gettext", "def _lang_id(dic: Dictionary, lang: str):\r\n idx = dic.index(lang)\r\n assert idx != dic.unk_index, \"cannot find language ID for lang {}\".format(lang)\r\n return idx", "def test_fallback_language_no_current(self):\n x = SimpleModel()\n x.set_current_language(self.conf_fallback)\n x.tr_title = \"TITLE_FALLBACK\"\n\n self.assertEqual(\n x.safe_translation_getter(\"tr_title\", language_code=self.other_lang1), \"TITLE_FALLBACK\"\n )", "def browserLanguages(request):\n fallback = []\n accepted = request.http_accept_language\n if accepted:\n # Extract the languages names from the string\n accepted = accepted.split(',')\n accepted = map(lambda x: x.split(';')[0], accepted)\n # Add base language for each sub language. If the user specified\n # a sub language like \"en-us\", we will try to to provide it or\n # a least the base language \"en\" in this case.\n for lang in accepted:\n lang = lang.lower()\n fallback.append(lang)\n if '-' in lang:\n baselang = lang.split('-')[0]\n fallback.append(baselang)\n return fallback", "def get_lang(context, field):\n lang = json.load(open(\"json/lang.json\", \"r\"))\n conf = json.load(open(\"json/serverconfig.json\", \"r\"))\n return lang[conf[str(context)][\"lang\"]][field]", "def getLang(lang, localedir=os.path.expanduser(\"~\") + \"/share/locale\"):\n return gettext.translation(\"bridgedb\", localedir=localedir, \n languages=[lang], fallback=\"en\")", "def init_language(self):\n\n if 'HTTP_COOKIE' in os.environ:\n cookies = os.environ['HTTP_COOKIE'].split(';')\n for cookie in cookies:\n (key, value) = cookie.split('=')\n if key == Intuition.COOKIE_USERLANG:\n return value\n \n return self.default_language", "def default(self):\n return self.get(name='Unknown')", "def __getitem__(self, lang):\n return self.__registry[lang]", "def getFromDefaults(inDict, inKey, inLastDefault, *args):\n\n if inKey in inDict:\n return inDict[inKey]\n\n for defaultDict in args:\n if inKey in defaultDict:\n return defaultDict[inKey]\n\n return inLastDefault", "def _get_default(ddict, key, default):\n if ddict is None or key not in ddict or ddict[key] is None:\n return default\n return ddict[key]", "def lookup_some_key(what, where, default=None):\n for w in what:\n try:\n return where[w]\n except KeyError:\n pass\n return default", "def get(name, default=None):", "def AlternativeNames(self, default=[None]):\n return self.data.get('alternative_names', default)", "def default():", "def loadDic(lang = \"en\"):\n\t# import random library to make choice\n\tfrom random import choice\n\t# Read dictionary line by line and turn into list\n\tdictionary = open(lang+\"_dict.txt\").readlines()\n\t# return random entry with \\n (new line) character stripped and made lower-case (this could be changed if dictionary is double checked for desired case, or case is made non-important by letter guesser aspect of program)\n\treturn choice(dictionary).rstrip().lower()", "def 
_translation(basename, props_dir, languages, key_language=None):\n props_dir = os.path.abspath(props_dir)\n if os.path.isfile(props_dir):\n props_dir = os.path.dirname(props_dir)\n trans = None\n use_key_as_lang = False\n for lang in languages:\n while True:\n trans = _try_file \\\n (props_dir, basename + \"_\" + lang + \".properties\", lang, trans)\n # Use identity mapping instead (or in addition to) file?\n if lang == key_language:\n use_key_as_lang = True\n # We need no more fallbacks after identity mapping\n break;\n lang_up = lang.rsplit(\"_\", 1)[0]\n if lang_up == lang:\n break\n lang = lang_up\n # Finally try properties file without language specification\n trans = _try_file(props_dir, basename + \".properties\", None, trans)\n if trans:\n trans._add_fallback_unchecked(BaseTranslations()) # last resort\n else:\n if use_key_as_lang:\n trans = BaseTranslations(key_language)\n else:\n trans = BaseTranslations()\n return trans", "def default_language_tag(self):\n return self.properties.get(\"defaultLanguageTag\", None)", "def get_default_language():\n utility = queryUtility(ILanguageAvailability)\n if utility is not None:\n return utility.getDefaultLanguage()\n return DEFAULT_LANGUAGE", "def load_lang(lang, apps=None):\n\n\tif lang=='en':\n\t\treturn {}\n\n\tout = frappe.cache().hget(\"lang_full_dict\", lang, shared=True)\n\n\tif not out:\n\t\tout = {}\n\t\tfor app in (apps or frappe.get_all_apps(True)):\n\t\t\tpath = os.path.join(frappe.get_pymodule_path(app), \"translations\", lang + \".json\")\n\t\t\tout.update(get_translation_dict_from_file(path, lang, app) or {})\n\n\t\tif '-' in lang:\n\t\t\tparent = lang.split('-')[0]\n\t\t\tparent_out = load_lang(parent)\n\t\t\tparent_out.update(out)\n\t\t\tout = parent_out\n\n\t\tfrappe.cache().hset(\"lang_full_dict\", lang, out, shared=True)\n\n\treturn out or {}", "def context_or_settings(context, name):\n if name in context:\n return context[name]\n return getattr(settings, \"DEFAULT_\" + name.upper())", "def test_translate_unique_langs(self):\n\n trans_msgs_dict = MessageController.translate_unique_langs({'2': 'es', '4': 'fr'}, \n 'hi', 'en', False, False)\n\n self.assertEqual(trans_msgs_dict, {'es': u'{hola}', 'fr': u'salut'})", "def lang_to_fieldname(l):\r\n global searchable_langs\r\n\r\n code = l[:2]\r\n\r\n if code in searchable_langs:\r\n return (\"contents_%s\" % code)\r\n else:\r\n return \"contents\"", "def GetEnvironFallback(var_list, default):\n for var in var_list:\n if var in os.environ:\n return os.environ[var]\n return default", "def _label_language_swap(self, languagename, curr_lang):\n chrdict = self._chrdict\n varlist = self._varlist\n vlblist = self._vlblist\n lbllist = self._lbllist\n \n old_varlab_key = \"_lang_v_\" + curr_lang\n old_vallab_key = \"_lang_l_\" + curr_lang\n \n new_varlab_key = \"_lang_v_\" + languagename\n new_vallab_key = \"_lang_l_\" + languagename\n \n # Replace data label and _lang_c. 
No need to set _lang_list: \n # can only swap between two defined languages.\n dta_dict = chrdict[\"_dta\"]\n dta_dict[\"_lang_c\"] = languagename\n if self._data_label != '':\n dta_dict[old_varlab_key] = self._data_label\n self._data_label = (dta_dict.pop(new_varlab_key) \n if new_varlab_key in dta_dict else '')\n \n # put current variable and value labels in chrdict \n # and replace with languagename's\n for varname, i in zip(varlist, range(self._nvar)):\n varlab = vlblist[i]\n vallab = lbllist[i]\n \n if varname not in chrdict: # then nothing to retreive\n if varlab == '' and vallab == '': # then nothing to store\n continue\n chrdict[varname] = {}\n \n var_dict = chrdict[varname]\n \n # store current if non-empty\n if varlab != '': var_dict[old_varlab_key] = varlab\n if vallab != '': var_dict[old_vallab_key] = vallab\n \n # set languagename's labels as current\n vlblist[i] = (var_dict.pop(new_varlab_key) \n if new_varlab_key in var_dict else '')\n lbllist[i] = (var_dict.pop(new_vallab_key) \n if new_vallab_key in var_dict else '')\n \n # delete sub-dict from chrdict if empty\n if len(var_dict) == 0:\n del chrdict[varname]", "def context_or_settings(context, name):\n if name in context:\n return context[name]\n return getattr(settings, 'DEFAULT_' + name.upper())", "def _load_regional(self):\n# global approved, conflicts, suggestions, unknown, cldr, current\n start = self.lblFallback['text'].find('=>') + 2\n if self.lblFallback['text'][start:]:\n self.preferred.set(self.lblFallback['text'][start:])\n pass", "def eng_to_i18n(string, map):\r\n\r\n i18n = None\r\n for k, v in map.items():\r\n if v == string:\r\n i18n = k\r\n break\r\n return i18n", "def dict_get_ignore_case(d: Mapping[str, V], key: str,\n default: Optional[V] = None) -> Optional[V]:\n\n try: # try O(1) just in case\n return d[key]\n except KeyError:\n pass\n\n key_lower = key.lower()\n for k, v in d.items():\n if k.lower() == key_lower:\n return v\n\n return default", "def Language(self, default=None):\n return self.data.get('language', default)", "def get_locale():\n localLang = request.args.get('locale')\n supportLang = app.config['LANGUAGES']\n if localLang in supportLang:\n return localLang\n userId = request.args.get('login_as')\n if userId:\n localLang = users[int(userId)]['locale']\n if localLang in supportLang:\n return localLang\n localLang = request.headers.get('locale')\n if localLang in supportLang:\n return localLang\n return request.accept_languages.best_match(app.config['LANGUAGES'])", "def __init__(self, langConf: str) -> None:\n with open(r\"localization\\localization.json\", \"rt\", encoding=\"utf-8\") as lang:\n try:\n self.translation = json.load(lang)[langConf]\n except KeyError:\n self.translation = json.load(lang)[\"config\"][\"default\"]", "def get_default(self, stmt, slist):\n for s in slist:\n if s.keyword == \"default\": return s.arg\n dst = stmt.search_one(\"default\")\n if dst: return dst.arg\n return None", "def get_language_key(host, domain, user):\n\n # Get lang from authenticated user\n if not user.is_anonymous():\n value = user.language\n\n # Get lang based on request host and global language settings\n else:\n current_subdomain = host[:-len(domain) - 1]\n default_language = settings.LANGUAGE_CODE\n valid_languages = [l[0] for l in settings.LANGUAGES]\n valid_subdomains = list(settings.SUBDOMAIN_URLCONFS)\n default_language_domains = []\n\n for d in valid_subdomains:\n if (d is default_language) or (d not in valid_languages):\n default_language_domains.append(d)\n\n if current_subdomain in 
default_language_domains:\n value = default_language\n else:\n value = current_subdomain\n\n return value", "def test_get_with_default(self):\n self.assertEqual(self.config.get('basic','salutation'),None)\n self.assertEqual(self.config.get('basic','salutation','bonjour'),\n 'bonjour')", "def get_language(mgroups):\n\n if mgroups:\n lang = mgroups[0].strip('[').strip(']')\n return lang.lower().strip()\n return None", "def get_langs(id):", "def make_translated_text():\n return {\n code: ''\n for code, name\n in settings.LANGUAGES\n }", "def get_default(name, value):\n return os.environ.get('EXAMPLE_{}'.format(name.upper()), value)", "def extract_lang(lang, lang_available):\n reg = r\"\"\"[a-z]{2}[_][A-Z]{2}\"\"\" # xx_XX\n lang_user = re.findall(reg, lang.replace(\"-\", \"_\"))\n # code below will list matching languages to\n # first two letters (\"en\") instead of whole\n # language code (\"en_GB\", \"en_US\", etc.).\n # Not really a problem when we've only 2 languages...\n lang_available_user = list()\n for l in lang_user:\n for lat in lang_available:\n if l[:2] in lat:\n lang_available_user.append(lat)\n return lang_available_user[0]", "def set_default_language(request):\n response = HttpResponseRedirect(get_redirect_url(request))\n\n if request.method == 'POST':\n lang_code = request.POST.get('language', None)\n if lang_code and check_for_language(lang_code):\n request.user.moderator_profile.language = lang_code\n request.user.moderator_profile.save()\n messages.success(request, _(\"Your default language has been updated.\"))\n\n return response", "def find_new_values(data, values, key):\n new_values = {}\n for lang, value in values.iteritems():\n if lang not in data.get(key).keys():\n new_values[lang] = value['value']\n return new_values", "def default_values():\n return pad_keys({}, default_basenames())", "def Languages(self, default=[\"en\"]):\n return self.data.get('metadata', {}).get('languages', default)", "def my(d,k):\n try:\n return d[k]\n except KeyError:\n return CONFIG_DEFAULTS[k]", "def search_lang(self,strz):\n\t\tfor lang in languages: #languages = list of allow lang words\n\t\t\tif lang in strz:\n\t\t\t\tif len(self.language)>0:\n\t\t\t\t\tself.language+='.'+lang.replace(\".\",\"\")\n\t\t\t\telse:\n\t\t\t\t\tself.language+=lang.replace(\".\",\"\")\n\t\t\t\tstrz =strz.replace(lang,\"\")\n\t\treturn strz", "def Default():\n return _DEFAULT", "def language():\r\n\r\n cursor.execute('SELECT name from languages order by RANDOM() limit 1;')\r\n return cursor.fetchone()[0]", "def lookup(key, default=None):\n def _lookup(mapping):\n return mapping.get(key, default)\n return _lookup", "def _load_transliterated_regional(self):\n# global approved, conflicts, suggestions, unknown, cldr, current\n start = self.lblFallback['text'].find('=>') + 2\n if self.lblFallback['text'][start:]:\n self.preferred.set(\\\n self._transliterate_text(self.lblFallback['text'][start:]))\n pass", "def setdefault(*dicts):\n param_complete = dict(dicts[0])\n for d in dicts[1:]:\n for k,v in d.items():\n param_complete.setdefault(k, v)\n\n return param_complete", "def get_lang_code(lang_code):\r\n if lang_code not in constants.SUPPORTED_LANG_CODES_ANALYZERS:\r\n return constants.FALLBACK_LANG_CODE\r\n return lang_code", "def get(self, key: str, default: Optional[str] = None) -> Optional[str]:\n if key == \"Dinosaur\":\n return \"NewDinosaur\"\n return key", "def test_fallback_language(self):\n x = SimpleModel()\n x.set_current_language(self.conf_fallback)\n x.tr_title = \"TITLE_FALLBACK\"\n\n 
x.set_current_language(self.other_lang1)\n x.tr_title = \"TITLE_XX\"\n x.save()\n\n with translation.override(self.other_lang2):\n x = SimpleModel.objects.get(pk=x.pk)\n self.assertEqual(x.tr_title, \"TITLE_FALLBACK\")", "def get_label_by_language(language):\n\n if language in [\"en4\", \"en5\", \"en15\"]:\n plot_label = \"Supreme Court EN\"\n shade = True\n ls = \"-\"\n c = \"C2\"\n elif language in [\"en2\", \"en8\", \"en18\"]:\n plot_label = \"EuroParl EN\"\n shade = True\n ls = \"-\"\n c = \"C8\"\n\n elif language in [\"de2\", \"de8\", \"de18\"]:\n plot_label = \"EuroParl DE\"\n shade = True\n ls = \"-\"\n c = \"C4\"\n\n elif language in [\"de5\", \"de15\"]:\n plot_label = \"BGH Strafsenat\"\n shade = True\n ls = \"-\"\n c = \"C0\"\n\n elif language in [\"de6\", \"de16\"]:\n plot_label = \"BGH Zivilsenat\"\n shade = True\n ls = \"-\"\n c = \"C1\"\n\n elif language in [\"de7\", \"de17\"]:\n plot_label = \"BGH DE\"\n shade = True\n ls = \"-\"\n\n else:\n plot_label = language\n shade = True\n ls = \"-\"\n c = \"C1\"\n\n if language in [\"de15\", \"de16\", \"en15\", \"de17\", \"de18\", \"en18\"]:\n plot_label += \" shuffled\"\n\n return plot_label, shade, ls, c", "def get_user_language() -> str:\n languages = {\n \"arabic\": \"arb\",\n \"chinese\": \"cmn-CN\",\n \"danish\": \"da-DK\",\n \"english\": \"en-GB\",\n \"french\": \"fr-FR\",\n \"german\": \"de-DE\",\n \"portuguese\": \"pl-PT\",\n \"spanish\": \"es-ES\"\n }\n textlang = input(\"What language do you want to hear?\")\n try:\n return languages[textlang.lower()]\n except KeyError as e:\n print(\"Enter a valid language.\")\n sys.exit(1)", "def get_proper_language():\n lang = config['summernote'].get('lang')\n\n if not lang:\n return config['lang_matches'].get(get_language(), 'en-US')\n\n return lang", "def validate_language(language):\n\n try:\n lang_code = language_dict[language]\n except KeyError:\n lang_code = None\n return lang_code", "def find_langs(args):\n infile = args.source\n langs = {}\n for line in infile:\n name_and_files = line.split()\n name = name_and_files[0]\n if name not in langs:\n langs[name] = []\n langs[name] += read_files.filter_files(name_and_files[1:])\n langs[args.unknown] += read_files.filter_files(args.classify)\n return langs", "def default_from_golang(lines):\n\n for i in lines[\n lines.index(\"golang/golang.sh\\n\"):\n lines.index(\"Default-Golang-Server\\n\")]:\n\n if i.startswith(\"BenchmarkBuild\"):\n num = re.findall(\"\\d+\\.?\\d* ns/op\", i)\n data.get(\"default\").get(\"golang\").update(\n {\"BenchmarkBuild\": num[0][:-6]}\n )\n\n if i.startswith(\"BenchmarkGarbage\"):\n num = re.findall(\"\\d+\\.?\\d* ns/op\", i)\n data.get(\"default\").get(\"golang\").update(\n {\"BenchmarkGarbage\": num[0][:-6]}\n )\n\n if i.startswith(\"BenchmarkHTTP\"):\n num = re.findall(\"\\d+\\.?\\d* ns/op\", i)\n data.get(\"default\").get(\"golang\").update(\n {\"BenchmarkHTTP\": num[0][:-6]}\n )\n\n if i.startswith(\"BenchmarkJSON\"):\n num = re.findall(\"\\d+\\.?\\d* ns/op\", i)\n data.get(\"default\").get(\"golang\").update(\n {\"BenchmarkJSON\": num[0][:-6]}\n )", "def google_default(arg):\n # Leave this at the bottom\n if arg in bad_terms:\n return 'https://www.youtube.com/watch?v=dQw4w9WgXcQ'\n else:\n # check words\n words = arg.split()\n if len(words) == 1 and words[0] in bad_terms:\n return 'https://www.youtube.com/watch?v=dQw4w9WgXcQ'\n else:\n score = 0\n non_bad_word_score = 0\n for w in words:\n if w in bad_terms:\n score+=1\n else:\n non_bad_word_score +=1\n if score == 2:\n break\n if score == 2 and 
non_bad_word_score < 4:\n return 'https://www.youtube.com/watch?v=dQw4w9WgXcQ'\n\n return g(arg)[0]", "def addr_lang(key):\n known = [\"de\", \"cn\", \"en\"]\n line = ''\n while len(line) != 2:\n rawline = input(\n 'please enter language '\n + str(known)\n + ': '\n )\n line = rawline.strip()\n if line not in known:\n line = ''\n print(\"Language key unknown.\")\n return line", "def get_locale():\n setting = Setting.query.filter(Setting.name == 'default_language').first()\n\n if setting is not None:\n return setting.value\n\n # Return default language when none found\n return 'en'", "def determine_language_prefix():\n # TODO(eholder): Figure out a more appropriate way to map the header into\n # our set of prefixes. Since I don't know what those prefixes are yet, this\n # is intentionally very generic. I also need to decide if this should just be\n # done once as part of the login flow rather than checking every request.\n # Checking every request makes this easier to test and change though in the\n # meantime.\n languages_string = request.headers.get('Accept-Language')\n\n # If there is no header, use the default.\n if languages_string is None:\n flask.session['language_prefix'] = DEFAULT_LANGUAGE_PREFIX\n return\n\n languages = languages_string.split(',')\n if languages[0] in ACCEPTABLE_LANGUAGE_PREFIXES:\n flask.session['language_prefix'] = languages[0]\n return\n\n language_sections = languages[0].split(';')\n if language_sections[0] in ACCEPTABLE_LANGUAGE_PREFIXES:\n flask.session['language_prefix'] = language_sections[0]\n return\n\n language_subsections = language_sections[0].split('-')\n if language_subsections[0] in ACCEPTABLE_LANGUAGE_PREFIXES:\n flask.session['language_prefix'] = language_subsections[0]\n return\n\n flask.session['language_prefix'] = DEFAULT_LANGUAGE_PREFIX", "def translate_one(text, lang, format='plain'):\n if not isinstance(text, basestring):\n raise TypeError('Text should be a string, not a %s' % type(text).__name__)\n return translate_many(text, lang, format)[0]", "def clean_lang(self):\n lang = self.cleaned_data.get('lang', None)\n if not lang in self.greetings:\n raise forms.ValidationError(\n \"We couldn't find the language you selected {}\"\n \" Please select another\".format(lang)\n )\n return lang", "def get_default_prefix(path):\n if path in prefixes_dict.keys():\n return prefixes_dict[path]\n else:\n return ''", "def get_language_name(iso_code):\n if iso_code not in LANGUAGES_BY_CODE:\n try:\n lang = iso639.languages.get(part3=iso_code)\n except KeyError:\n lang = None\n\n if lang:\n # we only show up to the first semi or paren\n lang = re.split(r\";|\\(\", lang.name)[0].strip()\n\n LANGUAGES_BY_CODE[iso_code] = lang\n\n return LANGUAGES_BY_CODE[iso_code]", "def get_language(lang_list: list = None) -> str:\n\tis_logged_in = frappe.session.user != \"Guest\"\n\n\t# fetch language from form_dict\n\tif frappe.form_dict._lang:\n\t\tlanguage = get_lang_code(frappe.form_dict._lang or get_parent_language(frappe.form_dict._lang))\n\t\tif language:\n\t\t\treturn language\n\n\t# use language set in User or System Settings if user is logged in\n\tif is_logged_in:\n\t\treturn frappe.local.lang\n\n\tlang_set = set(lang_list or get_all_languages() or [])\n\n\t# fetch language from cookie\n\tpreferred_language_cookie = get_preferred_language_cookie()\n\n\tif preferred_language_cookie:\n\t\tif preferred_language_cookie in lang_set:\n\t\t\treturn preferred_language_cookie\n\n\t\tparent_language = get_parent_language(language)\n\t\tif parent_language in 
lang_set:\n\t\t\treturn parent_language\n\n\t# fetch language from request headers\n\taccept_language = list(frappe.request.accept_languages.values())\n\n\tfor language in accept_language:\n\t\tif language in lang_set:\n\t\t\treturn language\n\n\t\tparent_language = get_parent_language(language)\n\t\tif parent_language in lang_set:\n\t\t\treturn parent_language\n\n\t# fallback to language set in System Settings or \"en\"\n\treturn frappe.db.get_default(\"lang\") or \"en\"", "def langs(cls):\n codes = cls.codes[:]\n\n if hasattr(cls, 'test_codes'):\n codes += cls.test_codes\n\n codes += cls.closed_wikis\n\n # shortcut this classproperty\n cls.langs = {code: f'{code}.{cls.domain}' for code in codes}\n cls.langs.update({alias: f'{code}.{cls.domain}'\n for alias, code in cls.code_aliases.items()})\n\n return cls.langs" ]
[ "0.60933506", "0.60726446", "0.6008112", "0.5990976", "0.59868455", "0.5985267", "0.5922496", "0.590694", "0.58639705", "0.5810866", "0.5780663", "0.5768472", "0.57512575", "0.5717193", "0.5704346", "0.5672763", "0.5649485", "0.56342536", "0.563318", "0.5624843", "0.5548256", "0.5538525", "0.55365795", "0.55344003", "0.55340284", "0.54796356", "0.5478518", "0.5471905", "0.54464394", "0.5413185", "0.5401369", "0.53692406", "0.53657305", "0.5335276", "0.5328378", "0.53252685", "0.53243136", "0.53145015", "0.5311085", "0.5295975", "0.5275004", "0.5251926", "0.52492833", "0.5243105", "0.5236167", "0.523459", "0.5228838", "0.52277255", "0.5225259", "0.5225099", "0.5214662", "0.52086693", "0.5201313", "0.5195426", "0.518137", "0.5179792", "0.5174064", "0.5173832", "0.51685876", "0.5163677", "0.5162469", "0.5160426", "0.5144419", "0.51442134", "0.51413864", "0.513618", "0.5126461", "0.51228863", "0.512186", "0.51217645", "0.5115921", "0.5112101", "0.51093346", "0.5106819", "0.51061994", "0.51035607", "0.5103121", "0.51018435", "0.5088298", "0.50840396", "0.50782853", "0.5076223", "0.507012", "0.506019", "0.50598973", "0.5057558", "0.5055245", "0.5052051", "0.5047237", "0.5045389", "0.5041074", "0.50357044", "0.503006", "0.50213903", "0.5017319", "0.50158554", "0.5014279", "0.5010683", "0.49942297", "0.49939594" ]
0.6597497
0
Initializes an instance of the InstagramBot class.
def __init__(self, username = None, password = None): self.username = config['AUTH']['USERNAME'] self.password = config['AUTH']['PASSWORD'] self.login = config['URL']['LOGIN'] self.nav_url = config['URL']['NAV'] self.tag_url = config['URL']['TAGS'] self.direct_url = config['URL']['DM'] self.driver = webdriver.Chrome(config['ENVIRONMENT']['CHROMEDRIVER']) self.stay_logged = False self.api = InstagramAPI(self.username, self.password)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start(self):\r\n self._instagram_api = InstagramAPI(mongo_api=self._mongo_api)\r\n self._inst_run()", "def __init__(self, bot=BNBot):\n self.bot = bot", "def __init__(self, client_id=None, access_token=None):\r\n if not client_id and not access_token:\r\n raise TypeError('__init__() must be passed at least one '\r\n 'of client_id, access_token')\r\n\r\n self.apiroot = 'https://api.instagram.com/v1'\r\n\r\n self.client_id = client_id\r\n self.access_token = access_token\r\n self.add_filter(self.add_authorization)", "def __init__(self, bot):\n self.bot = bot", "def __init__(self, bot):\n self.bot = bot", "def instagram(self, instagram):\n\n self._instagram = instagram", "def __init__(self, bot: DreamBot) -> None:\n\n self.bot = bot", "def __init__(self, bot: DreamBot) -> None:\n\n self.bot = bot", "def __init__(self, username, password, bot, channel):\n super().__init__(username, password)\n\n self.queue = deque()\n self.ingame_cog = Ingame(bot)\n\n self.bot = bot\n self.channel = channel\n self.chat_breakout = False\n self.loop = asyncio.get_event_loop()\n self.ingame_cog.is_pycraft_instance = True", "def __init__(self, mongo_api, cnn_model):\r\n self._mongo_api = mongo_api\r\n self._cnn_model = cnn_model\r\n\r\n self._instagram_api = None", "def __init__(self, config):\n self._slack_client = self._connect(config[\"slack_bot_token\"])\n self.bot_id = self._get_user_id()\n self.default_channel = config[\"default_channel\"]", "def __init__(self):\n self.site = pywikibot.Site(u'commons', u'commons')\n self.generator = self.getGenerator()", "def __init__(self):\n\n # configure logging\n logging.getLogger(\"requests\").setLevel(logging.WARNING)\n logging.getLogger(\"urllib3\").setLevel(logging.WARNING)\n self.logger = logging.getLogger(\"chaturbate\")\n self.logger.setLevel(logging.DEBUG)\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\"%(asctime)s %(levelname)s %(message)s\",\n \"%Y-%m-%d %H:%M:%S\")\n console_handler.setFormatter(formatter)\n self.logger.addHandler(console_handler)\n\n # read configuration\n self.config_parser = ConfigParser.ConfigParser()\n self.config_parser.read(\"config.ini\")\n\n # is pushbullet is enabled on the config\n if self.config_parser.get('PushBullet', 'enable') == 'true':\n # try to import it and connect\n try:\n import pushbullet\n self.push_bullet = pushbullet.Pushbullet(\n self.config_parser.get('PushBullet', 'access_token'))\n except (ImportError, pushbullet.InvalidKeyError):\n self.push_bullet = None\n\n # create a requests object that has sessions\n self.req = requests.Session()\n\n self.username = self.config_parser.get('User', 'username')\n self.password = self.config_parser.get('User', 'password')", "def initialize(self):\n self.voteskips = []\n self.response = {}\n self.route = {}\n self.userlist = []\n self.poll = []\n self.media = []\n self.init = False\n self.question = None\n self.jumble = None\n self.imgur = None", "def __init__(self, address=('', 50000), authkey=b'tradingbot'):\n _ClientBot.__init__(self, address=address, authkey=authkey)", "def __init__(self):\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n self.api = tweepy.API(auth)", "def __init__(self):\n\n # This environment variable should be set before using the bot\n self.token = os.environ['STATS_BOT_TOKEN']\n\n\n # These will be checked against as substrings within each\n # message, so different variations are not required if their\n # 
radix is present (e.g. \"all\" covers \"/all\" and \"ball\")\n self.menu_trigger = ['/all', '/stats']\n self.loan_stats_trigger = ['/loans']\n self.il_trigger = ['/IL']\n self.assets_trigger = ['/assets']\n\n\n # Stops runtime if the token has not been set\n if self.token is None:\n raise RuntimeError(\n \"FATAL: No token was found. \" + \\\n \"You might need to specify one or more environment variables.\")\n\n # Configures logging in debug level to check for errors\n logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO)", "def __init__(self):\n log.msg(\"Initializing Twitch parser.\")\n\n # initialize our data members\n self.streams = tuple()\n self.crc32 = 0", "def __init__(self):\n super().__init__()\n\n # Will only reply to every 3rd or so tweet, defined in settings\n self.received_tweet_count = 0\n\n # Twitter api init\n self.auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n self.auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)\n self.twitter_api = tweepy.API(self.auth)\n\n print('Authenticated, creating stream...')\n\n self._init_stream()", "def __init__(self):\n\n self.db = ImageDB()\n self.vitess = VitessConn()\n self.minio = MinioConn()", "def __init__(self, imag_bot, block_, player_loc):\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc", "def __init__(self) -> None:\n super().__init__()\n self.handler = IxnHandler()", "def init(self, sevabot):\n\n logger.debug(\"GiantbombHandler init\")\n self.sevabot = sevabot\n self.skype = sevabot.getSkype()", "def init_bot():\n\n # We create the Reddit instance.\n reddit = praw.Reddit(client_id=config.APP_ID, client_secret=config.APP_SECRET,\n user_agent=config.USER_AGENT, username=config.REDDIT_USERNAME,\n password=config.REDDIT_PASSWORD)\n\n # Check if we have the 3 required arguments.\n if len(sys.argv) == 3:\n\n method = sys.argv[1]\n day = sys.argv[2]\n\n if method == \"sticky\":\n if day == \"monday\":\n post_monday(reddit)\n elif day == \"wednesday\":\n post_wednesday(reddit)\n elif day == \"friday\":\n post_friday(reddit)\n\n elif method == \"unsticky\":\n if day == \"monday\":\n unsticky_post(reddit, MONDAY_FILE)\n elif day == \"wednesday\":\n unsticky_post(reddit, WEDNESDAY_FILE)\n elif day == \"friday\":\n unsticky_post(reddit, FRIDAY_FILE)", "def __init__(self):\n self.emotions_list = EmotionsList('NRC-Emotion-Intensity-Lexicon-v1.txt')\n self.tweets_list = None\n self.nickname = None", "def __init__(self, bot: BunkBot, channels: ChannelService):\r\n self.bot: BunkBot = bot\r\n self.message: Message = None\r\n self.channels: ChannelService = channels\r\n self.yt_result: YoutubeResult = YoutubeResult()\r\n self.yt_link: str = \"\"", "def __init__(self, bot: commands.Bot):\n\n super().__init__(bot)\n\n # Init instance vars\n self.cookie_data = self._parse_cookie_data()\n self.cookie_available = False\n self.cookie_prepared_timestamp = None\n self.cookie_drop_delay_hours = None\n self.cookie_drop_delay_minutes = None\n self.cookie_type = None", "def __init__(self, config):\n self.config = config\n\n self.slack_client = SlackClient(self.config.SLACK_TOKEN)", "def __init__(self, *, specified_loop=None):\n intents = discord.Intents(\n members=True,\n presences=True,\n guilds=True,\n emojis=True,\n invites=True,\n messages=True,\n reactions=True,\n voice_states=True,\n )\n loop = asyncio.get_event_loop()\n session = aiohttp.ClientSession(loop=loop)\n\n # Load all the environment variables\n load_dotenv(\"config/Bot/token.env\")\n 
load_dotenv(\"config/Apis/tokens.env\")\n load_dotenv(\"config/Database/db.env\")\n\n # Read the emoji file\n self.emoji_config = CustomEmojis.from_json(read_file(\"config/General/emojis.json\"))\n # Read the config file\n self.config = Config.from_json(read_file(\"config/General/config.json\"))\n\n # Set the HTTPException error codes dict to a custom property for easy access\n self.httpexception_codes = load_json(\"assets/data/httpexception_codes.json\", make_keys_int=True)\n\n # We save the bot start time to a variable\n self.started_at = datetime.datetime.utcnow()\n\n # APIs\n self.cleverbot = async_cleverbot.Cleverbot(\n os.environ[\"cleverbot\"],\n session=session,\n context=async_cleverbot.DictContext(),\n )\n self.dagpi = asyncdagpi.Client(os.environ[\"dagpi\"])\n self.google_api = async_cse.Search(os.environ[\"google_search\"], session=session)\n self.translate_api = aiogoogletrans.Translator()\n self.aki = Akinator()\n self.apis = [\"OMDB\", \"tenor\", \"owlbot\", \"gender_api\", \"nasa\"]\n self.api_keys = {api: os.environ[api.lower()] for api in self.apis}\n\n # For the snipe command\n self.snipes = {}\n\n # For tracking commands\n self.command_uses = {}\n\n # For api requests\n self.session = session\n\n super().__init__(\n command_prefix=get_prefix,\n case_insensitive=True,\n intents=intents,\n session=session,\n loop=specified_loop or loop,\n strip_after_prefix=True,\n owner_ids=self.config.owner_ids,\n )\n\n # For before_invoke\n self._before_invoke = self.before_invoke\n # For blacklisted check\n self._checks.append(self.bot_check)", "def init(self) -> None:\n ...", "def __init__(self, network: Network):\n if LOG[\"ExperimentAI\"]:\n print(\"[ExperimentAI] Initializing AI\")\n self.network = network", "async def init(self) -> None:", "async def init(self) -> None:", "def __init__(self):\n # keys and tokens from the Twitter Dev Console\n key = provide_keys('males')\n\n consumer_key = key['consumer_key']\n consumer_secret = key['consumer_secret']\n access_token = key['access_token_key']\n access_token_secret = key['access_token_secret']\n\n # attempt authentication\n\n # create OAuthHandler object\n self.auth = OAuthHandler(consumer_key, consumer_secret)\n\n # set access token and secret\n self.auth.set_access_token(access_token, access_token_secret)\n\n try:\n # create tweepy API object to fetch tweets\n self.api = tweepy.API(self.auth)\n\n except:\n print(\"Error: Authentication Failed\")\n sys.exit(-1)", "def __init__(self, address=('', 50000), authkey=b'tradingbot'):\n self.id = 0\n _ClientBot.__init__(self, address=address, authkey=authkey)\n self.conn_tbm = ConnTradingBotManager(self.id)", "def __init__(self, client, userID, robotID):\r\n client.registerConnection(self)\r\n self._client = client\r\n self._userID = userID\r\n self._robotID = robotID\r\n self._avatar = None\r\n self._view = None\r\n self._namespace = None\r\n self._protocol = None", "async def _init(self, **kwargs):", "def __init__(self, username=None, password=None, user_agent='pyadtpulse'):\n self._session = requests.Session()\n self._user_agent = user_agent\n self._api_version = None\n\n self._sync_timestamp = 0\n self._sync_token = '0-0-0'\n\n self._sites = []\n\n self._api_host = DEFAULT_API_HOST\n \n # authenticate the user\n self._authenticated = False\n self._username = username\n self._password = password\n\n self.login()", "def bot_init():\n client.Console.Say('Hello World')\n global player\n player = client.GetPlayer()", "def __init__(self):\n\n self.host=\"localhost\"\n \"\"\"\n :annotation = 
\"localhost\":\n defaults to \"localhost\". At this time MumbleClient is ipv4 only\n \"\"\"\n\n self.port=64738\n self.nickname=\"MumblePythonBot\"\n self.SSLOptions=CertificateOptions()\n self.password=None", "def __init__(self):\n super().__init__(\n websocket=Mock(),\n slack_access_token=SLACK_ACCESS,\n github_access_token=GITHUB_ACCESS,\n timezone=pytz.timezone(\"America/New_York\"),\n repos_info=TEST_REPOS_INFO,\n )\n\n self.slack_users = []\n self.messages = []", "def __init__(self, config):\n self._config = config\n\n self.twitter = Twitter(auth=OAuth(\n self._config['twitter']['oauth'][0],\n self._config['twitter']['oauth'][1],\n self._config['twitter']['oauth'][2],\n self._config['twitter']['oauth'][3]\n ))\n\n self.sender = Mailer('smtp.gmail.com', use_tls=True, port=587)\n self.sender.login(self._config['mail']['address'], self._config['mail']['pass'])", "def __init__(self, username):\n self.username = username\n\n self._requests = requests.Session()\n self._requests.headers[\"User-Agent\"] = constants.USER_AGENT\n self._requests.headers[\"Referer\"] = constants.FA_ROOT + \"/\"\n\n self._rate_limit_start = datetime.datetime.now()\n self._rate_limit_count = 0\n\n self._folders = {}\n self._submissions = {}\n\n self._gallery = None\n self._scraps = None\n self._root_folders = None", "def initialize(self) -> None:\n pass", "def initialize(self, *args, **kwargs):\n pass", "def init_bot(self):\n dispatcher = self.updater.dispatcher\n\n dispatcher.add_handler(CommandHandler(\"start\", self.on_bot_start))\n dispatcher.add_handler(CommandHandler(\"help\", self.on_bot_help))\n dispatcher.add_handler(CommandHandler(\"about\", self.on_bot_about))\n dispatcher.add_handler(CommandHandler(\"vreausaajut\", self.on_bot_offer_to_help))\n dispatcher.add_handler(CommandHandler(\"status\", self.on_status))\n dispatcher.add_handler(CommandHandler(\"Da\", self.on_accept))\n dispatcher.add_handler(CommandHandler(\"Nu\", self.on_reject))\n\n dispatcher.add_handler(CallbackQueryHandler(self.negotiate_time, pattern=\"^eta.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_dispatch, pattern=\"^caution.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_handle, pattern=\"^handle.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_wellbeing, pattern=\"^state.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_symptom, pattern=\"^symptom.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_wouldyou, pattern=\"^wouldyou.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_further, pattern=\"^further.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_activities, pattern=\"^assist.*\"))\n\n dispatcher.add_handler(MessageHandler(Filters.photo, self.on_photo))\n dispatcher.add_handler(MessageHandler(Filters.contact, self.on_contact))\n dispatcher.add_handler(MessageHandler(Filters.text, self.on_text_message))\n dispatcher.add_error_handler(self.on_bot_error)", "def __init__(self, **kwargs):\n\n session = vk_api.VkApi(**kwargs)\n try:\n session.auth(token_only=True)\n except vk_api.AuthError as error_msg:\n print(error_msg)\n raise\n self.api = session.get_api()", "def initialize(self, **kwargs: Any) -> None:\n pass", "def __init__(self, client_id, client_secret):\n self.client_id = client_id\n self.client_secret = client_secret\n self.token = None\n self.request_time = None\n self._initialized = False", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n 
pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def _setup_connection(cls):\n try:\n cls.imdb_access = imdb.IMDb()\n except imdb.IMDbError, err:\n print \"Problem with connectivity to imdb.com due to %s \" \\\n % (err)", "def __init__(self, email, password):\n _logger.debug('Entered Simplenote()')\n self.base_url = 'https://simple-note.appspot.com/api2/'\n self.email = email\n self.password = password\n self.authtok = ''\n self.api_count = 0", "def instagram(self):\n return self._instagram", "def __init__(self, reddit):\n self._reddit = reddit", "def __init__(self):\n self.load_objects()\n super(TwitterMain,self).__init__()", "async def initialize(self):", "def __init__(self, *args, **kwargs):\n self._initialize_protocols()\n super().__init__(*args, **kwargs)", "def __init__(self, poolbot):\n self.poolbot = poolbot", "def __init__(self):\n\n\t\tself.connection = self.get_connection()", "def __init__(self) -> None:\n self._public_id = 'daf1fbca87e94c9db377c98570e32ece'\n self._secret_id = '1a674398d1bb44859ccaa4488df1aaa9'\n self._redirect_uri = 'https://pass-post.netlify.app'", "def _initialize(self):\n self.send_init_command()", "def __init__(self):\n Session.SESSIONS_COUNT += 1\n self.channelCount = 0\n self._channels = []\n self._error = ''\n self._client = None", "def __init__(self):\r\n # keys and tokens from the Twitter Dev Console\r\n consumer_key = 'e1I0CSqgSOGxhH940cey1PR50'\r\n consumer_secret = 'APZE7kT2MgJsledQszLbNVcZZEhCUDX3NKAseXTjnsEcggUAkf'\r\n access_token = '876294238144786432-Q9PfwxPd4T7OdYO9hXiFyVDO38Q8jZV'\r\n access_token_secret = 'e0RhKgnLLyHnEOrWS92Tw0pKv5hWrN3chjp4Azm4NayOG'\r\n\r\n # clean tween regular expression\r\n self.pattern = re.compile('(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+://\\S+)')\r\n\r\n # attempt authentication\r\n try:\r\n # create OAuthHandler object\r\n self.auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\r\n # set access token and secret\r\n self.auth.set_access_token(access_token, access_token_secret)\r\n # create tweepy API object to fetch tweets\r\n self.api = tweepy.API(self.auth)\r\n logging.info(self.api.rate_limit_status()['resources']['search'])\r\n except:\r\n logging.error(\"Error: Authentication Failed\")", "def __init__(self):\n\n\t\tself.account_sid = os.environ['TWILIO_ACCOUNT_SID']\n\t\tself.auth_token = os.environ['TWILIO_AUTH_TOKEN']\n\t\tself.twilio_phone_number = os.environ['TWILIO_PHONE_NUMBER']\n\t\tself.client = Client(self.account_sid, self.auth_token)\n\n\t\tself.call_domain = 'http://twimlets.com/echo?Twiml='", "def setUp(self):\n self.bot = helpers.MockBot()\n self.bot.api_client.get = unittest.mock.AsyncMock()\n self.cog = information.Information(self.bot)\n self.member = helpers.MockMember(id=1234)", "def __init__(self):\n\n # TODO: Add login and data grab logic", "def __init__(self, api_key, api_secret, base_url=settings.TWITTER_API_URL):\n self.api_key = api_key\n self.api_secret = api_secret\n self.base_url = base_url\n self.bearer_token = None\n self.__auth = None\n self.get_bearer_token()\n self.set_requests_auth()\n self.session = requests_retry_session()", "def __init__(self, *args):\n _snap.TUNGraphNodeI_swiginit(self, _snap.new_TUNGraphNodeI(*args))", "async def initialize(self):\r\n self.access_token = await async_get_value(SPOTIFY_ACCESS_TOKEN)\r\n self.refresh_token = await async_get_value(SPOTIFY_REFRESH_TOKEN)\r\n self.should_poll = await async_get_value(SPOTIFY_SHOULD_POLL)\r\n request_code = self.get_currently_playing().status_code\r\n if request_code 
== requests.codes.ok or request_code == requests.codes.no_content:\r\n self.start_polling_and_refresh()\r\n return\r\n\r\n # Go through the oauth flow.\r\n self.auth_thread = StoppableThread(target=self.check_and_test_auth)\r\n self.auth_thread.start()\r\n return", "def default_client():\n\n credential_manager = CredentialManager()\n\n current_directory = os.path.abspath(inspect.getfile(inspect.currentframe()))\n save_file_path = os.path.dirname(current_directory)\n\n insta_username, insta_password = credential_manager.get_account('Instagram')\n \n mongo_client_host = InstagramAnalyzer.default_mongodb(credential_manager)\n\n\n client = InstagramAnalyzer(insta_username, insta_password,\n mongo_client_host, save_file_path)\n\n return client", "def __init__(self):\n\n self._session = requests.Session()", "def __init__(self, bot: commands.Bot):\n\n super().__init__(bot)\n self.current_prompt = ''", "def __init__(self, *kwargs):\n self.session = requests.Session()\n self.config_path = os.path.join(\n os.path.dirname(__file__), 'config.json')\n self.load_config()\n if self.application_token == '':\n self.set_application_token()\n self.token = self.get_token()\n self.get_settings()", "def __init__(self, **kwargs):\n self._username = kwargs.get('username', current_app.config.get('WORDAI_API_EMAIL', None))\n self._password = kwargs.get('password', current_app.config.get('WORDAI_API_PASSWORD', None))\n self._hash = kwargs.get('hash', current_app.config.get('WORDAI_API_KEY', None))", "def _init(self):\n pass", "def setup(self):\n\n self.parser = GingerIt()", "def __init__(self, auto_redirect=True):\n self.redirect = auto_redirect\n self.analytics_interface = AnalyticsInterface(settings.KONTAGENT_API_SERVER,\n settings.KONTAGENT_API_KEY)", "def __init__(self):\n self._discovered_adv: SwitchBotAdvertisement | None = None\n self._discovered_advs: dict[str, SwitchBotAdvertisement] = {}", "def __init__(self):\n #Connect to the twilio API\n with open(TWILIO_CREDENTIALS_PATH, \"r\") as f:\n self.config = {\"account_sid\": f.readline().strip(),\n \"auth_token\": f.readline().strip(),\n \"sender_number\": f.readline().strip()\n }\n\n if DEBUG:\n print(\"[DEBUG] CONFIG:{}\".format(self.config))\n\n #Connect to the firebase\n self.firebase = Firebase()\n self.firebase.connect()\n\n #Connect to the twilio API\n self.client = Client(self.config['account_sid'],self.config['auth_token'])\n self.running = True\n self.last_message_timing = datetime.datetime.now()", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def __init__(self,\n client_id,\n client_secret):\n self.__client_id = client_id\n self.__client_secret = client_secret", "def initialize(self):\n\n # --------- BEGIN YOUR CODE ----------\n\n # This is exactly the same as Human.initialize, just copy the code over\n\n # --------- END YOUR CODE ----------\n pass", "def __init__(self):\n self.api = Api(consumer_key=credentials[\"consumer_key\"],\n consumer_secret=credentials[\"consumer_secret\"],\n access_token_key=credentials[\"access_token_key\"],\n access_token_secret=credentials[\"access_token_secret\"])", "def __init__(self, settings):\n \n # storing otmbs settings\n self.settings = settings", "def __init__(self, TOKEN: str) -> None:\n super(TelegramBot, self).__init__()\n self.TOKEN = TOKEN\n self.URL = f\"https://api.telegram.org/bot{TOKEN}/\"\n logging.debug(\"Telegram Bot ready\")", "def init(self) -> None:", "def __init__(self, username):\n self.spotify = spotipy.Spotify(simple_auth_token(username))", 
"def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass" ]
[ "0.7040264", "0.6685926", "0.66698956", "0.6649861", "0.6649861", "0.6602354", "0.6402114", "0.6402114", "0.62316155", "0.614077", "0.6138122", "0.60492367", "0.60134923", "0.5993596", "0.59222096", "0.59110945", "0.5900702", "0.58524567", "0.5842189", "0.5841665", "0.58260006", "0.5819154", "0.5818025", "0.57928646", "0.57815546", "0.57677644", "0.5766395", "0.5744598", "0.5727863", "0.57224864", "0.5705709", "0.5702105", "0.5702105", "0.5659269", "0.5658016", "0.5651361", "0.5650908", "0.5650059", "0.563391", "0.56317043", "0.56312793", "0.5609235", "0.56075525", "0.5605794", "0.5604203", "0.56039065", "0.5587123", "0.55760896", "0.5561028", "0.5556905", "0.5556905", "0.5556905", "0.5556905", "0.5556905", "0.5556905", "0.5556905", "0.5556905", "0.5546655", "0.5545979", "0.55453324", "0.5543128", "0.55362487", "0.5534966", "0.55320615", "0.5531181", "0.5531095", "0.55285925", "0.5524645", "0.55196214", "0.5502091", "0.550208", "0.5490302", "0.54852384", "0.5472697", "0.5469489", "0.5462217", "0.5454386", "0.5452111", "0.5450501", "0.5450336", "0.5439483", "0.54392946", "0.5438777", "0.54302746", "0.54257214", "0.54247135", "0.542305", "0.542305", "0.542305", "0.54211795", "0.54159206", "0.5415796", "0.54151493", "0.5412098", "0.5410545", "0.54094553", "0.5404253", "0.5404253", "0.5404253", "0.5404253" ]
0.75979745
0
Method allows user to log in through the web
def login(self):
    self.driver.get(self.login)
    PAUSE = 2
    time.sleep(PAUSE)
    user_input = self.driver.find_element_by_name('username')
    pass_input = self.driver.find_element_by_name('password')
    login_button = self.driver.find_elements_by_xpath("//div[contains(text(),'Log In')]")[0]
    user_input.send_keys(self.username)
    pass_input.send_keys(self.password)
    login_button.click()
    time.sleep(PAUSE)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def login(self):", "def login():", "def login():", "def login(self):\n\t\treturn", "def log_in(self):\n\n # Get login page.\n self.get_endpoint(endpoint=self.config['paths']['login'])\n\n # Post log-in data.\n email_form = self.browser.find_element_by_xpath(\"//input[@id='email']\")\n pw_form = self.browser.find_element_by_xpath(\"//input[@id='password']\")\n email_form.send_keys(self.credentials['email'])\n pw_form.send_keys(self.credentials['password'])\n\n # Initial log-in returns /private endpoint.\n self.browser.find_element_by_xpath(\"//input[@type='submit']\").click()", "def do_login(self, backend, user):", "def login_user():\n pass", "def login(self):\n\t\tbot = self.bot\n\t\tbot.get(URL)\n\t\ttime.sleep(2)\n\t\tsign_in = bot.find_element_by_class_name(\"nav__button-secondary\").click()\n\t\ttime.sleep(2)\n\t\temail = bot.find_element_by_id(\"username\")\n\t\temail.send_keys(self.username)\n\t\ttime.sleep(2)\n\t\tpassword = bot.find_element_by_id(\"password\")\n\t\tpassword.send_keys(self.password)\n\t\ttime.sleep(2)\n\t\tsign_in = bot.find_element_by_class_name(\"btn__primary--large.from__button--floating\").click()", "def login(self):\r\n\r\n # Open browser with the login URL\r\n self.browser.open(self.config[\"base_url\"] + \"login\")\r\n\r\n # Select the login form\r\n self.browser.select_form('form[action=\"/login/\"]')\r\n\r\n # Fill the login form.\r\n self.browser[\"email\"] = self.config[\"email\"]\r\n self.browser[\"password\"] = self.config[\"password\"]\r\n\r\n # Submit form\r\n self.browser.submit_selected()", "def login():\n pass", "def login():\n login_page = Login()\n login_page.login_main_page()", "def login(self):\n self.open(base_url + '/login')\n self.type(\"#email\", test_user.email)\n self.type(\"#password\", test_user.password)\n self.click('input[type=\"submit\"]')", "def log_in(self):\n\t\tpass", "def logUserIn(self):\n self.browser.get(self.live_server_url)\n self.browser.get(self.live_server_url + reverse('registration:auth_login').rstrip())\n self.assertIn(\n 'login',\n self.browser.current_url\n )\n self.browser.find_element_by_name('username').send_keys(TEST_USER['username'])\n self.browser.find_element_by_id('id_password').send_keys(TEST_USER['password'])\n self.browser.find_element_by_id('submit-login').click()", "def log_in(self):\n if self.is_logged_in():\n return\n\n req_html = request.urlopen(\"https://www.linkedin.com/uas/login\").read()\n soup = BeautifulSoup(req_html)\n csrf = soup.find(id=\"loginCsrfParam-login\")['value']\n\n login_data = parse.urlencode({\n 'session_key': self.username,\n 'session_password': self.password,\n 'loginCsrfParam': csrf\n })\n\n data = login_data.encode()\n\n password_manager = request.HTTPPasswordMgrWithDefaultRealm()\n password_manager.add_password(None, \"https://www.linkedin.com/\", self.username, self.password)\n\n Registration.opener.add_handler(request.HTTPBasicAuthHandler(password_manager))\n\n response = request.urlopen(\"https://www.linkedin.com/uas/login-submit\", data)\n res_html = BeautifulSoup(response.read())\n\n Registration.jar.save(Registration.cookie_filename)\n\n return response", "def logIn(self, username='admin', password='password'):\n # Open webbrowser, go to admin page\n self.browser.get(self.live_server_url + '/accounts/login/')\n\n # Enter username in log-in form\n username_field = self.browser.find_element_by_name('username')\n username_field.send_keys(username)\n\n # Enter password\n password_field = self.browser.find_element_by_name('password')\n 
password_field.send_keys('password')\n\n # Submit\n password_field.send_keys(Keys.RETURN)", "def log_in(self, ctx: Context):\n email = json.loads(ctx.users)['username']\n password = json.loads(ctx.users)['password']\n InputFunctions.send_keys_to_element_by_name(\n self.ctx, self.locators, \"email_input\", email\n )\n InputFunctions.send_keys_to_element_by_name(\n self.ctx, self.locators, \"password_input\", password\n )\n ClickFunctions.click_element_by_name(ctx, self.locators, \"login_button\")\n ClickFunctions.click_element_by_name(ctx, self.locators, \"back_to_content\")", "def _login(self, *args, **kwargs):\n pass", "def login(self):\n self.open(self.urls['login'])\n self.select_form(nr=0)\n\n self.form['custno'] = self.username\n self.form['password'] = self.password\n res = self.submit()\n \n return res", "def login(self):\n #raise NotImplementedError(\"This method must be overridden\")", "def log_in(self):\n print('-=' * 12 + \" Log in \" + '-=' * 12)\n mob_num, password = self._input_mob_num('Mobile Number :'), input(\"Password: \")\n self._user = self.auth.log_in(mob_num, password)\n if self._user:\n print(\"you are logged in, Welcome '{}'\".format(self._user.username))\n self.homepage()\n else:\n print(\"Mobile number or/and password is/are Invaild \\n\" + '-=' * 30)\n options = {1: self.log_in, 2: self.logging_page, 3: self.exit}\n print_out = \"(1) Try Again \\n (2) Back to Logging Page \\n (3) Exit\"\n self._take_option(options, print_out)", "def i_am_in_the_login_page(browser):", "def doLogin(self):\n\t\tlogin_data = urllib.urlencode({\n\t\t\t'operatorName' : self.username,\n\t\t\t'password' : self.password,\n\t\t\t'submit' : 'Iniciar+sesi%C3%B3n',\n\t\t})\n\n\t\tresponse = self.opener.open(\"http://172.16.0.2/tdserver/login_deal.jsp\", login_data)\t\t### deberia devolver verdadero o falso segun se logueo o no", "def login_user(self):\r\n self.client.login(username=self.user.username, password=\"password\")", "def log_in():\n if request.method == 'POST':\n username = request.form['username']\n password = request.form['password']\n if PLAN.login_user(username, password):\n session['name'] = username\n flash(\"Login success ...\")\n return redirect(url_for('index'))\n flash(\"Login failed ...\")\n return render_template('login.html')\n return render_template('login.html')", "def get():\n return login()", "def login(self):\n\n self.__login_if_required()", "def Login(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def login(self):\n self.driver.get(f'{self.base_url}/signin')\n\n # Fill username and password\n enter_username = WebDriverWait(self.driver, 20).until(expected_conditions.presence_of_element_located((By.NAME, 'email')))\n enter_username.send_keys(self.username)\n enter_password = WebDriverWait(self.driver, 20).until(expected_conditions.presence_of_element_located((By.NAME, 'password')))\n enter_password.send_keys(self.password)\n\n # Press the Log In Button\n self.driver.find_element_by_xpath('//*[@id=\"root\"]/div/div[3]/div/div/div/div/div[2]/div/form/div/div[2]/button').click()\n\n # Wait for the page to load (5 seconds)\n sleep(5)", "def login(self):\n r = self._login_token()", "def login_bot(self):\n pass", "def login(self):\n driver = self.selenium_test.driver\n driver.get(self.selenium_test.get_server_url())\n self.selenium_test.wait_fn(self.preenche_username)\n driver.find_element_by_id('btnlogin').click()\n self.selenium_test.wait_to_be_logged_in()", "def login(self):\n url = 'https://ngb.to/login.php?do=login'\n\n params = 
{'do': 'login'}\n payload = {'vb_login_username': self.username,\n 'vb_login_password': self.password,\n 'url': \"index.php\",\n 'do': \"login\",\n 'vb_login_md5password': \"\",\n 'vb_login_md5password_utf': \"\",\n 's': \"\",\n 'securitytoken': \"guest\",\n 'cookieuser': \"1\"}\n\n self.session.post(url, data=payload, params=params)", "def loginAsManager(self):\n self.browser.open('http://nohost/plone/')\n self.browser.getLink('Log in').click()\n self.browser.getControl('Login Name').value = 'root'\n self.browser.getControl('Password').value = 'secret'\n self.browser.getControl('Log in').click()", "def einloggen(self):\n \n self.c.login(self.username.text(), self.password.text(), \"1\")", "def _NavigateWebviewLogin(self, username, password, wait_for_close):\n self._NavigateWebviewEntry('identifierId', username)\n self._ClickPrimaryActionButton()\n self._NavigateWebviewEntry('password', password)\n self._ClickPrimaryActionButton()\n if wait_for_close:\n py_utils.WaitFor(lambda: not self._GaiaWebviewContext(), 60)", "def login(self):\n # Enter login credentials\n WebDriverWait(self.driver, 120).until(\n EC.element_to_be_clickable(\n (By.ID, \"session_key-login\")\n )\n )\n elem = self.driver.find_element_by_id(\"session_key-login\")\n elem.send_keys(self.username)\n elem = self.driver.find_element_by_id(\"session_password-login\")\n elem.send_keys(self.password)\n # Enter credentials with Keys.RETURN\n elem.send_keys(Keys.RETURN)\n # Wait a few seconds for the page to load\n time.sleep(3)", "def click_login(self):\n self.login.click()\n return self.login", "def formlogin(self, username, password, use_current_url=False):\n if not use_current_url:\n self.open(self.ADDRESS_BOOK_DEFAULT_URL)\n self.getControl('User Name').value = username\n self.getControl('Password').value = password\n self.getControl('Log in').click()\n assert 'You have been logged-in successfully.' 
== self.message, \\\n 'Not successfully logged in: message={0.message!r}'.format(self)\n return self", "def do_login(self):\n self.content = self._login()\n if self.with_tags:\n self.rest_content = self._login_vapi()", "def do_login_login():\n print(inspect.stack()[1][3])\n print(request.form)\n query = select([User]).where(and_(User.columns.email == request.form['email'],User.columns.password==request.form['password'] ))\n ResultProxy = connection.execute(query)\n ResultSet = ResultProxy.fetchone()\n if ResultSet:\n session['logged_in'] = True\n else:\n flash('wrong password!')\n # return str(get_flashed_messages())\n return home(result)", "def login(self):\n \n self.br.open(\"http://kanji.koohii.com/login\")\n self.br.form = list(self.br.forms())[0]\n self.br[\"username\"] = USER\n self.br[\"password\"] = PASSWORD\n my_response = self.br.submit()\n print \"Login successful\"", "def login(self):\n url = self._root + self._routes[\"login\"]\n self.r = self.reqsession.get(url) \n if self.r.url == 'https://console.zerodha.com/dashboard':\n cookies = self.reqsession.cookies.get_dict('console.zerodha.com')\n self.console_session = cookies['session']\n self.public_token = self.reqsession.cookies['public_token']\n return True\n else:\n raise Exception(\"Login failed or Kite session expired\")", "def login(self):\n self.logger.info(\"Logging in...\")\n url = 'https://chaturbate.com/auth/login/'\n result = self.req.get(url)\n\n soup = BeautifulSoup(result.text, \"html.parser\")\n csrf = soup.find('input', {'name': 'csrfmiddlewaretoken'}).get('value')\n\n result = self.req.post(url,\n data={\n 'username': self.username,\n 'password': self.password,\n 'csrfmiddlewaretoken': csrf\n },\n cookies=result.cookies,\n headers={'Referer': url})\n\n if self.is_logged(result.text) is False:\n self.logger.warning(\"Could not login\")\n return False\n else:\n return True", "def login(self):\n identity = request.environ.get('repoze.who.identity')\n came_from = str(request.GET.get('came_from', '')) or \\\n url('/')\n if identity:\n redirect(url(came_from))\n else:\n c.came_from = came_from\n c.login_counter = request.environ['repoze.who.logins'] + 1\n return render('/forms/login.mako')", "def login(self):\r\n \r\n # Get the csrf token from the main URL\r\n csrf = self.extract_csrf(API.url_login)\r\n \r\n # Construnct the payload\r\n payload = self.cfg['payload']['login'][0]\r\n payload['csrfmiddlewaretoken'] = csrf\r\n\r\n # Test the entry with it's json schema\r\n check.check_entry(path='schemas/login.json', test=payload)\r\n\r\n # Login request \r\n requests.post(API.url_login, payload, headers={'Referer' : API.url_login})", "def login(self):\n logging.debug(\"login called\")\n\n # Apply settings\n self.localisationsettings.apply_to_upcoming_session()\n self.admin_setting.apply_to_upcoming_session()\n self.macspoof_setting.apply_to_upcoming_session()\n self.network_setting.apply_to_upcoming_session()\n\n self.mainwindow.hide()\n self.gdmclient.do_login()", "def login_menu(self):\n print(\"\\nPlease enter your email and password\")\n email = self.validate_email()\n password = self.validate_password()\n self.authenticate_user(email, password)", "def login_action(login_page, request, driver):\n login_page.login(request.config.getoption(\"--username\"), request.config.getoption(\"--password\"))", "def login(self):\n self.client.login(username=self.user.username, password='test')", "def login(self, username, password):\n return self.app.post('/login', data = dict(\n username = username,\n password = password\n ), 
follow_redirects = True)", "def log_in(username='robot', password='test', email='robot@edx.org', name=\"Robot\"):\r\n url = '/auto_auth'\r\n params = { 'username': username, 'password': password, 'email': email, 'full_name': name }\r\n url += \"?\" + urllib.urlencode(params)\r\n world.visit(url)\r\n\r\n # Save the user info in the world scenario_dict for use in the tests\r\n user = User.objects.get(username=username)\r\n world.scenario_dict['USER'] = user", "def plans_login(self, username='', password=''):\n # the provided username and password ONLY get checked\n # by the plans server if our cookie is expired.\n # hence, if we've logged in recently, this will return True even\n # if un/pw are not provided or are otherwise bad.\n login_info = {'username': username,\n 'password': password,\n 'submit': 'Login'}\n response = self._get_page('index.php', post=login_info)\n # if login is successful, we'll be redirected to home\n success = response.url[-9:] == '/home.php'\n if success:\n self.parser.feed(response.text) # parse out username\n self.username = self.parser.username\n return success", "def do_login(request):\n distinct_id = request.session.pop('distinct_id')\n user = User.objects.get(id=distinct_id)\n login(request, user)\n return redirect_to_user_settings()", "def do_login(self, password):\n # Creating JSON string with authentication credentails.\n in_data = ('{{ \"username\":\"{username}\",'\n '\"password\":\"{password}\" }}'\n ).format(\n username=self.pub_user,\n password=password\n )\n\n url = self.base_url + \"/oasis/login\"\n response = self.do_request(url, in_data)\n json_response = json.loads(response.content)\n\n if json_response[\"success\"] == False:\n print(\"Invalid user id or password\")\n else:\n self.cookies = dict(sessionid=response.cookies['sessionid'])\n print(\"You are logged into Mid-tier\")\n\n logger.info( 'Log in response ' + str(response.content))", "def login(self):\n return self.client.login(username='Georgie', password='12345678')", "def login(self):\n try:\n self.driver = webdriver.Ie()\n self.driver.maximize_window()\n self.driver.get(self.url_login)\n self.driver.find_element_by_id('txtName').send_keys(self.username)\n self.driver.find_element_by_id('txtPassword').send_keys(self.password)\n self.driver.execute_script('frmLogin.action = \"login.aspx?action=login\";frmLogin.submit();')\n # Add/Edit User\n self.wait = WebDriverWait(self.driver, 10)\n self.wait.until(EC.element_to_be_clickable((By.ID, 'a151'))) # Add edit user button\n time.sleep(1)\n return\n except Exception:\n raise", "def post(self):\n username = self.request.get('username')\n password = self.request.get('password')\n\n # User class login function\n u = User.login(username, password)\n if u:\n self.login(u) # BlogHandler login function\n self.redirect('/blog')\n else:\n msg = 'Invalid login'\n self.render('login-form.html', error = msg)", "def login(self):\n self.button_login.click()\n return dashboard.DashboardPage(self._driver)", "def login_request(self):\n return self.make_request('login/request', base_uri=Base.URI, method='POST')", "def login(self, username, password):\n\t\turl = \"https://habitica.com/api/v3/user/auth/local/login\"\n\t\tpayload = {\"username\": username, \"password\": password}\n\t\treturn(postUrl(url, self.credentials, payload))", "def click_login_button(self):", "def do_login():\n\n isTeacher = False\n\n # check if this_user is admin or normal user\n this_user = User.query.filter_by(username=request.form['username']).first()\n \n # is this_user is not student or 
admin then check teacher table\n if this_user is None:\n this_user = Teacher.query.filter_by(username=request.form['username']).first()\n isTeacher = True\n\n # if this_user is still none -> invalid user\n if this_user is not None:\n if this_user.password == request.form[\"password\"]:\n session['authenticated'] = True\n session['username'] = this_user.username\n session['name'] = this_user.name\n session['isTeacher'] = isTeacher\n if session['username'] == \"admin\":\n session['wasAt'] = \"manageusers\"\n try:\n session['cpi'] = this_user.cpi\n session['grp_size'] = this_user.group_size\n except:\n pass\n else:\n flash(\"Incorrect Password, Please Try Again\") \n else:\n flash(\"Invalid Username, Please Try Again\")\n return home()", "def login(self):\n self.driver.find_element(*BaseLocators.PRIMARY_BUTTON).click()", "def login():\n if request.method=='GET':\n # get info and render\n return render_template('login.html')\n else:\n # auth\n username = request.form.get('username')\n password = request.form.get('password')\n\n users = DATABASE[\"users\"]\n\n if username in users:\n if password == users[username][\"Password\"]:\n # success, set session\n session['Name'] = username\n session['Type'] = users[username]['Type']\n\n # get info and redirect\n return redirect(url_for('manage_resources', user=username), 302)\n return Response(\"Incorrect Login Details\", 401)\n\n return \"Incorrect login credentials\"", "def log_in():\n form = LoginForm(request.form)\n if request.method == 'POST' and form.validate():\n if form.username.data != current_app.config['USERNAME']:\n flash('Invalid username.')\n elif form.password.data != current_app.config['PASSWORD']:\n flash('Invalid password.')\n else:\n session['logged_in'] = True\n flash('You were logged in.')\n\n return redirect(url_for('blog.show_posts'))\n\n return render_template('auth/log_in.html', form=form)", "def on_login(self, username):", "def on_login(self, username):", "def login():\n form = LoginForm()\n if request.method == \"GET\":\n return render_template('login.html', title='Sign In', form=form)\n if request.method == \"POST\":\n if 'loggedin' in session:\n return redirect(url_for('home'))\n if form.validate_on_submit():\n username = form.username.data\n password = form.password.data\n account = db.check_item(\"username\", username)\n if account is None:\n flash('Invalid username or password')\n return redirect(url_for('login'))\n else:\n if check_password_hash(str(account['password_hash']), password):\n session['loggedin'] = True\n session['username'] = account['username']\n session['admin_auth'] = account['admin_auth']\n flash('Login successfully!')\n return redirect(url_for('home'))\n flash('Invalid username or password')\n return redirect(url_for('login'))\n else:\n return redirect(url_for('login'))", "def login():\n # Initialise login form\n form = UserLoginForm()\n # Validate and process form data\n if form.validate_on_submit():\n # Get form data\n username = form.username.data\n password = form.password.data\n # Check if username and password is valid\n valid, userID = gdb.verifyuser(username, password)\n if(valid):\n user = gdb.getuserbyid(userID)\n login_user(user)\n return redirect(url_for('main.dashboard'))\n else:\n flash(\"Invalid username or password.\", category=\"error\")\n return redirect(url_for('main.login'))\n # Render template\n return render_template('login.html', form=form)", "def perform_login(self, user_name, user_pass):\n if self.api_type == 'real':\n self.input_user_name(user_name)\n 
self.input_user_pass(user_pass)\n self.click_login_button()", "def test_login(self):\n url_extend = 'user_auth/login/'\n self.browser.get(self.url + url_extend)\n\n # enter the username and password.\n username_field = self.browser.find_element_by_name('user_name')\n username_field.send_keys('user4')\n password_field = self.browser.find_element_by_name('password')\n password_field.send_keys('user')\n\n # click login button.\n # get the first input button under the first form in login page.\n login_button = self.browser.find_element_by_xpath(\"//form[1]/fieldset[1]/input[@type='submit']\")\n try:\n login_button.click()\n except:\n raise Exception(\"Login Error!\")", "def perform_login(browser: RoboBrowser, username: str, password: str):\n browser.open(URL_LOGIN)\n form = browser.get_form()\n form['kennung'] = username\n form['passwort'] = password\n browser.submit_form(form)", "def login(self, username, password):\n return self.app.post('/login', data=dict(\n username=username,\n password=password\n ), follow_redirects=True)", "def admin_login(self, username, password, login_url=\"/admin/\"):\n from selenium.webdriver.common.by import By\n\n self.selenium.get(\"%s%s\" % (self.live_server_url, login_url))\n username_input = self.selenium.find_element(By.NAME, \"username\")\n username_input.send_keys(username)\n password_input = self.selenium.find_element(By.NAME, \"password\")\n password_input.send_keys(password)\n login_text = _(\"Log in\")\n with self.wait_page_loaded():\n self.selenium.find_element(\n By.XPATH, '//input[@value=\"%s\"]' % login_text\n ).click()", "def test_aio_can_login_to_web_portal(aio):", "def login(self):\n # the login url is just api, not api2\n url = 'https://simple-note.appspot.com/api/login'\n query = {'email': self.email, 'password': self.password}\n data = base64.b64encode(urllib.urlencode(query))\n try:\n fh = urllib2.urlopen(url, data)\n self.authtok = fh.read()\n except urllib2.HTTPError, e:\n # Received a non 2xx status code\n raise SimplenoteError('http error: {}'.format(e.code))\n except urllib2.URLError, e:\n # Non http error, like network issue\n raise SimplenoteError('url error: {}'.format(e.reason))\n fh.close()\n return True", "def go_to_login_page(self):\n WebDriverWait(self.driver, 15).until(\n EC.visibility_of_element_located(\n (By.XPATH, '//*[@class=\"ut-login-content\"]//button'))\n )\n print(\"Logging in...\")\n\n self.sleep_approx(random.randint(5, 10))\n self.driver.find_element(\n By.XPATH, '//*[@class=\"ut-login-content\"]//button').click()\n\n WebDriverWait(self.driver, 10).until(\n EC.visibility_of_element_located((By.ID, 'email'))\n )", "def post(self):\n\n username = self.request.get('username').lower()\n pwd = self.request.get('pwd')\n remember = self.request.get('remember')\n\n user = User.login(username, pwd) # class\n if user:\n self.login(user, remember) # cookie\n self.redirect('/blog')\n else:\n msg = 'Invalid login'\n self.render(\"login.html\", error=msg)", "def login(self):\n login_form = {\"kid\": \"\",\n \"uni\": self.server,\n \"login\": self.username,\n \"pass\": self.password}\n url = \"https://%s.ogame.gameforge.com/main/login\" % self.country_code\n result = self.session.post(url, data=login_form)", "def try_login(self, username, password):\n path = reverse('xhr_auth_login')\n self.client.logout()\n return self.client.post(path, {'username': username, 'password': password})", "def login(self, url=None):\n logger.info(u\"Attempting PACER site login\")\n\n # Clear any remaining cookies. 
This is important because sometimes we\n # want to login before an old session has entirely died. One example of\n # when we do that is when we get the page saying that \"This page will\n # expire in...[so many minutes].\" When we see that we just log in\n # fresh and try again.\n self.cookies.clear()\n if url is None:\n url = self.LOGIN_URL\n\n # Load the page in order to get the ViewState value from the HTML\n load_page_r = self.get(\n url,\n headers={\"User-Agent\": \"Juriscraper\"},\n auto_login=False,\n verify=False,\n timeout=60,\n )\n\n login_post_r = self.post(\n url,\n headers={\"User-Agent\": \"Juriscraper\"},\n verify=False,\n timeout=60,\n auto_login=False,\n data={\n \"javax.faces.partial.ajax\": \"true\",\n \"javax.faces.partial.execute\": \"@all\",\n \"javax.faces.source\": \"loginForm:fbtnLogin\",\n \"javax.faces.partial.render\": \"pscLoginPanel+loginForm+redactionConfirmation+popupMsgId\",\n \"javax.faces.ViewState\": self._get_view_state(load_page_r),\n \"loginForm:courtId_input\": \"E_ALMDC\",\n \"loginForm:courtId_focus\": \"\",\n \"loginForm:fbtnLogin\": \"loginForm:fbtnLogin\",\n \"loginForm:loginName\": self.username,\n \"loginForm:password\": self.password,\n \"loginForm:clientCode\": \"\",\n \"loginForm\": \"loginForm\",\n },\n )\n if u\"Invalid username or password\" in login_post_r.text:\n raise PacerLoginException(\"Invalid username/password\")\n if u\"Username must be at least 6 characters\" in login_post_r.text:\n raise PacerLoginException(\n \"Username must be at least six \" \"characters\"\n )\n if u\"Password must be at least 8 characters\" in login_post_r.text:\n raise PacerLoginException(\n \"Password must be at least eight \" \"characters\"\n )\n if u\"timeout error\" in login_post_r.text:\n raise PacerLoginException(\"Timeout\")\n\n if not self.cookies.get(\"PacerSession\"):\n logger.info(\n \"Did not get cookies from first log in POSTs. \"\n \"Assuming this is a filing user and doing two more \"\n \"POSTs.\"\n )\n reg_msg_r = self.post(\n url,\n headers={\"User-Agent\": \"Juriscraper\"},\n verify=False,\n timeout=60,\n auto_login=False,\n data={\n \"javax.faces.partial.ajax\": \"true\",\n \"javax.faces.source\": \"regmsg:chkRedact\",\n \"javax.faces.partial.execute\": \"regmsg:chkRedact\",\n \"javax.faces.partial.render\": \"regmsg:bpmConfirm\",\n \"javax.faces.partial.event\": \"change\",\n \"javax.faces.behavior.event\": \"valueChange\",\n \"javax.faces.ViewState\": self._get_xml_view_state(\n login_post_r\n ),\n \"regmsg\": \"regmsg\",\n \"regmsg:chkRedact_input\": \"on\",\n },\n )\n # The box is now checked. 
Submit the form to say so.\n self.post(\n url,\n headers={\"User-Agent\": \"Juriscraper\"},\n verify=False,\n timeout=60,\n auto_login=False,\n data={\n \"javax.faces.partial.ajax\": \"true\",\n \"javax.faces.source\": \"regmsg:bpmConfirm\",\n \"javax.faces.partial.execute\": \"@all\",\n \"javax.faces.ViewState\": self._get_xml_view_state(\n reg_msg_r\n ),\n \"regmsg\": \"regmsg\",\n \"regmsg:chkRedact_input\": \"on\",\n \"regmsg:bpmConfirm\": \"regmsg:bpmConfirm\",\n \"dialogName\": \"redactionDlg\",\n },\n )\n\n if not self.cookies.get(\"PacerSession\") and not self.cookies.get(\n \"NextGenCSO\"\n ):\n raise PacerLoginException(\n \"Did not get PacerSession and NextGenCSO \"\n \"cookies when attempting PACER login.\"\n )\n\n self.get(self.INDEX, auto_login=False)\n logger.info(u\"New PACER session established.\")", "def login(self):\n if self._cookie_cached(self.login_email):\n self.cookie_login(self.login_email)\n else:\n self.new_login(self.login_email, self.login_pass)", "def login(self):\n # Get instagram login page\n self.driver.get('{}/accounts/login/'.format(self.base_url))\n\n # find username and password inputs by name\n time.sleep(1.5)\n self.driver.find_element_by_name('username').send_keys(self.username)\n self.driver.find_element_by_name('password').send_keys(self.password)\n\n # Scroll the login mobile button into view\n # self.driver.execute_script(\"window.scrollTo(0, 50)\")\n scroll_helper(100, self.driver)\n\n # find login button by xpath\n self.driver.find_element_by_xpath(\n \"//div[contains(text(), 'Log In')]\").click()\n\n # Instagram throws a lot of notifications when logging in. This will redirect to clear some\n # and cancel the others\n time.sleep(4)\n self.driver.get(self.base_url)\n time.sleep(1)\n self.driver.find_element_by_xpath(\n \"//button[contains(text(), 'Cancel')]\").click()\n time.sleep(2)", "def login():\n if session['state'] != request.args['state']:\n response = make_response(json.dumps('Invalid state parameter.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n authorization_response = request.url\n FLOW.fetch_token(authorization_response=authorization_response)\n auth_session = FLOW.authorized_session()\n userinfo = auth_session.get(USERINFO_URL).json()\n session['userinfo'] = {\n 'name': userinfo['name'],\n 'email': userinfo['email']}\n sqlsession = SQLSESSION()\n user = User(name=userinfo['name'], email=userinfo['email'])\n try:\n sqlsession.add(user)\n sqlsession.commit()\n except IntegrityError:\n # user already exists in DB\n pass\n if 'target' not in session.keys():\n return redirect(\"/\")\n return redirect(session['target'])", "def login():\n if g.user:\n #return redirect(url_for('timeline'))\n return redirect(url_for('index'))\n error = None\n if request.method == 'POST':\n #user = query_db('''select * from user where username = ?''', [request.form['username']], one=True)\n try:\n pid = int(request.form['pid'])\n except:\n abort(404)\n user = User.query.filter_by(pid=pid).first()\n if user is None:\n error = 'Invalid username'\n elif not check_password_hash(user.pw_hash, request.form['password']):\n error = 'Invalid password'\n else:\n #flash(_('You were logged in'))\n session['user_id'] = user.user_id\n return redirect(url_for('index'))\n if error:\n flash(_('Wrong with phone number or password'))\n return render_template('login.html', error=error)", "def login():\n if request.method == 'POST':\n db = database.getdb()\n user = db.execute(\"SELECT * FROM flaskuser WHERE username=?\", 
(request.form['username'],)).fetchone()\n if check(user[\"pword\"], request.form[\"password\"]):\n session.clear()\n session[\"user_id\"] = 'admin'\n return redirect(url_for('index'))\n return render_template('login.html', title='Log In')", "def check_user_and_login(self) -> Response:\n pass", "def open_login_page(self):\n com_util.tap_on(self.driver, element['clickOnAtSign'])", "def on_start(self):\n self.login()", "def on_start(self):\n self.login()", "def on_start(self):\n self.login()", "def on_start(self):\n self.login()", "def login(self, qturl):\n self.loginwindow.htmlView.load(qturl)\n self.loginwindow.show()", "def login(self):\r\n user_account = db.find_one({\"cpr_number\": request.form.get(\"CPR\")})\r\n if user_account is not None:\r\n if self.verify_password(user_account[\"password\"], request.form.get(\"password\")):\r\n return self.start_session(user_account)\r\n return jsonify({\"error\": \"Invalid login credentials\"}), 401", "def user_login():\n \n data = user_obj.user_login()\n return data", "def step_impl_1(context, username, pwd):\n\n br = context.browser\n br.get(context.server_url + '/accounts/login/')\n\n user = br.find_element_by_id(\"username\")\n pswd = br.find_element_by_id(\"password\")\n\n user.send_keys(username)\n pswd.send_keys(pwd)\n br.find_element_by_id(\"submit\").click()", "def login(self):\n\n self.wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, self.selectors['button_login_css'])))\n\n self.driver.find_element_by_css_selector(self.selectors['button_login_css']).click()\n return self.driver", "def login_user(self, username, pwd):\n self.browser.get(\"%s%s\" %\n (str(self.live_server_url), '/accounts/login/'))\n username_input = self.browser.find_element_by_id('id_username')\n password_input = self.browser.find_element_by_id('id_password')\n submission_button = self.browser.find_element_by_class_name(\n 'btn-success')\n\n username_input.send_keys(username)\n password_input.send_keys(pwd)\n submission_button.click()", "def login():\n if request.method == 'POST':\n username = request.form['username']\n password = request.form['password']\n\n user = get_user(username, password)\n\n if not user:\n flash('No such username and/or password', 'alert-danger')\n return redirect(url_for('login'))\n\n session['username'] = user.username\n session['user_id'] = user.id\n session['logged_in'] = True\n session['is_admin'] = user.is_admin\n\n return redirect(url_for('index'))\n\n return render_template('login.html')" ]
[ "0.80300766", "0.795173", "0.795173", "0.78314114", "0.76917493", "0.7586301", "0.7556712", "0.7542856", "0.75261706", "0.7460586", "0.74421704", "0.7429035", "0.74237245", "0.7412092", "0.7370598", "0.73602885", "0.7355475", "0.7346966", "0.7308393", "0.73024786", "0.72809607", "0.7276152", "0.7274363", "0.72641206", "0.71859455", "0.7179287", "0.7176726", "0.71617055", "0.71575075", "0.71042895", "0.70894295", "0.70836735", "0.70511895", "0.70136106", "0.70018667", "0.6991592", "0.69519866", "0.6943446", "0.69373566", "0.69248426", "0.6911905", "0.6901322", "0.68984437", "0.6895775", "0.68946373", "0.6894034", "0.68939334", "0.6887357", "0.6875651", "0.686483", "0.6859877", "0.6857097", "0.6847449", "0.6834903", "0.6828273", "0.682496", "0.6815553", "0.6802568", "0.6800443", "0.6787981", "0.6787182", "0.67834055", "0.67828363", "0.6776211", "0.6770427", "0.6755616", "0.67525136", "0.67525136", "0.6743277", "0.6741477", "0.6734531", "0.6732117", "0.67307276", "0.67273664", "0.6716589", "0.6700073", "0.6699821", "0.66974366", "0.6697147", "0.6696303", "0.669451", "0.66890025", "0.66869485", "0.66822845", "0.6680439", "0.66778535", "0.6677563", "0.66739446", "0.66655844", "0.66655606", "0.66655606", "0.66655606", "0.66655606", "0.6663027", "0.6661812", "0.66614175", "0.66588277", "0.66567624", "0.6656642", "0.6655706" ]
0.7128185
29
Method allows users to navigate through a user's profile page
def nav_user(self, user):
    self.driver.get(self.nav_url.format(user))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def profile_url(self):\n return reverse(\"auth_profile\", args=[self.user.username])", "def user_view(cls, user, profile):\r\n pass", "def profile():\n if g.user:\n return render_template('profile.html', user=g.user)\n return redirect(url_for('login'))", "def user_view(cls, user, profile):\n pass", "def user_profile():\n if CURR_USER_KEY in session:\n return render_template('/profile/detail.html')\n else:\n return redirect('/login')", "def nav_user(self, user):\n self.driver.get('{}/{}/'.format(self.base_url, user))", "def nav_user(self, user):\n bot = self.bot\n bot.get(self.nav_user_url.format(user))", "def me():\n if g.USER:\n return redirect(url_for(\"profile\", username=g.USER.username))\n return redirect(url_for(\"home\"))", "def users_page(request):", "def user_profile():\n\n # Make sure users are logged in to access there profile\n if 'logged' in session:\n user = session['username']\n users_recipes = mongo.db.recipes.find({'username': user})\n return render_template('my_profile.html', recipes=users_recipes,\n title=\"My Profile\", user=user)\n # If the user isn't logged but somehow managed to click 'my profie'\n else:\n flash('Only logged in users can see there profile, please log in',\n 'danger')\n return redirect(url_for('login'))", "def view_profile():\n user_id = session.get(\"user_id\")\n \n user = User.query.get(session[\"user_id\"])\n \n return render_template(\"editable_profile_page.html\", user=user)", "def profile():\n\n user_id = session.get(\"user_id\")\n userbuses = crud.show_all_userbus(user_id)\n\n \n\n if user_id:\n user = crud.get_user_by_id(user_id)\n return render_template('user_profile.html', user=user, userbuses=userbuses)\n \n else:\n flash('Please sign in')\n return render_template('login.html')", "def profile():\n # Check if user is loggedin\n if 'loggedin' in session:\n # We need all the account info for the user so we can display it on the profile page\n response = requests.get(\n \"http://localhost:8080/api/userbyid/\"+str(session['userid']))\n acc = json.loads(response.text)\n # Show the profile page with account info\n return render_template('profile.html', account=acc)\n # users is not loggedin redirect to login page\n return redirect(url_for('site.login'))", "def gotoUsers(self):\n self.elementClick(locator=self._navBar_users, locatorType=\"xpath\")", "def test_profile(self):\n\n # login in\n url_extend = 'user_auth/login/'\n username = 'user4'\n password = 'user'\n login_button = login(self.browser, self.url + url_extend, username, password)\n try:\n login_button.click()\n except:\n raise Exception(\"Login Error!\")\n # locate the profile memu.\n try:\n profile_menu = self.browser.find_element_by_id('profile')\n profile_menu.click()\n except:\n raise Exception(\"Cannot find profile menu!\")\n\n # check if we are at the profile page after we clicked the profile menu.\n self.assertEqual(self.browser.current_url, self.url + 'user_auth/profile/')", "def user_settings(request):\n return redirect('edit_profile')", "def test_user_profile_page(self):\n result = self.client.get(\"/profile\", follow_redirects=True)\n self.assertIn(b\"User ID\", result.data)", "def profile():\r\n user_data = load_user(current_user.id, current_user)\r\n if user_data is not None:\r\n user, followers, following = user_data\r\n\r\n return render_base_template(\"profile.html\", profile=user, followers=followers,\r\n following=following,\r\n os_projects=[])\r\n\r\n return abort(404)", "def clickViewProfile(self):\n self.waitForElement(locator=self._viewProfileBtn, 
locatorType=\"xpath\")\n element = self.getElementList(locator=self._viewProfileBtn, locatorType=\"xpath\")\n self.elementClick(element=element[0])", "def goto_user(self, username=CONF[\"user\"][\"name\"]):\n self.driver.get(\"{}/{}/?__a=1\".format(URL_BASE, username))", "def profile(username):\n # grab the session user's username from db\n username = mongo.db.users.find_one(\n {\"username\": session[\"user\"]})[\"username\"]\n if session[\"user\"]:\n return render_template(\"profile.html\", username=username)\n return redirect(url_for(\"login\"))", "def open_user_page(self):\n self.switch_main_menu(\"Admin\")\n self.wait_unit_el_present(self.user_management_menu)\n self.click_menu(\"User Management\")\n self.click_menu(\"Users\")", "def profile(username):\n username = mongo.db.users.find_one(\n {\"username\": session[\"user\"]})[\"username\"]\n\n tips = list(mongo.db.tips.find())\n return render_template(\"profile.html\", tips=tips)\n\n if session[\"user\"]:\n return render_template(\"profile.html\", username=username)\n\n return redirect(url_for(\"login\"))", "def profile(request):\n profile = request.user.profile\n return render(request, 'accounts/profile.html', {'profile': profile})", "def current_profile(request):\n if not request.user.is_authenticated():\n return HttpResponseRedirect(reverse('login'))\n return profile(request, request.user.username)", "def current_profile(request):\n if not request.user.is_authenticated():\n return HttpResponseRedirect(reverse('login'))\n return profile(request, request.user.username)", "def profile(username):\n user = session.get('username')\n if user is not None:\n if user == username:\n current_user = User.from_mongo(**mongo.db.users.find_one({\"name\": session.get('username')}))\n return render_template(\"user_profile.html\", title=\"My Profile\", user=current_user)\n else:\n user = User.from_mongo(**mongo.db.users.find_one({'name': username}))\n return render_template(\"user_profile.html\", title=f\"{user.name}'s Profile\", user=user)\n else:\n flash('Please log in to access user profile')\n return redirect(url_for('login'))", "def show_profile():\n print('LOGIN SESSION:', login_session)\n if 'userid' in login_session:\n category = session.query(Category).first()\n item = session.query(Item).first()\n return render_template('profile.html', login_session=login_session, root=app.instance_path, category=category,\n item=item)\n flash('Unfortunately you need to be logged in to see your profile', 'error')\n return redirect(url_for('show_homepage'))", "def test_view_a_user_profile(self):\n self.authorize_user(self.user_login_details)\n url = self.profiles_url + \\\n '{}'.format(self.user['user']['username']) + \"/\"\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def profile(request, username):\n # Get profile information for a user. 
Use iexact for case-insensitive query\n try:\n profile = User.objects.get(username__iexact=username)\n except ObjectDoesNotExist:\n profile = None\n return render(request, \"network/profile.html\", {\"profile\": profile})\n\n # Find all users following the user whose profile being visited\n followers = User.objects.filter(following=profile.id)\n\n # Get posts for users and put in paginator format\n posts = Post.objects.filter(author=profile).order_by('-timestamp')\n paginator = Paginator(posts, 10)\n\n page_number = request.GET.get('page')\n page_object = paginator.get_page(page_number)\n\n return render(request, \"network/profile.html\", {\n \"profile\": profile, \"followers\": followers, \"posts\": page_object\n })", "def profiles():\n \n if 'username' in session:\n profiles = mongo.db.profiles.find()\n return render_template('pages/profiles.html', title='Profiles', profiles = profiles)\n flash('Please login to view user profiles.', 'warning')\n return redirect(url_for('login'))", "def profile():\n \n return render_template(\"profile.html\")", "def get_absolute_url(self):\n\n return reverse('profile', args=[str(self.user.username)])", "def get(self):\n user = self.get_active_user()\n if user:\n self.render_newpage(user=user)\n else:\n self.redirect('/login')", "def profile_detail(request, pk):\n profile = request.user.userprofile\n user_relationships = profile.get_relationships()\n user_request = profile.get_friend_request()\n\n context = {\n # 'user': user,\n 'profile': profile,\n 'user_relationships': user_relationships,\n 'user_request': user_request\n }\n\n return render(request, 'accounts/profile_detail.html', context)", "def show_user():\n\n return render_template('user/show_by_user.html', title='Show Profile', user = current_user)", "def user(self):\n try:\n data_definitions = self.get_data_definitions(current_user.scheme_id)\n user_data = self.get_all_user_data(current_user.scheme_id, current_user.k_number)\n\n return render_template(\"user/dashboard_page.html\", title=\"Your Profile\", user_data=user_data)\n\n except Exception as e:\n self._log.exception(\"Could not execute get user logic\")\n return abort(500)", "def about_user(self, *args):\n user_for_about = len(args) == 2 and UserEntity.by_id(int(args[1]))\n if user_for_about:\n user_posts = UserInfoHandler.get_all_posts(\n user_for_about.key().id())\n if not user_posts:\n user_posts = []\n return self.render(\n 'user_info.html', userA=user_for_about, user_posts=user_posts)\n\n self.redirect(\"/\")", "def profile(request, username):\n if User.objects.filter(username=username).exists():\n profile_user = User.objects.filter(\n username=username)[0]\n if 'paige' in profile_user.username:\n raise Http404\n else:\n raise Http404\n\n page_username = get_rand_username(profile_user)\n\n request_type = request.GET.get('type', '')\n\n subnav_key, subnav_value, page_title = get_active_page(\n 'profile', request_type)\n\n header = possesive(page_username, page_title)\n title = possesive(profile_user.username, page_title)\n\n template_values = {\n 'page_title': title,\n 'nav_profile': 'active',\n subnav_key: subnav_value,\n 'header': header,\n 'user': request.user,\n 'profile_user': profile_user,\n 'header-classes': '',\n 'floor_percentile': get_floor_percentile(\n profile_user.profile),\n 'trend': get_day_trend(profile_user.profile),\n 'num_commprods': cpm.CommProd.objects.filter(\n user_profile=profile_user.profile).count(),\n 'num_votes': cpm.Rating.objects.filter(\n user_profile=profile_user.profile).count()\n }\n\n if request_type 
!= '':\n return profile_search(request,\n template_values, profile_user)\n template_values.update(profile_query_manager(\n request.user, profile_user))\n return render_to_response('profile.html',\n template_values, request)", "def profile(self, name=\"johndoe\"):\r\n url = \"/account/%s\" % name\r\n return self.app.get(url, follow_redirects=True)", "def user_profile_view(request, user_id=0):\n\tuser = User.objects.filter(id=user_id)[0]\n\tstalker = request.user \n\tuser_indicator = 0 # Used to check if the user is logged in \n\tif (stalker.is_authenticated()):\n\t\tuser_indicator = 1\n\telse:\n\t\tuser_indicator = 0\n\t# show a form with data from database for 'GET' request\n\tif request.method == 'GET':\n\t\tperson = Person.objects.filter(user=user)[0]\n\t\taccount_form = AccountDetailForm(initial={\n\t\t\t'user_name': user.username,\n\t\t\t'user_email': user.email,\n\t\t\t'user_faculty': person.faculty,\n\t\t}) # Display user account detail\n\n\t\tlocation_form = LocationForm() # Display location box for user to type into\n\n\t\tlive_update_list = LiveUpdate.objects.all().order_by('date_time') # Display live update list\n\n\t\tfriend_update_list = FriendUpdate.objects.filter(receiver=user).order_by('date_time') # Display friend update list\n\t\tif len(friend_update_list) > 20:\n\t\t\tfriend_update_list = friend_update_list[:20]\n\n\t\tfriend_request_list = FriendRequest.objects.filter(receiver=user).order_by('-date_time') # Display friend request list\n\n\t\tfriend_list = person.friends.all() # Display all friends list\n\n\t\taddfriend_indicator = 0 # Help decide the status of the add friend button\n\t\tperson_2 = Person.objects.filter(user=stalker)[0]\n\t\tif (person_2 in friend_list):\n\t\t\taddfriend_indicator = 2 # stalker and user whose profile being shown are friends\n\t\telse:\n\t\t\trelevant_request = FriendRequest.objects.filter(sender=stalker,receiver=user)\n\t\t\tif len(relevant_request) == 0:\n\t\t\t\taddfriend_indicator = 0 # stalker has not added user whose profile is being shown as a friend\n\t\t\telse:\n\t\t\t\taddfriend_indicator = 1 # stalker has added user whose profile is being shown as a friend\n\n\t\treturn render(request, 'mugspot/userprofile.html', {\n\t\t\t\t'live_update_list':live_update_list,\n\t\t\t\t'friend_update_list': friend_update_list,\n\t\t\t\t'location_form': location_form,\n\t\t\t\t'account_form': account_form,\n\t\t\t\t'friend_list': friend_list,\n\t\t\t\t'friend_request_list': friend_request_list,\n\t\t\t\t'user_prof': user,\n\t\t\t\t'stalker': stalker,\n\t\t\t\t'user_indicator': user_indicator,\n\t\t\t\t'addfriend_indicator': addfriend_indicator,\n\t\t\t})", "def follow_user(self:'InstaClient', user:str, nav_to_user:bool=True):\n # Check User Vadility\n profile = self.get_profile(user)\n if not profile:\n raise InvalidUserError(user)\n\n # Navigate to User Page\n self._nav_user(user, check_user=False)\n \n if self._check_existence(EC.presence_of_element_located((By.XPATH, Paths.MESSAGE_USER_BTN))):\n # User already followed\n pass\n else:\n follow_button = self._find_element(EC.presence_of_element_located((By.XPATH, Paths.FOLLOW_BTN)), url=ClientUrls.NAV_USER.format(user))\n self._press_button(follow_button)\n profile.requested_by_viewer = True\n return profile", "def account_profile(request):\n get_or_creat(request)\n return redirect(\"/\")", "def user_profile():\n user = current_user\n user_is_valid = True\n if not user.active:\n flash('This user account is under review. 
Please update your profile '\n + ' and contact the organizing team to access all functions of '\n + 'this platform.', 'warning')\n\n form = UserForm(obj=user, next=request.args.get('next'))\n form.roles.choices = [(r.id, r.name) for r in Role.query.order_by('name')]\n\n # Check conflicting PKs\n if form.email.data != user.email:\n if User.query.filter_by(email=form.email.data).first() is not None:\n flash('This e-mail address is already registered.', 'error')\n user_is_valid = False\n\n if user.sso_id:\n # Do not allow changing password on SSO\n del form.password\n\n # Validation has passed\n if form.is_submitted() and form.validate() and user_is_valid:\n # Assign roles\n user.roles = [Role.query.filter_by(\n id=r).first() for r in form.roles.data]\n del form.roles\n\n # Sanitize username\n user.username = sanitize_input(form.username.data)\n del form.username\n\n # Assign password if changed\n originalhash = user.password\n form.populate_obj(user)\n # Do not allow changing password on SSO\n if not user.sso_id:\n if form.password.data:\n user.set_password(form.password.data)\n else:\n user.password = originalhash\n\n user.updated_at = datetime.utcnow()\n db.session.add(user)\n db.session.commit()\n user.socialize()\n flash('Profile updated.', 'success')\n return redirect(url_for('public.user', username=user.username))\n\n if not form.roles.choices:\n del form.roles\n else:\n form.roles.data = [(r.id) for r in user.roles]\n return render_template('public/useredit.html',\n oauth_type=oauth_type(),\n user=user, form=form,\n active='profile')", "def profile(username):\n try:\n with Database() as database:\n # Makes sure the user exists\n user = database.checkForUser(username)\n if user == session['username']:\n if session['username'] == username:\n session['id'] = database.getID(session['username'])\n owned_texts, shared_texts = formatTexts(*database.getUsersTexts(session['id']))\n categories = database.loadCategories()\n return render_template('profile.html',\n owned_texts=owned_texts,\n shared_texts=shared_texts,\n username=username,\n categories=categories)\n flash(\"You cannot view other users profiles\")\n return redirect(url_for('index'))\n flash(\"User %s not found\" % username)\n return redirect(url_for('index'))\n except Exception as e:\n flash(\"Something went wrong, please try again\")\n return redirect(url_for('index'))", "def profile(request):\n user = request.user\n characters = get_all_characters(user)\n if get_logged_in_char(characters):\n context = get_profile_context(user, characters)\n return render(request, 'coreapp/individual.html', context)\n else:\n family_members = characters\n userlname = user.last_name\n context = {'family_members' : family_members,\n 'lastname' : userlname,\n }\n return render(request, 'coreapp/profile.html', context)", "def profile(request, id):\n u = get_object_or_404(User, pk=id)\n context = ProfileContext(u).get_context()\n return render(request, 'wantedly_app/profile.html', context)", "def viewprofile():\n user = current_user\n form = UserUpdateForm(obj=user)\n form.populate_obj(user)\n if form.validate_on_submit():\n form.populate_obj(user)\n\n db.session.commit()\n\n flash('You have successfully edited your profile!')\n return render_template('user/user.html', title=\"View Profile\",\n user=user, form=form, action='Edit')", "def get_redirect_url(self):\n return reverse(\"accounts:profile\",kwargs={\"username\": self.user.username})", "def get_absolute_url(self):\n return '/profile/%s' % self.id", "def get_user_profile(self):\n return 
self.request('get', 'id/users')", "def profile(request):\n return render(request, 'profile.html', context)", "def show_user_profile(user_id):\n\n # raise 401 if no one logged in\n if CURRENT_USER_KEY not in session:\n raise Unauthorized()\n\n # define user of whose profile is being viewed\n profuser = User.query.get_or_404(user_id)\n # define logged in user for authenticated navbar details\n user = User.query.get(session[CURRENT_USER_KEY])\n if user_id == session[CURRENT_USER_KEY]:\n profile_active = 'active'\n else:\n profile_active = ''\n\n return render_template('user_profile.html', profuser=profuser, user=user, profile_active=profile_active)", "def profile(request, username):\n if not request.user.is_authenticated:\n print(\"ERROR: not authenticated\")\n return redirect(\"login\")\n else:\n user_info = {\n \"username\": request.user.username,\n \"email\": request.user.email,\n \"first_name\": request.user.first_name,\n \"last_name\": request.user.last_name,\n \"zipcode\": request.user.zipcode,\n }\n\n return render(request, \"profile_base.html\", user_info)", "def get_absolute_url(self):\n return reverse('profile', args=[str(self.id)])", "def show_user_profile(username):\n\n name = USERS[username]\n return f\"<h1>Profile for {name}</h1>\"", "def profile(request, user_name=None):\n \n # get the viewed user\n if user_name is None:\n user = request.user.get_profile()\n else:\n user = get_object_or_404(User, username=user_name)\n user = user.get_profile()\n \n # set display name\n if len(user.user.first_name) <= 0:\n user.display_name = user.user.username\n else:\n user.display_name = user.user.first_name + \" \" + user.user.last_name\n \n # set avatar path\n if len(user.avatar.name) <= 0:\n user.avatar_url = settings.MEDIA_URL + \"avatar/noavatar.png\"\n else:\n user.avatar_url = user.avatar.url\n \n # get tracked list, ownedlist and playlist\n trackedlist = user.trackedrecordlist_set.all()\n ownedlist = user.userentry_set.all()\n playlist = user.playlist_set.all()\n context = {\n 'profile_user': user,\n 'trackedlist': trackedlist,\n 'ownedlist': ownedlist,\n 'playlist': playlist\n }\n return render_to_response(\n 'usermgr/profile.html',\n context,\n context_instance = RequestContext(request))", "def test_user_profile(self):\n with self.client as c:\n with c.session_transaction() as sess:\n sess[CURR_USER_KEY] = self.testuser_id\n\n address = \"1215 Brookview Ave, Kettering, Ohio 45409\"\n\n resp = c.get(f\"/users/8989/\")\n html = resp.get_data(as_text=True)\n\n self.assertEqual(resp.status_code, 200)\n self.assertIn(\n '<h1 class=\"Display-4 text-center mt-3\"><b>Profile Information:</b></h1>',\n html,\n )\n self.assertIn(\"<p>testuser</p>\", html)\n self.assertIn(\"<p>test@test.com</p>\", html)\n self.assertIn(\"<p>662-996-3356</p>\", html)\n self.assertIn(\n '<a class=\"font-weight-bold btn winter-neva-gradient color-block btn-block my-4 waves-effect z-depth-0\" href=\"/users/8989/edit\">Edit Profile</a>',\n html,\n )", "def load_profile_page(self, url='', user=None):\n if user:\n url = 'http://www.linkedin.com/in/' + user\n if 'com/in/' not in url and 'sales/gmail/profile/proxy/' not in url:\n raise ValueError(\n \"Url must look like... .com/in/NAME or... 
'.com/sales/gmail/profile/proxy/EMAIL\")\n self.driver.get(url)\n # Wait for page to load dynamically via javascript\n try:\n myElem = WebDriverWait(self.driver, self.timeout).until(AnyEC(\n EC.presence_of_element_located(\n (By.CSS_SELECTOR, self.MAIN_SELECTOR)),\n EC.presence_of_element_located(\n (By.CSS_SELECTOR, self.ERROR_SELECTOR))\n ))\n except TimeoutException as e:\n raise ValueError(\n \"\"\"Took too long to load profile. Common problems/solutions:\n 1. Invalid LI_AT value: ensure that yours is correct (they\n update frequently)\n 2. Slow Internet: increase the time out parameter in the Scraper\n constructor\n 3. Invalid e-mail address (or user does not allow e-mail scrapes) on scrape_by_email call\n \"\"\")\n\n # Check if we got the 'profile unavailable' page\n try:\n self.driver.find_element_by_css_selector(self.MAIN_SELECTOR)\n except:\n raise ValueError(\n 'Profile Unavailable: Profile link does not match any current Linkedin Profiles')\n # Scroll to the bottom of the page incrementally to load any lazy-loaded content\n self.scroll_to_bottom()", "def profile(request):\n user = Info.objects.all()\n return render(request, 'kvent/profile.html',{user:'user'})", "def user_profile(request, slug):\n posts = Post.objects.filter(author__username=slug)\n\n following = request.user.profile.followers.filter(username=slug)\n return render(request,\n 'posts/profile.html',\n {'posts': posts,\n 'author': User.objects.get(username=slug),\n 'following': following})", "def show_user(user_id):\n\n user = User.query.get_or_404(user_id)\n return render_template('users/profile.html', user=user)", "def show_user_profile(user_id):\n user = User.query.filter_by(user_id=user_id).first()\n\n return render_template(\"user_profile.html\", user=user)", "def userProfile(userid):\n images = get_uploaded_images()\n record = UserProfile.query.filter_by(id=userid).first()\n return render_template('userProfile.html', images=images, record =record)", "def login_success(request):\n if not hasattr(request.user, 'profile'):\n return redirect('index')\n else:\n return redirect('registration_process')", "def user_page(username):\n\n user = User.query.get_or_404(username)\n feedback = Feedback.query.filter_by(recipient=username).all()\n \n if \"user\" not in session:\n flash(\"Not logged in\")\n return redirect('/login')\n else:\n return render_template('user_page.html', user=user, feedback=feedback)", "def user_home(request, user_name):\n query = User.objects.filter(username=user_name)\n if query.count() == 0:\n raise Http404(\"Can't find a user named: %s\" % user_name)\n else:\n user = query[0]\n if UserProfile.objects.filter(user=user):\n user_profile = UserProfile.objects.filter(user=user)[0]\n groups = get_user_groups(user)\n return render_to_response('user_home.html', locals())", "def print_user_profile(self):\n print ('User Name: {name}').format(name=self.name)\n print ('User Email: {email}').format(email=self.email)\n print ('User Phone Number: {birthday}').format(birthday=self.phone_number)\n print ('User Country: {country}').format(country=self.country)\n print ('User Address: {address}').format(address=self.address)", "def active_user(self):\n return redirect(url_for('.user', name=current_user.name))", "def user_detail(request, slug):\n user = request.user\n profile = Profile.objects.get(slug=slug)\n albums = profile.albums.all()\n plc_albums = albums.exclude(is_public=False)\n pvt_albums = albums.exclude(is_public=True)\n\n friends = profile.friends.all()\n family = profile.relations.all()\n user_family = 
user.profile.relations.all()\n user_friends = user.profile.friends.all()\n\n receiver = FriendRequest.objects.filter(from_user=profile.user)\n sender = FriendRequest.objects.filter(to_user=profile.user)\n \n received = []\n sent = []\n for item in receiver:\n received.append(item.id)\n received.append(item.to_user)\n\n for item in sender:\n received.append(item.id)\n sent.append(item.from_user)\n\n template = 'profiles/user_detail.html'\n context = {\n 'profile': profile,\n 'friends': friends,\n 'family': family,\n 'albums': albums,\n 'plc_albums': plc_albums,\n 'pvt_albums': pvt_albums,\n 'received': received,\n 'sent': sent,\n 'user_family': user_family,\n 'user_friends': user_friends,\n }\n return render(request, template, context)", "def show_user_account_home():\n\n user = User.query.filter_by(user_id=int(session['user_id'])).one()\n print user\n\n return render_template(\"base.html\")\n # return render_template(\"user_account.html\", user_id=user.user_id, name=user.first_name)\n #, user_id=user.user_id, email=email, name=first_name)", "def follow_user(self, user):\n self.nav_user(user)\n follow_button = self.driver.find_element_by_xpath(\n \"//button[contains(text(), 'Follow')]\")\n follow_button.click()\n time.sleep(1)\n self.driver.get(self.base_url)", "def show_user_profile(user_id):\n # check if the logged in user has permission to view the user profile page\n if int(user_id) != session.get('logged_in'):\n return redirect(\"/search\")\n\n print user_id\n user = User.query.filter(User.id == user_id).one()\n email = user.email\n pic = user.pic\n username = user.username\n\n ensembles = user.ensembles\n\n points_per_ensemble ={}\n points = 0\n for ea in user.ensemble_associations:\n points += ea.points\n points_per_ensemble[ea.ensemble] = ea.points\n\n # if points:\n # flash(\"You got a point!\")\n\n movie_ensemble = {}\n for ensemble in ensembles:\n ensemble_points_pair = (points_per_ensemble.get(ensemble, 0), ensemble)\n if ensemble.movie not in movie_ensemble:\n movie_ensemble[ensemble.movie] = [ensemble_points_pair]\n else:\n movie_ensemble[ensemble.movie].append(ensemble_points_pair)\n\n for pair_lst in movie_ensemble.values():\n pair_lst.sort(reverse=True)\n\n # return render_template('../front_end/templates/user_profile.html',\n # pic=pic,\n # email=email,\n # username=username,\n # ensembles=ensembles,\n # movie_ensemble=movie_ensemble,\n # points=points,\n # )\n\n return jsonify(dict(pic=pic,\n email=email,\n username=username,\n ensembles=ensembles,\n movie_ensemble=movie_ensemble,\n points=points,))", "def display_profile(self):\n print(f\"Id: {self._id}\")\n print(f\"username: {self.username}\")\n print(f\"name: {self.name}\")\n print(f\"contact: {self.contact}\")\n print(f\"address: {self.address}\")", "def profile():\n\n if not session.get('oauth_token'):\n return redirect(url_for('login'))\n tokenString = \"bearer {0}\".format(session['oauth_token']['access_token'])\n headers = {\"Authorization\": tokenString}\n profileInfo = {'access_token': session['oauth_token']['access_token']}\n\n # get user summary\n userinfourl = '{}/userinfo'.format(baseUAAurl)\n userinfo = json.loads(requests.get(\n userinfourl, headers=headers, verify=False).text)\n session['userinfo'] = userinfo\n profileInfo['userinfo'] = json.dumps(session['userinfo'])\n\n # Method 1 : get user roles by orgs and space\n usersummaryurl = '{0}/v2/users/{1}/summary'.format(\n baseAPIurl, userinfo['user_id'])\n usersummary = json.loads(requests.get(\n usersummaryurl, headers=headers, verify=False).text)\n\n 
if usersummary.get('entity'):\n spaceWiseUserRoles = getSpaceWiseUserRoles(usersummary['entity'])\n else:\n # Method 2 : get user roles by orgs and space\n spaceWiseUserRoles = {}\n spaceurl = baseAPIurl + '/v2/spaces'\n spaceresponse = requests.get(spaceurl, headers=headers, verify=False)\n space_json_data = json.loads(spaceresponse.text)\n for spaceresource in space_json_data['resources']:\n entity = spaceresource['entity']\n spaceGuid = spaceresource['metadata']['guid']\n\n # get all auditors\n auditorurl = baseAPIurl + entity['auditors_url']\n auditorresponse = json.loads(requests.get(\n auditorurl, headers=headers, verify=False).text)\n if isInThisRole(auditorresponse, userinfo['user_name']):\n spaceWiseUserRoles[spaceGuid] = {\n 'role': 'auditor',\n 'name': spaceresource['entity']['name']\n }\n\n # get all developers\n devurl = baseAPIurl + entity['developers_url']\n devresponse = json.loads(requests.get(\n devurl, headers=headers, verify=False).text)\n if isInThisRole(devresponse, userinfo['user_name']):\n spaceWiseUserRoles[spaceGuid] = {\n 'role': 'developer',\n 'name': spaceresource['entity']['name']\n }\n\n # get all managers\n managerurl = baseAPIurl + entity['managers_url']\n managerresponse = json.loads(requests.get(\n managerurl, headers=headers, verify=False).text)\n if isInThisRole(managerresponse, userinfo['user_name']):\n spaceWiseUserRoles[spaceGuid] = {\n 'role': 'manager',\n 'name': spaceresource['entity']['name']\n }\n\n profileInfo['spaceWiseUserRoles'] = json.dumps(spaceWiseUserRoles)\n session['spaceWiseUserRoles'] = spaceWiseUserRoles\n\n # get user apps from all spaces\n url = '{}/v2/apps'.format(baseAPIurl)\n response = requests.get(url, headers=headers, verify=False)\n appsData = json.loads(response.text)\n appsUrls = {}\n\n # user accessible app url\n for resource in appsData['resources']:\n routes_url = baseAPIurl + \\\n resource['entity']['routes_url']\n routes_url_response = json.loads(requests.get(\n routes_url, headers=headers, verify=False).text)\n for app in routes_url_response['resources']:\n hostname = app['entity']['host']\n appsUrls[hostname] = {\n 'url': 'http://{}.local.pcfdev.io'.format(hostname),\n 'space_guid': app['entity']['space_guid'],\n 'userRole': getSpaceRole(spaceWiseUserRoles, app['entity'][\n 'space_guid'], userinfo['user_name'])}\n profileInfo['apps'] = appsUrls\n\n organization_guid = getOrganizationId(session, appsData)\n profileInfo['org_id'] = organization_guid\n profileInfo['org_users'] = json.dumps(getOrganizationUsers(\n session, organization_guid))\n return render_template('profile.html', data=profileInfo)", "def self_profile_view(request):\n context = RequestContext(request)\n context_dict = {}\n user = request.user\n user_profile_object = UserProfile.objects.get(user=user)\n if user_profile_object.is_new:\n return HttpResponseRedirect('/user/')\n\n # Social Profile\n try:\n social_profiles_object = SocialProfile.objects.get(parent=user_profile_object)\n except SocialProfile.DoesNotExist:\n social_profiles_object = None\n\n # Education Profile\n try:\n eduObjs = EducationDetails.objects.filter(parent=user_profile_object)\n except EducationDetails.DoesNotExist:\n eduObjs = None\n\n # Employment Profile\n try:\n empObjs = EmploymentDetails.objects.filter(parent=user_profile_object)\n except EmploymentDetails.DoesNotExist:\n empObjs = None\n\n try:\n timingsObj = Timings.objects.get(parent=user)\n except:\n timingsObj = None\n # TODO add personal details after the user model\n # is finalized\n # initialize all to None\n 
context_dict['full_name'] = None\n context_dict['gender'] = None\n context_dict['date_of_birth'] = None\n context_dict['city'] = None\n context_dict['country'] = None\n context_dict['college'] = None\n context_dict['email'] = None\n context_dict['contact_number'] = None\n context_dict['about'] = None\n context_dict['provider'] = None\n context_dict['picture_url'] = None\n context_dict['profile_url'] = None\n context_dict['edu_list'] = None\n context_dict['emp_list'] = None\n\n gender_options = {'male': \"M\", 'female': 'F'}\n\n name = user.first_name + \" \" + user.last_name\n context_dict['full_name'] = name\n\n gender = user_profile_object.gender\n if gender in gender_options.keys():\n context_dict['gender'] = gender_options[gender]\n\n date_of_birth = user_profile_object.date_of_birth\n if date_of_birth != '':\n context_dict['date_of_birth'] = date_of_birth\n\n city = user_profile_object.city\n if city != '':\n context_dict['city'] = city\n\n country = user_profile_object.country\n if country != '':\n context_dict['country'] = country\n\n email_field = user.email\n if email_field != None:\n context_dict['email'] = email_field\n\n contact = user_profile_object.contact\n if contact != None:\n context_dict['contact_number'] = contact\n\n about = user_profile_object.about\n if about:\n context_dict['about'] = about\n\n college = user_profile_object.college\n if college:\n context_dict['college'] = college\n\n picture_url = user_profile_object.picture\n if picture_url:\n context_dict['picture_url'] = picture_url\n\n provider = None\n\n profile_url = None\n\n if social_profiles_object:\n\n if social_profiles_object.profile_pic_url_linkedin:\n provider = \"LinkedIn\"\n picture_url = social_profiles_object.profile_pic_url_linkedin\n profile_url = social_profiles_object.profile_url_linkedin\n\n # If there is no pic uploaded, render LinkedIn pic\n if (not user_profile_object.picture):\n context_dict['pic_url'] = picture_url\n\n elif social_profiles_object.profile_pic_url_facebook:\n provider = \"Facebook\"\n picture_url = social_profiles_object.profile_pic_url_facebook\n profile_url = social_profiles_object.profile_url_facebook\n\n if provider:\n context_dict['provider'] = provider\n\n if context_dict['picture_url'] is None:\n context_dict['picture_url'] = picture_url\n\n if profile_url != None:\n context_dict['profile_url'] = profile_url\n if eduObjs:\n edu_list = []\n for obj in eduObjs:\n edu_list.append({'inst': obj.institution, 'city': obj.city, 'state': obj.state, 'country': obj.country,\n 'degree': obj.degree,\n 'branch': obj.branch, 'from': obj.from_year, 'to': obj.to_year, 'coun': obj.country})\n\n context_dict['edu_list'] = edu_list\n\n if empObjs:\n emp_list = []\n for obj in empObjs:\n emp_list.append({'org': obj.organization, 'loc': obj.location, 'pos': obj.position,\n 'from': obj.from_year, 'to': obj.to_year})\n\n context_dict['emp_list'] = emp_list\n\n context_dict['mentee_count'] = Request.objects.filter(mentorId=user.id, is_completed=True).count()\n rating_obj = {}\n try:\n rating_obj = Ratings.objects.get(mentor=user)\n average = int(round(rating_obj.average))\n rating_obj.activeStars = 'x' * average\n rating_obj.inactiveStars = 'x' * (5 - average)\n\n except ObjectDoesNotExist:\n rating_obj['count'] = 0\n rating_obj['one'] = 0\n rating_obj['two'] = 0\n rating_obj['three'] = 0\n rating_obj['four'] = 0\n rating_obj['five'] = 0\n rating_obj['average'] = 0\n\n context_dict['ratings'] = rating_obj\n\n context_dict['ratings'] = rating_obj\n\n # Specify timings too\n if timingsObj is 
not None:\n context_dict['weekday_l'] = timingsObj.weekday_l\n context_dict['weekday_u'] = timingsObj.weekday_u\n context_dict['weekend_l'] = timingsObj.weekend_l\n context_dict['weekend_u'] = timingsObj.weekend_u\n\n return render_to_response(\"mentor/profile-view.html\", context_dict, context)", "def profile():\n\n form = EditUserForm(obj=g.user)\n\n if form.validate_on_submit():\n if User.authenticate(g.user.username, form.password.data):\n g.user.username = form.username.data\n g.user.email = form.email.data\n g.user.image_url = form.image_url.data\n g.user.header_image_url = form.header_image_url.data\n g.user.bio = form.bio.data\n g.user.private = form.private.data\n db.session.commit()\n return redirect(f'/users/{g.user.id}')\n flash('Incorrect password', 'danger')\n return render_template('users/edit.html', user_id=g.user.id, form=form)", "def user_profile_page(request, pk=None):\n userprofile = get_object_or_404(User, pk=pk)\n profileposts = ProfilePost.objects.filter(user=userprofile).filter(published_date__lte=timezone.now()\n ).order_by('-published_date').all() \n return render(request, 'profile.html', {\"profile\": userprofile, 'profileposts': profileposts})", "def test_06_user_public_profile(self):\r\n # As Anonymou user\r\n url = \"/account/%s\" % self.name\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Public User Profile page should be shown to anonymous users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n # As Authenticated user but NOT ADMIN\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Public User Profile page should be shown to authenticated users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n self.signout\r\n # As Authenticated user but ADMIN\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Public User Profile page should be shown to admin users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n self.signout()", "def user_profile(request):\n user = User.objects.get(email=request.user.email)\n bugs = Bug.objects.filter(author=request.user.id)\n features = Feature.objects.filter(author=request.user.id)\n context = { \n 'bugs' : bugs, \n 'features' : features,\n 'profile' : user,\n }\n \n return render(request, 'profile.html', context)", "def home_page():\n return redirect('/users')", "def user_profile(request, pk=None):\n # user is indentified by his email\n user = User.objects.get(email=request.user.email)\n # show profileposts, fist identifying the user and then ordering by date, new first\n profileposts = ProfilePost.objects.filter(user=request.user).filter(published_date__lte=timezone.now()\n ).order_by('-published_date').all()\n # show messages to user, ordering by date, new first. 
In template an if statement makes sure only the messages of the logged-in user show\n contactuserposts = ContactUser.objects.all().filter(date__lte=timezone.now()\n ).order_by('-date').all() \n return render(request, 'profile.html', {\"profile\": user, 'profileposts': profileposts, 'contactuserposts': contactuserposts})", "def __str__(self):\n return self.user.username + \"'s Profile\"", "def test_06_user_public_profile(self):\r\n # As Anonymou user\r\n url = \"/account/%s\" % self.name\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Public User Profile page should not be shown to anonymous users\"\r\n assert dom.find(id='enforce_privacy') is not None, err_msg\r\n # As Authenticated user but NOT ADMIN\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Public User Profile page should not be shown to authenticated users\"\r\n assert dom.find(id='enforce_privacy') is not None, err_msg\r\n self.signout\r\n # As Authenticated user but ADMIN\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Public User Profile page should be shown to admin users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n self.signout()", "def profile(username):\n username = mongo.db.users.find_one(\n {\"username\": session[\"user\"]})[\"username\"]\n return render_template(\"profile.html\", username=username)", "def test_redirect_profile(self):\n self.create_user_and_login(\n agreed_to_terms_of_service=True,\n filled_out=False\n )\n\n resp = self.client.get(DASHBOARD_URL)\n self.assertRedirects(resp, PROFILE_URL)", "def user_details(request, user_identifier):\n if not request.user.is_authenticated:\n user_details_url = reverse('user_details', kwargs={'user_identifier': user_identifier})\n login_url = reverse('login')\n return redirect(login_url + '?next=' + user_details_url)\n\n user_id = _to_user_id(user_identifier)\n\n if user_id is not None:\n return photos(request, user_id)\n else:\n return user_view(request, user_identifier)", "def login(self):\r\n self.driver.get(self.login_url)\r\n email_element = self.driver.find_element_by_id('email')\r\n email_element.send_keys(self.email)\r\n password_element = self.driver.find_element_by_id('pass')\r\n password_element.send_keys(self.password)\r\n password_element.submit()\r\n\r\n soup = BeautifulSoup(self.driver.page_source)\r\n profile_link = soup.find('a', {'title': 'Profile'})\r\n self.profile_name = profile_link.get('href')[25:] # link appears as http://www.facebook.com/PROFILE\r\n print(self.profile_name)", "def profile(username):\n try:\n user = mongo.db.users.find_one({\"username\": username})\n terms = list(mongo.db.terms.find(\n {\"submitted_by\": user[\"_id\"], \"rating\": {\"$gt\": -2}}))\n ordered = sortTermsAlphabetically(terms)\n toprated = sortTermsByRating(terms)\n games = list(mongo.db.games.find())\n return render_template(\n \"profile.html\", user=user, terms=ordered,\n toprated=toprated, games=games)\n except TypeError:\n flash(\"This user does not exist\", category=\"error\")\n return redirect(url_for(\"get_terms\"))", "def open_user_home_page(driver, user_name): \r\n if driver is None or not user_name:\r\n return False, f\" - Open {user_name}'s home page ...\"\r\n\r\n driver.get('https://500px.com/' + user_name)\r\n # waiting until the page is opened\r\n main_window_handle = None\r\n time_out = 30\r\n 
count_down = time_out\r\n while not main_window_handle and count_down > 0:\r\n main_window_handle = driver.current_window_handle\r\n count_down -= 1\r\n if count_down <= 0:\r\n return False, f\"Timed out ({time_out}s) while opening {user_name}'s home page. Please retry\"\r\n\r\n time.sleep(2)\r\n \r\n if check_and_get_ele_by_class_name(driver, 'not_found'):\r\n return False, f'User {user_name} does not exits'\r\n elif check_and_get_ele_by_xpath (driver, \"//span[contains(text(), 'Work in progress')]\"):\r\n return False, f'User {user_name} has no photos' \r\n elif check_and_get_ele_by_xpath (driver, \"//span[contains(text(), 'This page is no longer available')]\"):\r\n return False, f'User {user_name} no longer exits' \r\n\r\n elif check_and_get_ele_by_class_name(driver, 'missing') is None:\r\n return True, ''\r\n\r\n else:\r\n return False, f'Error reading {user_name}\\'s page. Please make sure a valid user name is used'", "def show_user(request):\n return _show_user(request)", "def show_user(request):\n return _show_user(request)", "def describe_my_user_profile():\n pass", "def profile(user_login, page=1):\n user = User.query.filter_by(user_github_login=user_login).first_or_404()\n quotes = Quote.get_quotes_with_pagination(\n page, user_id=user.user_id)\n\n prev_page = url_for(\n 'user.profile', user_login=user_login, page=quotes.prev_num)\n next_page = url_for(\n 'user.profile', user_login=user_login, page=quotes.next_num)\n\n return dict(\n user=user, quotes=quotes, prev_page=prev_page, next_page=next_page)", "def profile(request):\n context = {\n\n }\n template = loader.get_template('registration/profile.html')\n return HttpResponse(template.render(context, request))", "def profile(request):\n # Load last 5 orders as preview\n orders = Order._default_manager.filter(user=request.user)[0:5]\n return render(request, 'oscar/customer/profile.html', locals())", "def test_view_all_users_profiles(self):\n self.authorize_user(self.user_login_details)\n response = self.client.get(self.profiles_url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def user_home(request):\n # crop = Crop.objects.get(user=request.user)\n return render(request, 'user_home.html', locals())", "def detail(id):\n\tu = User.objects.get_or_404(id=id)\n\t#c = get_school_context(u)\n\t#if not g.school==c:\n\t#\tflash(_(\"You've been redirected to the school the user is following.\"))\n\t#\treturn redirect(url_for_school('users.detail', school=c, id=u.id), code=301)\n\treturn render_template('user/detail.html',\n\t\ttitle = u.display_name,\n\t\tuser = u)", "def tutorial(request):\n\n username = request.session.get('username', False)\n profile = request.session.get('profile', False)\n if(username):\n context = {'username': username,'profile':profile}\n return render(request, 'MedTAG_sket_dock_App/index.html', context)\n else:\n return redirect('MedTAG_sket_dock_App:login')" ]
[ "0.72839487", "0.7275138", "0.72487134", "0.72350943", "0.7165833", "0.70807487", "0.7042888", "0.7016116", "0.69886965", "0.696752", "0.6904682", "0.6819126", "0.6698022", "0.6682588", "0.6592525", "0.6578378", "0.6544088", "0.6506118", "0.6474697", "0.64575464", "0.6455914", "0.64534855", "0.64393365", "0.64332914", "0.6420224", "0.6420224", "0.6414309", "0.6389359", "0.6380607", "0.63712543", "0.6367127", "0.63480973", "0.63130176", "0.6302239", "0.6287161", "0.62831956", "0.6269691", "0.62661433", "0.62630403", "0.62582237", "0.6245819", "0.6244434", "0.6233864", "0.62334", "0.622562", "0.62221414", "0.6180951", "0.6159108", "0.61515206", "0.612072", "0.61003697", "0.6091995", "0.60813266", "0.60740936", "0.60685235", "0.6061061", "0.60575795", "0.6049294", "0.6037119", "0.6035484", "0.60032696", "0.6000242", "0.5998298", "0.59950435", "0.599087", "0.59902", "0.59868443", "0.59538853", "0.594729", "0.59460396", "0.5933118", "0.5929808", "0.5910022", "0.59017617", "0.58916616", "0.58874327", "0.5878165", "0.58529776", "0.5852407", "0.58510345", "0.583877", "0.5837417", "0.58361685", "0.58304554", "0.5826818", "0.5813489", "0.5807712", "0.5798052", "0.57922125", "0.57919717", "0.57889134", "0.57889134", "0.57887506", "0.5781521", "0.57759184", "0.5775584", "0.5764964", "0.5764442", "0.5761716", "0.57573766" ]
0.7264308
2
Method goes to posts with a specific tag
def search_tag(self, tag): self.driver.get(self.tag_url.format(tag))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_tag(tag, page):\n per_page = current_app.config['POSTS_PER_PAGE']\n tag = Tag.query.filter_by(name=tag).first() or abort(404)\n posts = tag.posts.order_by(Post.id.desc())\n if not session.get('logged_in'): posts = posts.filter_by(visible=True)\n items = posts.limit(per_page).offset((page - 1) * per_page).all()\n pagination = Pagination(posts, page=page, per_page=per_page, \n total=posts.count(), items=items)\n flash(\"Posts tagged with '%s'\" % tag.name)\n return render_template('posts.html', pagination=pagination,\n endpoint_func=lambda x: url_for('main.show_tag', tag=tag.name, page=x))", "def tagged(request,slug):\n\n tag = get_object_or_404(Tag, slug=slug)\n books = Book.objects.filter(tags=tag)\n \n for book in books:\n book\n\n context = {\n 'tag':tag,\n 'books':books,\n }\n return render(request, 'favorite.html', context)", "def get_posts_tag(url_tag):\n return Posts.objects.filter(tags__tag=url_tag)", "async def szuru_tag(self, ctx: commands.Context, postid: int, operation: str, *tags):\n raise NotImplementedError(f\"Work in progress!\") # TODO", "def tag(request, tag_name):\n raise NotImplementedError", "def by_tag(articles_by_tag, tag):\n for a in articles_by_tag:\n if a[0].slug == tag:\n return a[1]", "def tagged_entries(request, tag_pk=None):\n tag = get_object_or_404(Tag, pk=tag_pk)\n\n entries = tag.blogentry_set.all()\n search_str = 'Tag search results for: %s' % (tag.tag)\n data = {'entries': paginate_objects(request, entries),\n 'action_str': search_str, 'blog_info': get_blog_info()}\n\n return render_to_response('blog/list_entries.html', data,\n context_instance=get_rq(request))", "async def tags(self, ctx, tag=None):\r\n\t\tnum = 0\r\n\t\tTags = self.settings.ServerConfig(ctx.guild.id, 'Tags')\r\n\t\tfuz = self.bot.get_cog('FuzzySearch')\r\n\t\tif not fuz:\r\n\t\t\treturn await ctx.send('Can\\'t find FuzzySearch Cog')\r\n\r\n\t\tRes = fuz.fuzSearch(ctx, tag, Tags)\r\n\r\n\t\t\t\t\r\n\t\tif ctx.author.top_role.colour:\r\n\t\t\tcol = ctx.author.top_role.colour\r\n\t\telse:\r\n\t\t\tcol =self.settings.randomColor()\r\n\r\n\t\tembed = discord.Embed(\r\n\t\t\ttitle = Res[num],\r\n\t\t\tdescription = Tags[Res]['data'],\r\n\t\t\tcolour = col\r\n\t\t)\r\n\t\tembed.set_footer(text='Last Edited {}'.format(Tags[Res]['time']))\r\n\t\tawait ctx.send(embed=embed)", "def find_by_id(self, tag, params={}, **options):\n path = \"/tags/%s\" % (tag)\n return self.client.get(path, params, **options)", "def tagged(self, tag_slug):\n return self.filter(tag__slug=tag_slug)", "def tag_view(request, tag_id, error='', message=''):\n tag = Tag.objects.get(id=tag_id)\n return index(request=request, error=error, message=message, tag=tag, tag_id=tag_id)", "def tagpage(request, tag):\n template_var = base_template_vals(request)\n template_var[\"tag\"] = tag\n try:\n template_var[\"events\"] = Event.objects.filter(is_approved=True\n ).filter(tags__name__in = [tag])\n except Event.DoesNotExist:\n raise Http404\n return render_to_response(\"event/tag_single.html\", template_var,\n context_instance=RequestContext(request))", "def show_tag(name):\n tag = Tag.query.filter_by(name=name).first()\n return redirect(url_for('articles.show_all') + '?t=' + str(tag.id))", "def test_post_tag_content(self):\n url = reverse(\n 'blog:post_tag_list',\n kwargs={'slug': self.post.slug}\n )\n response = self.client.get(url)\n self.assertEqual(200, response.status_code)\n self.assertTemplateUsed(response, 'blog/blog_index.html')", "def show_tag_details(tag_id):\n tag = Tag.query.get_or_404(tag_id)\n posts = 
tag.posts\n\n return render_template('tags/tag_details.html', tag=tag, posts=posts)", "def test05_tags(self, tags_location):\n self.info(\"Open one of blogs posts\")\n self.open_random_blog()\n self.assertIn(\"posts\", self.driver.current_url)\n\n self.info(\"Get tags page, get one of tags[TAG].\")\n if tags_location == \"blogs_tags\":\n self.find_element(\"blog_tags_page\").click()\n\n self.info(\"Check that posts have [TAG].\")\n tmp = self.find_element(tags_location)\n tags = tmp.find_element_by_tag_name(\"li\")\n if tags:\n tag = random.choice(tags)\n tag_name = tag.text\n tag.click()\n self.assertIn(tag_name, self.driver.current_url)\n posts_with_tags = self.find_element(\"posts_with_tags\")\n post = random.choice(posts_with_tags.find_elements_by_class_name(\"post\"))\n post_name = post.text\n post.click()\n self.assertIn(post_name, self.driver.current_url)\n self.assertIn(tag_name, self.driver.page_source)", "def search_by_tags(request):\n resultTopics = []\n resultPosts = []\n if request.method == 'POST':\n data = request.data\n print(data)\n search_query = data['query']\n data_tags = list(set(data['tags']))\n print(data_tags)\n tagObjects = []\n if len(data_tags) > 0:\n tagObjects = Tag.objects.filter(hidden_tags__overlap=data_tags) | Tag.objects.filter(reduce(operator.and_, (Q(wikidataID=tag_id) for tag_id in data_tags)))\n for tagObject in tagObjects:\n print(\"LOL\")\n tag_topics = tagObject.topics.all()\n tag_posts = tagObject.posts.all()\n for topic in tag_topics:\n if topic not in resultTopics:\n resultTopics.append(topic)\n for post in tag_posts:\n if post not in resultPosts:\n resultPosts.append(post)\n # for tag in data[\"tags\"]:\n # try:\n # tagObjects = Tag.objects.filter(wikidataID=tag)\n # except Tag.DoesNotExist:\n # continue;\n # for tagObject in tagObjects:\n # tag_topics = tagObject.topics.all()\n # tag_posts = tagObject.posts.all()\n # for topic in tag_topics:\n # if topic not in resultTopics:\n # resultTopics.append(topic)\n # for post in tag_posts:\n # if post not in resultPosts:\n # resultPosts.append(post)\n print(resultTopics);\n print(resultPosts);\n\n query_topics = Topic.objects.filter(name__icontains=search_query)\n query_posts = Post.objects.filter(content__icontains=search_query)\n for topic in query_topics:\n if topic not in resultTopics:\n resultTopics.append(topic)\n for post in query_posts:\n if post not in resultPosts:\n resultPosts.append(post)\n\n all_relations = Relation.objects.all()\n for topic in resultTopics:\n for relation in all_relations:\n if (topic == relation.topic_from) and (relation.topic_to not in resultTopics):\n resultTopics.append(relation.topic_to)\n if (topic == relation.topic_to) and (relation.topic_from not in resultTopics):\n resultTopics.append(relation.topic_from)\n\n TopicSerializer.Meta.depth = 1\n PostNestedSerializer.Meta.depth = 1\n\n topicSerializer = TopicNestedSerializer(resultTopics, many=True)\n #topicSerializer.Meta.depth = 1\n postSerializer = PostNestedSerializer(resultPosts, many=True)\n #postSerializer.Meta.depth = 1\n\n return Response({'topics':topicSerializer.data, 'posts':postSerializer.data})", "def tagPosts(db, tags):\n c=db.cursor()\n print >>sys.stderr, \"Finding tagged posts from\", tags\n idents = []\n for tag in tags:\n c.execute(\"\"\"SELECT post_id FROM tags WHERE tag='%s'\"\"\" % tag)\n idents += [int(ident[0]) for ident in c.fetchall()]\n c.close()\n return list(set(idents))", "def go_to_edit_tag(tag_id):\n \n tag = Tag.query.get_or_404(tag_id)\n posts = Post.query.all()\n return 
render_template('tags/edit.html', tag=tag, posts=posts)", "def show_tag_details(tag_id):\n\n tag = Tag.query.get_or_404(tag_id)\n # find all posts associated with tag...\n # posts = PostTag.query.get('posts.tag_id')\n\n return render_template('tag_details.html', tag=tag)", "async def tag(self, ctx: \"IceTeaContext\", *, tag_name: str):\n tag_content = await ctx.guild_data.call_tag(tag_name, ctx.channel.id, ctx.author.id)\n if tag_content:\n await ctx.send(tag_content)\n else:\n await ctx.send(\"No Tag found\")", "def show_post(post_id):\n post = Post.query.get_or_404(post_id)\n tags = post.tags\n \n return render_template('posts/post.html', post=post, tags=tags)", "def show_edit_tag(tag_id):\r\n tag = Tag.query.get_or_404(tag_id)\r\n posts = Post.query.order_by(Post.title).all()\r\n\r\n return render_template('edit-tag.html', tag=tag, posts=posts)", "def get_tag_to_post(id_post):\n try:\n tags = Posts.objects.get(id=id_post)\n list_tags = [t.tag for t in tags.tags.all()]\n except:\n list_tags = []\n return list_tags", "def search_tumblr_tags_for_img(tag, count=16):\n tag = re.sub(r'[^a-z0-9]+', '-', tag.lower())\n tag = re.sub(r'^\\-|\\-$', '', tag)\n url_to_use = THUMBLR_URL % tag\n response = URLOPENER.open(url_to_use)\n soup = BeautifulSoup(response.read(), 'html.parser')\n posts_found = []\n for node1 in soup.find_all('li', 'post'):\n post = {'href': None, 'photo_stage_img': None, 'tags': [], 'notes': 0}\n node2 = node1.find('div', 'photo_stage_img')\n if node2:\n mobj = re.search(r'url\\((.*?)\\)', node2.get('style', ''))\n if mobj:\n post['photo_stage_img'] = mobj.group(1)\n node2 = node1.find('a', 'click_glass')\n if node2:\n post['href'] = node2['href'][:node2['href'].rindex('/')]\n post['href'] = post['href'].replace('https:', 'http:', 1)\n post['tags'] = []\n for node2 in node1.find('a', 'post_tag'):\n post['tags'].append(node2.replace(' ', '-'))\n node2 = node1.find('a', 'notes')\n if node2:\n post['notes'] = _force_to_int(node2.text)\n if 'photo_stage_img' in post:\n posts_found.append(post)\n count -= 1\n if count <= 0:\n break\n return posts_found", "def snippets_by_tag(request, slug):\n tag = get_object_or_404(Tag, slug__exact=slug)\n return list_detail.object_list(request,\n queryset=Snippet.objects.get_by_tag(slug),\n extra_context={ 'object': tag },\n template_name='cab/tag_detail.html',\n **base_generic_dict)", "def tagged(tag = ''):\n\tresults = queries.tagged(tag)\n\ttags = queries.tags()\n\treturn render_template('index.html', packages=results, tags=tags, currentFilter=tag)", "def projects_with_tag(request, tag):\n return tag.project_set.filter(user=request.user)", "def tags_new_page():\n post_ids =[int(n) for n in request.form.getlist(\"posts\")]\n posts = Post.query.filter(Post.id.in_(post_ids)).all()\n new_tag = Tag(name=request.form[\"name\"], posts=posts)\n\n db.session.add(new_tag)\n db.session.commit()\n\n flash(f\"'{new_tag.name}' Tag added. \")\n\n return redirect(\"/tags\")", "def read(self, request, tag=None):\n tags = Tag.objects\n if tag:\n t = tags.get(slug=tag)\n return t.entry_set.all()\n else:\n return tags.all()", "def search_by_hashtag(request):\n if request.method == \"POST\":\n token = request.data.get('token')\n post_id = request.data.get('post_id')\n type_ = request.data.get('type')\n hashtag = request.data.get('hashtag')\n\n if Token.objects.filter(key=token).exists():\n token = get_object_or_404(Token, key=token)\n posts_ids = PostHashtag.objects.filter(hashtag__contains=hashtag). 
\\\n values_list(\"post_id\", flat=True)\n\n if post_id == -1:\n posts = Post.objects.filter(pk__in=posts_ids).order_by(\"-date\")[:PAGE_OFFSET]\n elif type_ == 'old':\n posts = Post.objects.filter(pk__in=posts_ids, pk__lt=post_id).order_by(\"-date\")[:PAGE_OFFSET]\n else: # 'new'\n posts = reversed(Post.objects.filter(pk__in=posts_ids, pk__gt=post_id).order_by(\"date\")[:PAGE_OFFSET])\n\n serializer = PostSerializer(posts, context={'user_id': token.user_id}, many=True)\n return Response({\"success\": 66,\n \"posts\": serializer.data})\n else:\n return Response({\"error\": 17})", "def go_to_create_tag():\n\n posts = Post.query.all()\n return render_template('tags/new.html', posts=posts)", "def tags(request):\n return Tag.objects.filter(user=request.user)", "def tags():", "def test_tag_search(self):\n url = reverse_lazy('tag-list') + '?search={}'.format('testtag')\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n results = response.data['results']\n self.assertEqual(len(results), 3)\n\n for tag in ('testtag1', 'testtag3'):\n result = list(filter(lambda it: it['title'] == tag, results))\n self.assertEqual(len(result), 1)\n result = result[0]\n\n self.assertEqual(len(result['posts']), 3)", "def get_tag(self, tag):\n resp = self.get(_u.build_uri(\"tag\", domain=self.domain),\n data={'tag': tag})\n return utils.handle_response(resp)", "def extract_recent_tag(self, tag):\n\n url_string = \"https://www.instagram.com/explore/tags/%s/\" % tag\n response = bs4.BeautifulSoup(requests.get(url_string).text, \"html.parser\")\n potential_query_ids = self.get_query_id(response)\n shared_data = self.extract_shared_data(response)\n\n media = shared_data['entry_data']['TagPage'][0]['tag']['media']\n posts = []\n for node in media['nodes']:\n post = self.extract_recent_instagram_post(node)\n posts.append(post)\n self.save_results(posts)\n\n end_cursor = media['page_info']['end_cursor']\n\n # figure out valid queryId\n for potential_id in potential_query_ids:\n url = \"https://www.instagram.com/graphql/query/?query_id=%s&tag_name=%s&first=12&after=%s\" % (\n potential_id, tag, end_cursor)\n try:\n data = requests.get(url).json()\n if 'hashtag' not in data['data']:\n # empty response, skip\n continue\n query_id = potential_id\n success = True\n break\n except JSONDecodeError as de:\n # no valid JSON retured, most likely wrong query_id resulting in 'Oops, an error occurred.'\n pass\n if not success:\n log.error(\"Error extracting Query Id, exiting\")\n sys.exit(1)\n\n while end_cursor is not None:\n url = \"https://www.instagram.com/graphql/query/?query_id=%s&tag_name=%s&first=12&after=%s\" % (\n query_id, tag, end_cursor)\n data = requests.get(url).json()\n if 'hashtag' not in data['data']:\n # empty response, skip\n continue\n end_cursor = data['data']['hashtag']['edge_hashtag_to_media']['page_info']['end_cursor']\n posts = self.extract_instagram_posts(data['data']['hashtag']['edge_hashtag_to_media']['edges'])\n self.save_results(posts)", "def tags_show_page(tag_id):\n\n tags= Tag.query.get_or_404(tag_id)\n return render_template('tags/tags_show.html', tags=tags)", "def view_post(request, slug_post):\n try:\n post = Entry.objects.filter(status=2).get(slug=slug_post)\n except Entry.DoesNotExist:\n raise Http404\n return render_to_response('blog/post.html', {'post':post, 'DISQUS_SHORTNAME':settings.DISQUS_SHORTNAME}, RequestContext(request))", "def get_posts(self):\r\n postList = []\r\n for tag in self.setting.imgurTags:\r\n try:\r\n req = requests.get('%s%s' % 
(self.setting.tagLink, tag), headers=self.setting.imgurHeaders)\r\n for post in req.json()['data']['items']:\r\n p = self.json_to_post(post, tag)\r\n if p is not None:\r\n postList.append(p)\r\n except Exception as e:\r\n self.logger.log(logger.LogLevel.CRITICAL, 'imgur.get_posts exception(%s): %s' % (tag, e))\r\n break\r\n return postList", "def tag(self, sent):\n # WORK HERE!!", "def update_post(request, pk):\n data = JSONParser().parse(request)\n\n if request.method == 'PATCH':\n if data['user_id']:\n user = User.objects.get(id = data['user_id'])\n else:\n user = request.user\n try:\n postObject = Post.objects.filter(id=pk, user = user).first()\n except Post.DoesNotExist:\n content = {'user forbidden': 'you should be user of the requested post.'}\n return Response(content, status=status.HTTP_403_FORBIDDEN)\n\n\n postObject.content = data['content']\n postObject.tags.clear()\n\n for tag in data[\"tags\"]:\n if len(tag)>0:\n if tag['name'] == '':\n continue\n try:\n tagObject = Tag.objects.get(wikidataID=tag['wikidataID'])\n except ObjectDoesNotExist:\n tagObject = Tag.objects.create(wikidataID=tag['wikidataID'], name=tag['name'])\n except MultipleObjectsReturned:\n return HttpResponse(\"Multiple tags exist for.\" + tag + \" Invalid State.\")\n\n unique_hidden_tags = list(set(tag['hidden_tags']))\n if unique_hidden_tags:\n tagObject.hidden_tags = unique_hidden_tags\n\n tagObject.save()\n postObject.tags.add(tagObject)\n postObject.save()\n return Response(status=status.HTTP_200_OK)", "def update_all_posts():\n for post in CURRENT_POSTS:\n update_tag(post)", "def view_blog(self):", "def tag(self, tag):\n self.tag = tag", "async def get_tag_command(self, ctx):\n await self.get_tag(ctx)", "def __getitem__(self, tag):\n return self.__tags.get(tag.lower(), 0)", "def get(self, post_id):\n Post.add_like(int(post_id), self.user.get_id())\n self.redirect('/blog')", "def get_post(self):\n\t\tself.post = graph.get_object(POST_ID)", "async def getposts(ctx, theme):\n q = Query(limit=100, tag=\"travelfeed\")\n for post in Discussions_by_created(q):\n continent_code = get_location(post['body'], \"continentcode\")\n link = \"https://steemit.com/\"+construct_authorperm(post['author'], post['permlink'])\n if post['author'] in curatorlist or post['author'] in whitelist:\n continue\n elif (continent_code == \"AF\" or continent_code == \"OC\" or continent_code == \"AN\") and (theme == \"Africa\" or theme == \"Oceania\" or theme ==\"Australia\" or theme == \"australiaoceaniaafrica\"):\n await bot.say(link)\n elif continent_code == \"AS\" and theme == \"Asia\":\n await bot.say(link)\n elif continent_code == \"EU\" and theme == \"Europe\":\n await bot.say(link)\n elif (continent_code == \"SA\" or continent_code == \"NA\") and theme == \"America\":\n await bot.say(link)\n elif (\"food\" in post['body'] or \"eat\" in post['body'] or \"restaurant\" in post['body']) and (theme == \"Food\" or theme ==\"foodoftheworld\"):\n await bot.say(link)\n elif (\"advice\" in post['body'] or \"budget\" in post['body'] or \"learn\" in post['body']) and (theme == \"Advice\" or theme == \"Travel Advice\" or theme == \"traveladvice\"):\n await bot.say(link)", "def tag(self, tag):\n \n if isinstance(tag, six.integer_types):\n try:\n tag = Tag.objects.get(pk=tag, owner=self.owner)\n except Tag.DoesNotExist:\n #Handle this better?\n return\n \n if isinstance(tag, six.string_types):\n tname = tag\n try:\n tag = Tag(owner=self.owner, name=tag)\n tag.save()\n except IntegrityError:\n tag = Tag.objects.get(slug=makeslug(tname), 
owner=self.owner)\n \n tag.save() # If this isn't here there are crashes for some reason\n self.tags.add(tag)", "def search(self, tag):\n\n url = \"https://api.instagram.com/v1/tags/search?q={0}&access_token={1}\".format(tag, self.access_token)\n request = requests.get(url)\n return request.json()", "def find_posts(self):\n\n posts = self.soup.find_all(\"div\", class_=\"_401d\")\n print(f'[Scraper] Found {len(posts)} posts.')\n\n for post in posts:\n try: \n text = post.find(\"div\", class_=\"_6-cp\").div.get_text()\n a_tag = post.find('span', class_=\"_6-cm\").find('a')\n link = \"https://www.facebook.com\" + a_tag['href'] \n print(f\"[Scraper] {link}\")\n print(f\"[Scraper] {text}\")\n except:\n print(\"Error occured. Skipped a result.\")", "def filter_queryset(self, queryset):\n tags = self.request.GET.getlist(\"tag\")\n if tags:\n for tag in tags:\n queryset = queryset.filter(tag__tag=tag)\n return super().filter_queryset(queryset)", "def test_tagged_feed_link(self):\n TagFactory(name=\"green\", slug=\"green\")\n url = urlparams(reverse(\"questions.list\", args=[\"all\"]), tagged=\"green\")\n response = self.client.get(url)\n self.assertEqual(200, response.status_code)\n doc = pq(response.content)\n feed_links = doc('link[type=\"application/atom+xml\"]')\n self.assertEqual(2, len(feed_links))\n self.assertEqual(\"Recently updated questions\", feed_links[0].attrib[\"title\"])\n self.assertEqual(\"/en-US/questions/feed?product=all\", feed_links[0].attrib[\"href\"])\n self.assertEqual(\"Recently updated questions tagged green\", feed_links[1].attrib[\"title\"])\n self.assertEqual(\"/en-US/questions/tagged/green/feed\", feed_links[1].attrib[\"href\"])", "def tags(request, item_cls, item_pk):\n item = get_object_or_404(item_cls, pk=item_pk)\n return tags_data(item)", "def tag_match_to_url(tag):\n # TODO(fsiddi) when moving to Python 3.7, specify the type of tag (re.Match)\n tag_name = tag.group(0)\n tag_url = reverse('posts_list_tag', kwargs={'tag_name': tag_name[1:]})\n return f'<a href=\"{tag_url}\">{tag_name}</a>'", "def json_sluglist_by_tag(tag):\n tagobj = Tag.query.filter(Tag.value==tag).first()\n if tagobj is None:\n abort(404)\n posts = posts_base.filter(Post.tags.contains(tagobj))\n out = {'posts': []}\n for post in posts:\n out['posts'].append(post[0].slug)\n\n return jsonify(out)", "def post_to_activity(self, post):\n id = None\n if post.get('id'):\n # strip USERID_ prefix if it's there\n post['id'] = post['id'].split('_', 1)[-1]\n id = post['id']\n\n obj = self.post_to_object(post)\n activity = {\n 'verb': VERBS.get(post.get('type', obj.get('objectType')), 'post'),\n 'published': obj.get('published'),\n 'updated': obj.get('updated'),\n 'id': self.tag_uri(id) if id else None,\n 'url': self.post_url(post),\n 'actor': obj.get('author'),\n 'object': obj,\n }\n\n application = post.get('application')\n if application:\n activity['generator'] = {\n 'displayName': application.get('name'),\n 'id': self.tag_uri(application.get('id')),\n }\n return self.postprocess_activity(activity)", "def get_tagname(tags, tagid):\n for tag in tags:\n if tag['id'] == tagid:\n return tag['name']", "def getTag(self, authenticationToken, guid):\r\n pass", "def get(self, tag, index):\n raise NotImplementedError", "def find_tag(tag : str):\n\tprint(f\"finding tag {tag} . . .\")\n\n\tkeys = db.keys() # lists the database keys\n\n\tif \"tags\" not in keys: # to make sure there's a database\n\t\tdb[\"tags\"] = {} # creates the tag database\n\t\tprint(f\"Initiated databse . . 
.\")\n\t\n\ttags = db[\"tags\"] # sets the database to a variable for easy use\n\t# tags is a dictionary with keys and values\n\t# to access a tag, use tags[tag]\n\n\treturn_value = None\n\n\tif tag in tags:\n\t\treturn_value = {\n\t\t\t\"key\": tag, # gets the tag name\n\t\t\t\"value\": tags[tag], # gets the tag value frome db\n\t\t\t\"status\": 200\n\t\t}\n\t\tprint(f\"Tag {tag} found with value {tags[tag]}.\")\n\t\n\telif tag not in tags:\n\t\treturn_value = {\n\t\t\t\"key\": tag, # gets the supposed tag name\n\t\t\t\"value\": f\"Tag `{tag}` doesn't exist.\", # returns none\n\t\t\t\"status\": 404\n\t\t}\n\t\tprint(f\"Tag {tag} not found.\")\n\t\tif tag == None:\n\t\t\treturn_value[\"value\"] = None\n\t\n\telse:\n\t\treturn_value = {\n\t\t\t\"key\": None,\n\t\t\t\"value\": None,\n\t\t\t\"status\": 500\n\t\t}\n\t\tprint(f\"An error occured finding {tag}.\")\n\t\n\treturn return_value", "def show_tag(tag_id):\n tag = Tag.query.get_or_404(tag_id)\n\n return render_template(\"tags/tag_details.html\", tag=tag)", "def _next_link_for_tag(self, tag, sort):\r\n from r2.lib.db import tdb_sql as tdb\r\n import sqlalchemy as sa\r\n\r\n # List of the subreddit ids this user has access to\r\n sr = Subreddit.default()\r\n\r\n # Get a reference to reddit_rel_linktag\r\n linktag_type = tdb.rel_types_id[LinkTag._type_id]\r\n linktag_thing_table = linktag_type.rel_table[0]\r\n\r\n # Get a reference to the reddit_thing_link & reddit_data_link tables\r\n link_type = tdb.types_id[Link._type_id]\r\n link_data_table = link_type.data_table[0]\r\n link_thing_table = link_type.thing_table\r\n\r\n # Subreddit subquery aliased as link_sr\r\n link_sr = sa.select([\r\n link_data_table.c.thing_id,\r\n sa.cast(link_data_table.c.value, sa.INT).label('sr_id')],\r\n link_data_table.c.key == 'sr_id').alias('link_sr')\r\n\r\n # Determine the date clause based on the sort order requested\r\n if isinstance(sort, operators.desc):\r\n date_clause = link_thing_table.c.date < self._date\r\n sort = sa.desc(link_thing_table.c.date)\r\n else:\r\n date_clause = link_thing_table.c.date > self._date\r\n sort = sa.asc(link_thing_table.c.date)\r\n\r\n query = sa.select([linktag_thing_table.c.thing1_id],\r\n sa.and_(linktag_thing_table.c.thing2_id == tag._id,\r\n linktag_thing_table.c.thing1_id == link_sr.c.thing_id,\r\n linktag_thing_table.c.thing1_id == link_thing_table.c.thing_id,\r\n linktag_thing_table.c.name == 'tag',\r\n link_thing_table.c.spam == False,\r\n link_thing_table.c.deleted == False,\r\n date_clause,\r\n link_sr.c.sr_id == sr._id),\r\n order_by = sort,\r\n limit = 1)\r\n\r\n row = query.execute().fetchone()\r\n return Link._byID(row.thing1_id, data=True) if row else None", "async def rule34(self, ctx, *tags):\n embed = discord.Embed(title=\"Rule 34\", colour=0x9B59B6, type=\"rich\")\n if len(tags) == 0:\n while True:\n r34_post = await self.r34_random(ctx)\n embed.set_image(url=r34_post[\"url\"])\n\n msg = await ctx.send(embed=embed)\n\n await msg.add_reaction(\"🔄\")\n await msg.add_reaction(\"🚫\")\n\n try:\n reaction, user = await self.bot.wait_for(\"reaction_add\",\n check=lambda r, u: u.id != self.bot.user.id, timeout=60)\n except asyncio.TimeoutError:\n return await msg.delete()\n\n await msg.delete()\n if str(reaction) == \"🔄\":\n continue\n if str(reaction) == \"🚫\":\n return\n\n else:\n r34_posts = await self.r34_search(ctx, *tags)\n\n if len(r34_posts) == 0:\n await ctx.send(\"I was unable to find a post with those tags\")\n return\n else:\n index = 0\n while True:\n 
embed.set_image(url=r34_posts[index][\"url\"])\n embed.set_footer(text=\"({}/{})\".format(index + 1, str(len(r34_posts))))\n\n msg = await ctx.send(embed=embed)\n\n await msg.add_reaction(\"◀\")\n await msg.add_reaction(\"▶\")\n await msg.add_reaction(\"🚫\")\n\n try:\n reaction, user = await self.bot.wait_for(\"reaction_add\",\n check=lambda r, u: u.id != self.bot.user.id,\n timeout=60)\n except asyncio.TimeoutError:\n return await msg.delete()\n\n await msg.delete()\n if str(reaction) == \"◀\" and index > 0:\n index -= 1\n if str(reaction) == \"▶\" and index < len(r34_posts) - 1:\n index += 1\n if str(reaction) == \"🚫\":\n return", "def get(self, request, *args, **kwargs):\n tag = kwargs['tag_name']\n page = request.GET.get('page')\n sort = request.GET.get('sort')\n per_page = request.GET.get('pagesize')\n if not per_page or per_page == '0' or per_page == 0:\n per_page = qa_settings.QUESTION_PAGE_SIZE\n if not sort:\n sort = 'newest'\n tags = Tag.objects.filter(name=tag)\n question_list = []\n unsolved_question_list = []\n for t in tags:\n if not t.question.solved:\n unsolved_question_list.append(t.question)\n question_list.append(t.question)\n\n if sort == 'votes':\n question_list = sorted(question_list, key=lambda i: (\n i.votes_count, i.created_time), reverse=True)\n elif sort == 'answers':\n question_list = sorted(question_list, key=lambda i: (\n i.solved, i.answers_count, i.created_time), reverse=True)\n elif sort == 'unanswered':\n question_list = sorted(unsolved_question_list, key=lambda i: (\n i.answers_count, i.votes_count, i.created_time))\n elif sort == 'views':\n question_list = sorted(question_list, key=lambda i: (\n i.hits_count, i.created_time), reverse=True)\n else:\n question_list = sorted(\n question_list, key=lambda i: i.created_time, reverse=True)\n\n paginator = Paginator(question_list, per_page)\n try:\n questions = paginator.page(page)\n except PageNotAnInteger:\n questions = paginator.page(1)\n except EmptyPage:\n questions = paginator.page(paginator.num_pages)\n\n return render(request, 'qa/questions.html', {\"questions\": questions})", "def get_tag(tag_name, tag_list):\n for i in range(len(tag_list)):\n if tag_name == str(tag_list[i]):\n return tag_list[i]", "def detail(request, idea_id):\n idea = get_object_or_404(Idea, pk=int(idea_id))\n if request.method == 'POST':\n tag_form = IdeaTagForm(request.POST)\n if tag_form.is_valid():\n data = tag_form.clean()['tags']\n tags = [tag.strip() for tag in data.split(',')\n if tag.strip() != '']\n try:\n for t in tags:\n add_tags(idea, t, None, request.user, 'idea')\n except NameError: # catch if add_tags doesn't exist\n idea.tags.add(*tags)\n return HttpResponseRedirect(\n reverse('idea:idea_detail', args=(idea.id,)))\n else:\n tag_form = IdeaTagForm()\n\n voters = idea.voters.all()\n\n for v in voters:\n try:\n v.profile = v.get_profile()\n except (ObjectDoesNotExist, SiteProfileNotAvailable):\n v.profile = None\n\n downvoters = idea.downvoters.all()\n\n for dv in downvoters:\n try:\n dv.profile = dv.get_profile()\n except (ObjectDoesNotExist, SiteProfileNotAvailable):\n dv.profile = None\n\n idea_type = ContentType.objects.get(app_label=\"idea\", model=\"idea\")\n\n tags = idea.tags.extra(select={\n 'tag_count': \"\"\"\n SELECT COUNT(*) from taggit_taggeditem tt\n WHERE tt.tag_id = taggit_tag.id\n AND content_type_id = %s\n \"\"\"\n }, select_params=[idea_type.id]).order_by('name')\n\n tags_created_by_user = []\n if COLLAB_TAGS:\n for tag in tags:\n tag.tag_url = \"%s?tags=%s\" % (reverse('idea:idea_list'), tag.slug)\n 
for ti in tag.taggit_taggeditem_items.filter(tag_creator=request.user,\n content_type__name=\"idea\",\n object_id=idea_id):\n tags_created_by_user.append(tag.name)\n\n if 'Mobile' in request.META['HTTP_USER_AGENT']:\n mobile = True\n else:\n mobile = False\n\n return _render(request, 'idea/detail.html', {\n 'idea': idea, # title, body, user name, user photo, time\n 'support': request.user in voters or request.user in downvoters,\n 'tags': tags,\n 'tags_created_by_user': tags_created_by_user,\n 'voters': voters,\n 'downvoters': downvoters,\n 'tag_form': tag_form,\n 'mobile':mobile,\n })", "def queryset(self, request, queryset):\n for tag in get_resource_tags():\n if self.value() == tag[0]:\n return queryset.filter(tags__slug__iexact=tag[0])", "def handle_tag_search(self, tag_text):\n log.debug(\"Handling tag search: %s\", tag_text)\n tags = tag_text.split()\n self.filter_tags = tags\n self.current_selected = 0\n self._refresh()", "def get(self):\n res = SmartAPI.get_tags(self.args.field)\n self.finish(res)", "def get(self):\n res = SmartAPI.get_tags(self.args.field)\n self.finish(res)", "def tag_cmd(context, json, name):\n store: Store = context.obj[\"store\"]\n LOG.info(\"Fetch tags\")\n tag_objs = store.get_tags()\n template = schema.TagSchema()\n result = []\n for tag_obj in tag_objs:\n if name and (tag_obj.name not in name):\n continue\n LOG.debug(\"Use tag %s\", tag_obj.name)\n result.append(template.dump(tag_obj))\n if not result:\n LOG.info(\"Could not find any of the specified tags [%s]\", \", \".join(name))\n return\n if json:\n click.echo(jsonlib.dumps(result))\n return\n console = Console()\n console.print(get_tags_table(result))", "async def slashtag_info(self, ctx: commands.Context, *, tag: TagConverter):\n await tag.send_info(ctx)", "def post(request, slug):\n\tsingle_post = get_object_or_404(Post, slug=slug)\n\tsingle_post.views += 1\n\tsingle_post.save()\n\treturn render(request, 'blog/single_post.html', {'single_post': single_post})", "def get_queryset(self):\n queryset = super().get_queryset()\n return queryset.filter(tags__name=self.kwargs['tag_slug'])", "def bloggerVisitView(request, pk):\n blogger = Blogger.objects.get(id=pk)\n posts = blogger.post_set.all()\n paginator = Paginator(posts, 6) # Show 6 posts per page.\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n\n context = {\n \"blogger\": blogger,\n \"posts\": posts,\n \"page_obj\": page_obj,\n }\n return render(request, \"blog/blogger_visit.html\", context)", "def analyse_posts(self, event):\n\n # Iterate over each post listed in \"deployed\"\n for post in event[\"deployed\"]:\n title = post.title()\n\n # Don't send for drafts\n if post.is_draft or post.post_status != \"published\":\n self.logger.info(\n \"Skipping Draft Post {0} with status {1}\".format(\n title, post.post_status\n )\n )\n continue\n\n # Extract some details\n link = post.permalink(absolute=True)\n text = post.text()\n self.logger.info(\"Processing {0}\".format(link))\n\n # Calculate and retrieve the state-key for this URL\n key = \"webmention-info-{0}\".format(link)\n observed_links = self.site.state.get(key)\n\n if not observed_links:\n observed_links = []\n\n # Extract links from within the rendered page\n links = self.extract_links(text)\n\n # Set up a requests session so that HTTP keep-alives can be used where possible\n # this reduces connection overhead etc\n session = requests.session()\n\n # Set the user-agent for all requests in this session\n session.headers.update({\"User-Agent\": \"Nikola SSG 
Webmention plugin\"})\n\n # Send mentions for each\n for dest in links:\n\n # See whether a webmention's already been sent for this page and url\n # means we won't reping links every time a post is updated\n if dest in observed_links:\n continue\n\n sent, has_mentions = self.send_webmention(link, dest, session)\n\n # We want to cache two categories of link\n #\n # Has webmentions, sent successfully\n # Does not have webmentions\n\n if sent or not has_mentions:\n observed_links.append(dest)\n\n # Now that all links have been processed, save the state\n self.site.state.set(key, observed_links)", "def get_posts_for_tags(slug=None, id=None, *args, **kwargs):\n if slug and isinstance(slug, basestring):\n tags = slug.split(',')\n qs = Post.objects.filter(tags__slug__in=tags)\n elif slug and isinstance(slug, (QuerySet, Tag)):\n qs = Post.objects.filter(tags__in=slug) \n elif id and isinstance(id, basestring):\n ids = id.split(',')\n qs = Post.objects.filter(tags__id__in=ids) \n else:\n qs = Post.objects.none()\n return qs", "def update(self, tag, params={}, **options):\n path = \"/tags/%s\" % (tag)\n return self.client.put(path, params, **options)", "def run_get_post(m):\n\n doc = get_doc(m)\n assert doc is not None\n\n wp = get_wp(m)\n\n post = find_post(wp, doc.identifier)\n\n if post:\n post.content = \"…content elided…\"\n from pprint import pprint\n pprint(post.struct)\n return\n else:\n warn(f\"Didn't find post for identifier {doc.identifier}\")\n return", "def get(self, request, *args, **kwargs):\n page = request.GET.get('page')\n per_page = request.GET.get('pagesize')\n if not per_page or per_page == '0' or per_page == 0:\n per_page = qa_settings.TAG_PAGE_SIZE\n tag_list = Tag.objects.values('name').distinct()\n paginator = Paginator(tag_list, per_page)\n try:\n tags = paginator.page(page)\n except PageNotAnInteger:\n tags = paginator.page(1)\n except EmptyPage:\n tags = paginator.page(paginator.num_pages)\n return render(request, 'qa/tags.html', {'tags': tags})", "def retrieve(self, request, pk=None):\n\n\n \n\n\n try:\n # `pk` is a parameter to this function, and\n # Django parses it from the URL route parameter\n # http://localhost:8000/Posts/2\n #\n # The `2` at the end of the route becomes `pk`\n post = Post.objects.get(pk=pk)\n reactions = Reaction.objects.all()\n\n # Creates an empty list for reactions custom property set in model, and then filters through postReactions to provide objects with a\n # key/value pair of reaction label/number of that reaction the post has \n\n post.reactions=[]\n\n for reaction in reactions:\n number_of_reactions = PostReaction.objects.filter(post=post, reaction=reaction).count()\n post.reactions.append({reaction.label: number_of_reactions})\n\n associated_tags=Tag.objects.filter(related_post__post=post)\n user = RareUser.objects.get(user=request.auth.user)\n\n all_tags=serializer=TagSerializer(associated_tags, many=True, context={'request',request})\n my_post=serializer = PostSerializer(post, context={'request': request})\n \n single_post={}\n single_post['post']=my_post.data\n single_post['tags']=all_tags.data\n if user == post.user:\n single_post['myPosts']=True \n\n return Response(single_post)\n except Exception as ex:\n return HttpResponseServerError(ex)", "def add_tagging(self, task_instance):", "def post_detail_blog(request, blog_pk):\n # recuperar el post\n # recupera posts\n posts = Post.objects.order_by('-created_at').filter(owner=blog_pk)\n\n # prepara el contexto de la plantilla\n context = {\n 'post_objects': posts\n }\n\n # renderiza y 
devuelve la plantilla\n return render(request, 'blogs/inicio.html', context)", "def get_post_tags(postid, posttags, tags):\n _tags = []\n _nametags = []\n for item in posttags:\n if item['post_id'] == postid:\n _tags.append(item['tag_id'])\n for tag in _tags:\n nametag = get_tagname(tags, tag)\n _nametags.append(nametag)\n return _nametags", "def __getitem__(self, tag):\n return self.get(tag)", "def test_post_rank(self):\n # Upvote last post\n post = self.posts[-1]\n url = reverse_lazy('post-vote', kwargs={'pk': post.pk})\n response = self.put_json(url)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # Search first tag\n url = reverse_lazy('tag-list') + '?search={}'.format(self.tags[0])\n\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n result = response.data['results'][0]\n posts = result['posts']\n\n self.assertEqual(posts[0]['id'], post.pk)", "def get_queryset(self):\n return Entry.published.filter(tags__slug=self.kwargs['tag_slug'])", "def get(self, post_id):\n post = Post.get_by_id(int(post_id), parent=blog_key())\n self.redirect('/blog/%s' % str(post.key().id()))", "def new_tag_form():\r\n\r\n posts = Post.query.order_by(Post.title).all()\r\n\r\n return render_template('tag-form.html', posts=posts)", "def update_tag(tag):\n remove_tag(tag)\n add_tag(tag)", "def show_item_by_tag(plugin, item_id, tag_value):\n import alltheitems.item_page\n return alltheitems.item_page.item_page({\n 'id': plugin + ':' + item_id,\n 'tagValue': None if tag_value == 'null' else tag_value\n })", "def updateTag(self, authenticationToken, tag):\r\n pass", "def test_single_tag(self):\n self.request.log(\"Hello World\", tags=[\"tag1\"])\n self.request.end()\n entry = self.get_entry()\n assert len(entry['tags']) == 1\n assert entry['tags'][0] == \"tag1\"", "def check_tag(self, session, tag):\n if not tag:\n return False\n\n try:\n self._tag(session.get, key=tag, session=session)\n return True\n except exceptions.NotFound:\n return False", "def post(request, blog, urlname):\n\tif request.user.is_authenticated():\n\t\tblog = Blog.qa_objects.get(urlname=blog)\n\t\tpost = BlogEntry.qa_objects.get(blog=blog, urlname=urlname)\n\t\tposts = BlogEntry.qa_objects.filter(blog=blog).order_by('-posting_time')[:5]\n\t\tblogs = Blog.qa_objects.order_by('name')\n\telse:\n\t\tblog = Blog.objects.get(urlname=blog)\n\t\tpost = BlogEntry.objects.get(blog=blog, urlname=urlname)\n\t\tposts = BlogEntry.objects.filter(blog=blog).order_by('-posting_time')[:5]\n\t\tblogs = Blog.objects.order_by('name')\n\tfyi = Article.objects.filter(news_type='FYI').order_by('-posting_time')[:5]\t\n\treturn render_to_response('blogs/post.html', {'blog': blog, 'post': post, 'posts': posts, 'fyi': fyi, 'blogs': blogs}, context_instance=RequestContext(request))", "def tagger():", "def tasks_with_tag(request, tag):\n return tag.task_set.filter(user=request.user).exclude(folder='trash')" ]
[ "0.66354024", "0.6565476", "0.649461", "0.6490434", "0.6155304", "0.61414886", "0.6071974", "0.6021756", "0.59992045", "0.5903175", "0.5880634", "0.5843738", "0.58239686", "0.5765947", "0.57510775", "0.5750838", "0.5728099", "0.57190305", "0.56963086", "0.56737995", "0.566945", "0.56650895", "0.56617254", "0.56345314", "0.56208545", "0.5619072", "0.5583281", "0.5578606", "0.5561596", "0.5558556", "0.55543584", "0.5553541", "0.5548332", "0.5542897", "0.5542894", "0.5479537", "0.546022", "0.5453168", "0.54358506", "0.54331565", "0.5431033", "0.5402743", "0.53986484", "0.5361509", "0.5327734", "0.5298246", "0.52953976", "0.52801174", "0.5270034", "0.5267184", "0.5262909", "0.52556795", "0.52369446", "0.5229711", "0.52284676", "0.521414", "0.52104986", "0.52035415", "0.51727676", "0.51664865", "0.5161151", "0.51469165", "0.5127539", "0.51265657", "0.5124741", "0.51239777", "0.51230454", "0.5121457", "0.51090044", "0.51068276", "0.50933516", "0.5086761", "0.5086761", "0.5062994", "0.505922", "0.50534934", "0.50424653", "0.50369406", "0.50295806", "0.5026755", "0.5015396", "0.50117207", "0.5009362", "0.4988581", "0.4988444", "0.4985982", "0.4971776", "0.49699828", "0.49672422", "0.49656212", "0.49632218", "0.49630815", "0.49616736", "0.4961096", "0.4957426", "0.49513897", "0.49438006", "0.49384966", "0.4934202", "0.49269956" ]
0.61920655
4
Method allows the bot to automatically type and send a DM to a user
def direct_message(self, user, msg, num):
    PAUSE = 1
    logging.info('Send message {} to {}'.format(msg,user))
    self.driver.get(self.direct_url)
    self.driver.find_elements_by_xpath('/html/body/div[2]/div/div/div[2]/div[1]/div/div[2]/input')[0].send_keys(user)
    time.sleep(PAUSE)
    self.driver.find_elements_by_xpath('/html/body/div[5]/div/div/div/div[3]/button[2]')[0].click()  # Edge case to get rid of notification
    time.sleep(PAUSE)
    self.driver.find_elements_by_xpath('/html/body/div[2]/div/div/div[2]/div[2]/div/div/div[3]/button')[0].click()
    self.driver.find_elements_by_xpath('/html/body/div[2]/div/div/div[1]/div/div[2]/div/button')[0].click()
    time.sleep(PAUSE)
    # The message will be placed and sent
    self.driver.find_elements_by_xpath('//*[@id="react-root"]/section/div/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/div[2]/textarea')[0].send_keys(msg)
    time.sleep(PAUSE)
    self.driver.find_elements_by_xpath('//*[@id="react-root"]/section/div/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/div[3]/button')[0].click()
    # Special feature involving reacting with heart
    for x in range(num):
        self.driver.find_elements_by_xpath('//*[@id="react-root"]/section/div/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/button[2]')[0].click()
        time.sleep(PAUSE)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def send_dm(user, message, embed=None):\n\n if type(user) is discord.User or type(user) is discord.Member:\n if user.dm_channel is None:\n await user.create_dm()\n\n await user.dm_channel.send(message, embed=embed)", "async def _dm(self, ctx, user: str, *, message: str = None):\n if user is None:\n await ctx.send(\"Provided no user to search for.\")\n return\n else:\n try:\n user = ctx.guild.get_member_named(user)\n if user is None:\n user = ctx.guild.get_member(int(user))\n except Exception as e:\n await ctx.send(f\"Failed to fetch user: {e}\")\n \n if user is None:\n await ctx.send(f\"Failed to find that user: {user}\")\n return\n\n if user.bot is True:\n await ctx.send(\"I cannot send messages to other bots pandejo.\")\n return\n\n if not user.dm_channel:\n await user.create_dm()\n try:\n e = discord.Embed(description=message, color=discord.Colour.blurple())\n e.set_author(name=f\"Message from {ctx.author}!\", icon_url=ctx.author.avatar_url)\n e.set_footer(text=f\"Sent at {arrow.now(tz='US/Eastern').strftime('%X')} EST\", icon_url=ctx.bot.user.avatar_url)\n await user.send(embed=e)\n await ctx.send(f\"Sent your message to {user}.\")\n except Exception as e:\n await ctx.send(f\"Failed to send message to {user}. {e}\")", "async def dm(self, ctx, user: int, *, message):\n user = self.bot.get_user(user)\n try:\n await user.send(message)\n await ctx.send(f'{emote.check} | Success!')\n except:\n return await ctx.send(f\"{emote.xmark} | Could not send message!\")", "def send_dm(self:'InstaClient', user:str, message:str):\n # Navigate to User's dm page\n try:\n self._nav_user_dm(user)\n text_area = self._find_element(EC.presence_of_element_located((By.XPATH, Paths.DM_TEXT_AREA)))\n text_area.send_keys(message)\n time.sleep(1)\n send_btn = self._find_element(EC.presence_of_element_located((By.XPATH, Paths.SEND_DM_BTN)))\n self._press_button(send_btn)\n time.sleep(1)\n except Exception as error: \n if self.error_callback:\n self.error_callback(self.driver)\n LOGGER.error('INSTACLIENT: An error occured when sending a DM to the user <{}>'.format(user))\n raise error", "def send_text_to_user(user):", "async def _dmid(self, ctx, id: int, *, message: str = None):\n if not isinstance(id, str):\n return await ctx.send(\"You have not entered a valid ID\")\n\n if not message:\n return await ctx.send(\"You must give a message to send.\")\n\n try:\n user = await ctx.bot.fetch_user(int(id))\n except Exception as e:\n return await ctx.send(f\"Error happened while trying to fetch user.\\n{e}\")\n\n if user.bot is True:\n return await ctx.send(\"I cannot send messages to bots\")\n\n if not user.dm_channel:\n await user.create_dm()\n\n message = \" \".join(message)\n e = discord.Embed(description=message, color=discord.Color.blurple())\n e.set_author(name=f\"Message from {ctx.author}!\", icon_url=ctx.author.avatar_url)\n e.set_footer(text=f\"Sent at {arrow.now(tz='US/Eastern').strftime('%X')} EST\", icon_url=ctx.bot.user.avatar_url)\n try:\n await user.send(embed=e)\n return await ctx.send(f\"Message has been sent to `{user}`!\")\n except discord.Forbidden:\n return await ctx.send(\"Cannot send messages to this user\")\n except discord.HTTPException:\n return await ctx.send(\"Message failed.\")\n except Exception as e:\n await ctx.send(f\"Error while sending embed. 
{e}\")", "async def on_typing(self, user: \"steam.User\", when: \"datetime.datetime\") -> None:", "def yourmom(update, context):\n chat_id = update.message.chat_id\n bot = context.bot\n\n options = [\n \"Dat zei je mama gisteren ook.\",\n emoji.emojize(\"Dat zei je moeder gisteren ook. :woman_raising_hand:\"),\n \"Ik zou nu een je moeder grap kunnen maken maar ik houd me in.\",\n emoji.emojize(\"Je mama is lief hoor. :woman_raising_hand:\")]\n\n msg = random.choice(options)\n\n time.sleep(HUMAN_DELAY*len(msg))\n bot.send_message(chat_id=chat_id, text=msg,\n reply_to_message_id=update.message.message_id)", "async def userfromid(ctx, iden:int):\n user = bot.get_user(iden)\n await ctx.send(user.mention)", "async def me(self, ctx):\n if ctx.invoked_subcommand is None:\n findbots = sum(1 for member in ctx.guild.members if member.bot)\n\n embed = discord.Embed()\n\n if ctx.guild.icon:\n embed.set_thumbnail(url=ctx.guild.icon_url)\n if ctx.guild.banner:\n embed.set_image(url=ctx.guild.banner_url_as(format=\"png\"))\n\n embed.add_field(name=\"Server Name\", value=ctx.guild.name, inline=True)\n embed.add_field(name=\"Server ID\", value=ctx.guild.id, inline=True)\n embed.add_field(name=\"Members\", value=ctx.guild.member_count, inline=True)\n embed.add_field(name=\"Bots\", value=findbots, inline=True)\n embed.add_field(name=\"Owner\", value=ctx.guild.owner, inline=True)\n embed.add_field(name=\"Region\", value=ctx.guild.region, inline=True)\n embed.add_field(name=\"Created\", value=default.date(ctx.guild.created_at), inline=True)\n user=ctx.author\n if not user:\n return await ctx.send(f\"Could not find any UserID matching **{user_id}**\")\n try:\n await user.send(content=\"Server info\",embed=embed)\n await ctx.send(f\"✉️ Sent a DM to **{user.name}**\")\n except discord.Forbidden:\n await ctx.send(\"This user might be having DMs blocked or it's a bot account...\")", "async def autorole(self, ctx: commands.Context):", "async def eat(self, ctx, *args):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n out = ch.eat(ctx.user_object, ' '.join(args).lower())\n await ctx.send(out)", "async def before_invoke(self, ctx):\n await ctx.channel.trigger_typing()", "async def mention(self, ctx):\n settings = await self.fetch_settings(ctx)\n if settings['require_mention']:\n settings['require_mention'] = False\n await ctx.send('🐱 Okay, I no longer need to be @mentioned for me to respond to messages!')\n else:\n settings['require_mention'] = True\n await ctx.send('🐱 Okay, I will now only respond to messages if they @mention me!')\n await self.db.set(ctx.guild.id, settings)", "async def treatme(self, ctx):\n await ctx.send(await self.cure_user(ctx, ctx.author))", "def on_bot_start(update, context):\n user = update.effective_user\n chat_id = update.effective_chat.id\n log.info(\n \"ADD %s, %s, %s, %s\", user.username, user.full_name, chat_id, user.language_code,\n )\n\n context.bot.send_message(\n chat_id=chat_id,\n text=c.MSG_PHONE_QUERY,\n reply_markup=ReplyKeyboardMarkup([[k.contact_keyboard]], one_time_keyboard=True),\n )\n\n # set some context data about this user, so we can rely on this later\n context.user_data[\"state\"] = c.State.EXPECTING_PHONE_NUMBER", "def handle(bot, update):\n print(update.message.text)\n bot.send_message(chat_id=update.message.chat_id,\n text='Hey! 
I\\'m Meditech Bot')", "async def typing(message: discord.Message, participants: int=2):\n await init_game(message, Typing, participants)", "async def dmsettings(self, ctx):\n\n emojify_settings = self.bot.get_cog(\"Server\").emojiy_settings\n settings = await self.ensure_dm_settings(ctx.author.id)\n\n mute_kick_ban = emojify_settings(settings['ban_kick_mute'])\n leg_session_open = emojify_settings(settings['leg_session_open'])\n leg_session_update = emojify_settings(settings['leg_session_update'])\n leg_session_submit = emojify_settings(settings['leg_session_submit'])\n leg_session_withdraw = emojify_settings(settings['leg_session_withdraw'])\n\n embed = self.bot.embeds.embed_builder(title=f\"Direct Messages for {ctx.author.name}\",\n description=f\"Check `{config.BOT_PREFIX}help dms` for help on \"\n f\"how to enable or disable these settings.\\n\\n\"\n f\"{mute_kick_ban} DM when you get muted, kicked or banned\\n\"\n f\"{leg_session_open} \"\n f\"*({self.bot.mk.LEGISLATURE_LEGISLATOR_NAME} Only)* DM when \"\n f\"a Legislative Session opens\\n\"\n f\"{leg_session_update} \"\n f\"*({self.bot.mk.LEGISLATURE_LEGISLATOR_NAME} Only)* DM when \"\n f\"voting starts for a Legislative Session\\n\"\n f\"{leg_session_submit} \"\n f\"*({self.bot.mk.LEGISLATURE_CABINET_NAME} Only)* DM when \"\n f\"someone submits a Bill or Motion\\n\"\n f\"{leg_session_withdraw} \"\n f\"*({self.bot.mk.LEGISLATURE_CABINET_NAME} Only)* DM when \"\n f\"someone withdraws a Bill or Motion\\n\",\n has_footer=False)\n await ctx.send(embed=embed)", "async def before_any_command(ctx):\n ctx.timer = time()\n try:\n await ctx.trigger_typing()\n except discord.errors.Forbidden:\n pass", "def start_bot(user_id, lang, bot, message_id):\n\n\n bot_collection[user_id] = TimeManagerBot(user_id, lang)\n message = bot_collection[user_id].get_help_message()\n bot_collection[user_id].save_settings(set_update='ALL')\n\n bot.edit_message_text(text=message, chat_id=user_id, message_id=message_id)", "async def admin(ctx):\n info = await(bot.application_info())\n mention = info.owner.mention\n message = \"My administrator is the glorious {}. 
Fear them, for they are mighty.\".format(mention)\n await(ctx.send(message))", "def mention(cls, user, message, mentioned):\r\n pass", "async def ud(self,word):\r\n defs = ud.define(word)\r\n for d in defs:\r\n await self.bot.say(d)", "async def moderation(self, ctx):\n\n new_value = await self.toggle_dm_setting(ctx.author.id, \"ban_kick_mute\")\n\n if new_value:\n message = \":white_check_mark: You will now receive DMs when you get muted, kicked or banned by me.\"\n else:\n message = \":white_check_mark: You will no longer receive DMs when you get muted, kicked or banned.\"\n\n await ctx.send(message)", "async def inviteme(self):\r\n\r\n #Your code will go here\r\n await self.bot.say(\"Here is a link to Invite Me: http://bit.ly/TacoBot\")", "def start(bot, update):\n text = firebaselink.getMenu()\n print(str(update.message.from_user));\n bot.send_message(chat_id=update.message.chat_id, text=text, parse_mode=\"markdown\")", "def startDM(self):\n self.uidm.startDM()", "async def message(self, ctx:utils.Context, user:discord.User, *, content:str):\n\n await user.send(content)", "async def tell(client, data):\n conn = client.bot.dbs[data.server]\n split = data.split_message\n\n tables = db.get_table_names(conn)\n if 'tells' not in tables:\n asyncio.create_task(client.message(data.target, 'Tell table uninitialized, ask your nearest bot admin to restart the bot.'))\n\n if len(split) > 1:\n recipient = split[0]\n recipient = recipient.lower()\n message = ' '.join(split[1:])\n else:\n return\n \n telldata = (recipient, data.nickname, message, int(time.time()), '0', '0')\n db.set_row(conn, 'tells', telldata)\n db.ccache()\n\n asyncio.create_task(client.notice(data.nickname, 'Your message will be sent.'))", "async def devox(self, ctx):\n member = discord.utils.find(lambda m: m.id == 250865328194715658, ctx.channel.guild.members)\n await ctx.send(\"{} The great man who created this bot some people say he has too much power, but the truth is he doesnt have enough\".format(member.mention))", "async def me(self, ctx):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n await ctx.send(users.print_account(ctx.user_object))", "async def dmreply(self, ctx, user_id: int, *, message: str):\n\n destination = self.bot.get_user(user_id)\n if destination is None or destination.bot:\n await ctx.send(\"Invalid ID, user not found, or user is a bot.\")\n return\n\n description = await self.config.guild(ctx.guild).title()\n # content = _(\"You can reply to this message with {}contact\").format(prefix)\n e = discord.Embed(color=(await ctx.embed_colour()), description=message)\n\n # e.set_footer(text=content)\n if ctx.bot.user.avatar_url:\n e.set_author(name=description, icon_url=ctx.bot.user.avatar_url)\n else:\n e.set_author(name=description)\n\n try:\n await destination.send(embed=e)\n except discord.HTTPException:\n await ctx.send(\"Sorry, I couldn't deliver your message\")\n else:\n await ctx.send(\"Message delivered\")", "async def typewriter(typew):\n if not typew.text[0].isalpha() and typew.text[0] not in (\"/\", \"#\", \"@\", \"!\"):\n textx = await typew.get_reply_message()\n message = typew.pattern_match.group(1)\n if message:\n pass\n elif textx:\n message = textx.text\n else:\n await typew.edit(\"`Give a text to type!`\")\n return\n sleep_time = 0.03\n typing_symbol = \"|\"\n old_text = ''\n await typew.edit(typing_symbol)\n await asyncio.sleep(sleep_time)\n for character in message:\n old_text = old_text + \"\" + character\n typing_text = old_text + \"\" + typing_symbol\n await 
typew.edit(typing_text)\n await asyncio.sleep(sleep_time)\n await typew.edit(old_text)\n await asyncio.sleep(sleep_time)", "async def set_bot():\n\n self = await LOCAL.APP.get_me()\n LOCAL.bot_id = self.id\n LOCAL.bot_name = self.first_name\n LOCAL.bot_username = self.username", "def start(bot, update, session, chat, user):\n if chat.is_maintenance:\n call_tg_func(update.message.chat, 'send_message', ['Hello there'],\n {'reply_markup': admin_keyboard})\n else:\n call_tg_func(update.message.chat, 'send_message', [help_text],\n {'reply_markup': main_keyboard, 'parse_mode': 'HTML'})", "def send_letter_everyone(d):\n print(\"Letters have been sent to all the donors!!!\")\n d.send_letter_everyone()", "def mention(cls, user, message, mentioned):\n pass", "def handle(text, mic, profile):\n messages = [\"Neustart wird eingeleitet, bis gleich. \"]\n\n message = random.choice(messages)\n\n mic.say(message)\n os.system(\"sudo reboot\")", "async def kick(self, ctx, user_id: str, *, action_text: str):\n\n session = self.bot.helpers.get_db_session()\n try:\n self.bot.log.info(\n f\"CMD {ctx.command} called by {ctx.message.author} ({ctx.message.author.id})\"\n )\n # Get the user profile\n user = await self.bot.helpers.get_member_or_user(user_id, ctx.message.guild)\n if not user:\n return await ctx.send(\n f\"Unable to find the requested user. Please make sure the user ID or @ mention is valid.\"\n )\n # Don't allow you to kick yourself or the guild owner, or itself or other bots.\n if (\n user.id\n in [ctx.message.author.id, ctx.message.guild.owner.id, self.bot.user.id]\n or user.bot\n ):\n return await ctx.send(\n f\"Sorry, but you are not allowed to do that action to that user.\"\n )\n\n # Set some meta data\n action_type = \"Kick\"\n guild = ctx.message.guild\n settings = self.bot.guild_settings.get(guild.id)\n modmail_enabled = settings.modmail_server_id\n\n # Confirm the action\n confirm = await self.bot.prompt.send(\n ctx, f\"Are you sure you want to kick {user} ({user.id})?\"\n )\n if confirm is False or None:\n return await ctx.send(\"Aborting kicking that user.\")\n elif confirm:\n # Try to message the user\n try:\n # Format the message\n message = self.bot.constants.infraction_header.format(\n action_type=action_type, guild=guild\n )\n\n # Reduces the text to 1,800 characters to leave enough buffer for header and footer text\n message += f\"'{action_text[:1800]}'\"\n # Set footer based on if the server has modmail or not\n if modmail_enabled:\n message += self.bot.constants.footer_with_modmail.format(\n guild=guild\n )\n else:\n message += self.bot.constants.footer_no_modmail.format(\n guild=guild\n )\n await user.send(message)\n user_informed = (\n f\"User was successfully informed of their {action_type}.\"\n )\n msg_success = True\n except discord.errors.Forbidden as err:\n self.bot.log.warning(\n f\"Error sending {action_type} to user. Bot is either blocked by user or doesn't share a server. Error: {sys.exc_info()[0].__name__}: {err}\"\n )\n user_informed = f\"User was unable to be informed of their {action_type}. 
They might not share a server with the bot, their DM's might not allow messages, or they blocked the bot.\"\n msg_success = False\n\n # Try and log to the database\n new_kick = None\n try:\n # Get mod's DB profile\n db_mod = await self.bot.helpers.db_get_user(\n session, ctx.message.author.id\n )\n # Get the DB profile for the guild\n db_guild = await self.bot.helpers.db_get_guild(session, guild.id)\n # Get the DB profile for the user\n db_user = await self.bot.helpers.db_get_user(session, user.id)\n\n # Log the action to the database\n # Edit the action_text to indicate success or failure on informing the user.\n if msg_success:\n action_text += \" | **Msg Delivered: Yes**\"\n else:\n action_text += \" | **Msg Delivered: No**\"\n logged_action = models.Action(mod=db_mod, server=db_guild)\n new_kick = models.Kick(\n text=action_text,\n user=db_user,\n server=db_guild,\n action=logged_action,\n )\n session.add(new_kick)\n session.commit()\n db_logged = True\n except Exception as err:\n self.bot.log.exception(f\"Error logging {action_type} to database.\")\n db_logged = False\n\n # Create the embed of info\n description = (\n f\"**Member:** {user} ({user.id})\\n\"\n f\"**Moderator:** {ctx.message.author} ({ctx.message.author.id})\\n\"\n f\"**Reason:** {action_text[:1900]}\"\n )\n\n embed = discord.Embed(\n color=0x0083FF,\n timestamp=datetime.utcnow(),\n title=f\"A user was kicked | *#{new_kick.id}*\",\n description=description,\n )\n embed.set_author(name=f\"{user} ({user.id})\", icon_url=user.avatar_url)\n # Try and get the logs channel\n logs = discord.utils.get(guild.text_channels, name=\"bot-logs\")\n\n if not logs:\n # If there is no normal logs channel, try the sweeper (legacy) logs channel\n logs = discord.utils.get(guild.text_channels, name=\"sweeper-logs\")\n\n if logs:\n # Checks if the bot can even send messages in that channel\n if (\n logs.permissions_for(logs.guild.me).send_messages\n and logs.permissions_for(logs.guild.me).embed_links\n ):\n await logs.send(embed=embed)\n\n # Now that we've handled messaging the user, let's kick them\n try:\n if isinstance(user, discord.member.Member):\n reason_text = f\"Mod: {ctx.message.author} ({ctx.message.author.id}) | Reason: {action_text[:400]}\"\n await guild.kick(user, reason=reason_text)\n if db_logged:\n response = f\"A {action_type} was successfully logged and actioned for: {user} ({user.id}).\\n\\n{user_informed}\"\n else:\n response = f\"A {action_type} was unable to be logged, however it was successfully actioned for: {user} ({user.id}).\\n\\n{user_informed}\"\n await ctx.send(response)\n else:\n raise Exception(\n \"User is not in the guild, unable to kick them.\"\n )\n except Exception as err:\n self.bot.log.warning(\n f\"Failed to kick user. Error: {sys.exc_info()[0].__name__}: {err}\"\n )\n await ctx.send(\n f\"Successfully logged a {action_type} for: {user} ({user.id}), however **unable to kick them.** This could mean they weren't in the server.\\n\\n{user_informed}\"\n )\n\n except discord.HTTPException as err:\n self.bot.log.error(\n f\"Discord HTTP Error responding to {ctx.command} request via Msg ID {ctx.message.id}. {sys.exc_info()[0].__name__}: {err}\"\n )\n await ctx.send(\n f\"Error processing {ctx.command}. Error has already been reported to my developers.\"\n )\n except DBAPIError as err:\n self.bot.log.exception(\n f\"Error with database calls in CMD {ctx.command} for: ({user_id}). {sys.exc_info()[0].__name__}: {err}\"\n )\n await ctx.send(\n f\"Error processing {ctx.command}. 
Error has already been reported to my developers.\"\n )\n session.rollback()\n except Exception as err:\n self.bot.log.exception(\n f\"Error responding to {ctx.command} via Msg ID {ctx.message.id}. {sys.exc_info()[0].__name__}: {err}\"\n )\n await ctx.send(\n f\"Error processing {ctx.command}. Error has already been reported to my developers.\"\n )\n finally:\n session.close()", "async def createdm(self, ctx, user: discord.User):\n try:\n dm_channel = await ex.get_dm_channel(user=user)\n if dm_channel is not None:\n ex.cache.mod_mail[user.id] = ctx.channel.id\n await ex.conn.execute(\"INSERT INTO general.modmail(userid, channelid) VALUES ($1, $2)\", user.id, ctx.channel.id)\n await dm_channel.send(f\"> {ctx.author.display_name} ({ctx.author.id}) has created a DM with you. All messages sent here will be sent to them.\")\n await ctx.send(f\"> A DM has been created with {user.id}. All messages you type in this channel will be sent to the user.\")\n else:\n await ctx.send(\"> I was not able to create a DM with that user.\")\n except Exception as e:\n await ctx.send(f\"ERROR - {e}\")\n log.console(e)", "def send_to_telegram(text):\n\n bot = telegram.Bot(token='')\n chat_id = ''\n bot.send_message(chat_id=chat_id, text=text)\n time.sleep(2)", "def handle_command(channel, command, user_id):\n global eaters\n\n print(command, user_id)\n\n try: \n # Search for action words to find if the bot should do something\n bot_followup = next(v for k,v in bot_capabilities.items() if command.find(k) >= 0) \n \n # Bot doing an action behind the scenes of the conversation\n # print(str(datetime.now())+\": Family Bot does - '{}'\".format(bot_followup.action))\n if bot_followup.action == \"add_self_to_eaters\": # TODO: make this an eval function\n if user_id: # Given user ID, find user name and add to eaters\"\n try:\n r = slack.users.info(user_id)\n first_name = r.body['user']['profile']['first_name'].lower()\n eaters.add(first_name)\n print(str(datetime.now())+\": {} <--> '{}'\".format(user_id, first_name))\n except:\n print(\"ERROR: add someone to eaters\")\n else:\n print(\"ERROR \"+user_id)\n\n if isinstance(bot_followup.response, list): # Bot responding the conversation\n response = choice(bot_followup.response)\n else:\n response = bot_followup.response\n elif bot_followup.action == \"add_person_to_eaters\":\n l = command.split(\" \")\n current_eater = l[l.index('add')+1]\n eaters.add(current_eater) # TODO: add logical to check that we have data\n print(eaters)\n if isinstance(bot_followup.response, list): # Bot responding the conversation\n response = choice(bot_followup.response)\n else:\n response = bot_followup.response\n elif bot_followup.action == \"fit_model\":\n # eaters |= set(command.split()[1:]) # parse command line args\n print(eaters)\n eaters = validate_eaters(eaters)\n eaters = [_.title() for _ in list(eaters)]\n print(eaters)\n try:\n results_raw = model.recommend(eaters)\n response = \"\"\"Sounds good. Based on your group preferences here are my suggestions: \n #1) {restaurant_1} (Resturant): {dishes_1}\n #2) {restaurant_2} (Resturant): {dishes_2}\n #3) {restaurant_3} (Resturant): {dishes_3}\n\n Should I order for y'all?\n \"\"\".format(restaurant_1=results_raw[0][0], dishes_1=\" | \".join(results_raw[0][1]),\n restaurant_2=results_raw[1][0], dishes_2=\" | \".join(results_raw[1][1]),\n restaurant_3=results_raw[2][0], dishes_3=\" | \".join(results_raw[2][1])\n )\n eaters = set()\n except TypeError:\n print(\"Passed empty eater set\")\n response = \"I don't know who wants to eat. 
Please tell me who is hungry.\"\n else:\n if isinstance(bot_followup.response, list): # Bot responding the conversation\n response = choice(bot_followup.response)\n else:\n response = bot_followup.response\n\n # print(str(datetime.now())+\": Family Bot does - '{ }'\".format(bot_followup.action))\n # print(str(datetime.now())+\": Family Bot says, '{}'\".format(response))\n \n\n except StopIteration:\n response = \"Not sure what you mean. Try something else.\"\n\n slack_client.api_call(\"chat.postMessage\",\n channel=channel,\n text=response, \n as_user=True)", "def on_typing(self, author_id, metadata=None):\n pass", "def __sendMessage(self):\n # TODO: Switch to this when implemented\n \n msg = self.ui.inputWidget.toPlainText()\n self.ui.inputWidget.clear()\n strv = StringView()\n strv.appendText(unicode(msg))\n self._amsn_conversation.sendMessage(strv)\n self.ui.textEdit.append(\"<b>/me says:</b><br>\"+unicode(msg)+\"\")", "def start(update: Update, _: CallbackContext) -> None:\n user = update.effective_user\n update.message.reply_markdown_v2(\n fr'Hi {user.mention_markdown_v2()}\\!',\n reply_markup=ForceReply(selective=True),\n )", "def start(update: Update, _: CallbackContext) -> None:\n user = update.effective_user\n update.message.reply_markdown_v2(\n fr'Hi {user.mention_markdown_v2()}\\!',\n reply_markup=ForceReply(selective=True),\n )", "def proceed_chatting_message(msg: telebot.types.Message):\n _, user, receiver = utils.get_game_user_opponent(msg.from_user)\n bot.send_message(\n receiver.user_id,\n f'**{user.first_name}:** __{msg.text}__',\n parse_mode='Markdown',\n )", "def send_to_telegram(text):\n\n bot = telegram.Bot(token='')\n chat_id = ''\n bot.send_message(chat_id=chat_id, text=text)\n time.sleep(15)", "def _do_start(self, chat_id, user_id, args, update):\n \n self.tclient.send_message('Hallo! Ich bin ein Bot, um dir zu helfen, dir deine Nasensprüche zu merken!', chat_id)", "def send_to_telegram(text):\n\n bot = telegram.Bot(token='')\n # chat_id = -1001371737931\n chat_id = ''\n bot.send_message(chat_id=chat_id, text=text)\n time.sleep(5)", "async def do_motd():\n\n download = urllib.request.urlopen(server_api)\n data = json.loads(download.read())\n motd = data['motd']\n await bot.send_message(c, f'MOTD: {motd}')", "def send_typing_action(func):\n\n @wraps(func)\n def command_func(update, context, *args, **kwargs):\n context.bot.send_chat_action(chat_id=update.effective_message.chat_id, action=telegram.ChatAction.TYPING)\n return func(update, context, *args, **kwargs)\n\n return command_func", "async def on(self, ctx, *, nickname=\"\"):\n nickname = nickname.strip()\n mention_here = True\n mention_everyone = True\n if nickname == \"\":\n nickname = \"Dank Bot |Music on voice!\"\n try:\n await self.bot.change_nickname(ctx.message.server.me, nickname)\n await self.bot.say(\"Hey, music is playing on voice channel come! 
@here\")\n await self.bot.delete_message(ctx.message)\n except discord.Forbidden:\n await self.bot.say(\"I cannot do that, I miss the `Change Nickname` or `Manage Messages` permission\")", "async def setdmreply(self, ctx: commands.Context):\n if not ctx.invoked_subcommand:\n pass", "def telebot():\n payload = json.loads(request.data)\n message = payload.get('message', payload.get('edited_message',''))\n msg_from = message.get('from')\n user_id = msg_from.get('id')\n user_first_name = msg_from.get('first_name','')\n user_last_name = msg_from.get('last_name','')\n user_is_bot = msg_from.get('is_bot')\n chat = message.get('chat')\n chat_id = chat.get('id')\n command = message.get('text')\n \n if user_is_bot or message == '':\n return jsonify({'method': 'sendMessage','chat_id' : chat_id,'text': 'Sorry I can\\'t answer you!'})\n \n bot_response = {\n 'method': 'sendMessage',\n 'chat_id' : chat_id,\n 'text': f'[{user_first_name} {user_last_name}](tg://user?id={user_id}) {command}',\n 'parse_mode':'Markdown',\n }\n\n return jsonify(bot_response)", "async def welcome_command(ctx):\n await ctx.send(f\"Hello! I am a bot made by {ctx.bot.owner}\")", "def start(update: Update, context: CallbackContext) -> None:\n user = update.effective_user\n update.message.reply_markdown_v2(\n fr'Hi {user.mention_markdown_v2()}\\!',\n reply_markup=ForceReply(selective=True),\n )", "def start(update: Update, context: CallbackContext) -> None:\n user = update.effective_user\n update.message.reply_markdown_v2(\n fr'Hi {user.mention_markdown_v2()}\\!',\n reply_markup=ForceReply(selective=True),\n )", "async def çıkış(con):\r\n check=str(con.message.channel)\r\n if check == 'Direct Message with {}'.format(con.message.author.name):#COMMAND USED IN DM\r\n await bot.send_message(con.message.channel,\"**You must be in a `server voice channel` to use this command**\")\r\n\r\n if check != 'Direct Message with {}'.format(con.message.author.name):#COMMAND NOT IN DM\r\n \r\n # IF VOICE IS NOT CONNECTED\r\n if bot.is_voice_connected(con.message.server) == False:\r\n await bot.send_message(con.message.channel,\"**Bot kanala bağlanmamış !**\")\r\n\r\n # VOICE ALREADY CONNECTED\r\n if bot.is_voice_connected(con.message.server) == True:\r\n bot.loop.create_task(queue_songs(con,True))", "def on_accept(self, update, _context):\n self.updater.bot.send_message(\n chat_id=update.effective_chat.id,\n text=\"Alege timpul\",\n reply_markup=InlineKeyboardMarkup(k.build_dynamic_keyboard_first_responses()),\n )", "def execute_for_command(self, skill_input: SkillInput, services: AssistantServicesBase):\n voice = skill_input.adjective.lower()\n if voice in (\"female\", \"male\"):\n services.settings_service.voice = voice\n services.settings_service.save_settings()\n services.user_interaction_service.speak('Okay, I will use a %s voice from now on.' 
% (voice), True)\n else:\n services.user_interaction_service.speak('I don\\'t understand what voice you want')", "async def _defacto(self, ctx: commands.Context):\n responses = ['DI FACTO', 'di facto']\n await ctx.send(random.choice(responses))", "async def send_dms(self):\n # Player 1 info\n info = \"\"\n for k, v in self.p1_moves.items():\n info += f\"Type **{k}** for **{v}**\\n\"\n\n embed = discord.Embed(\n title=f\"RPS in #{self.channel.name}\", description=info)\n await self.players[0].send(embed=embed)\n\n # Player 2 info\n if self.players[1].id != bot.user.id:\n info = \"\"\n for k, v in self.p2_moves.items():\n info += f\"Type **{k}** for **{v}**\\n\"\n embed = discord.Embed(\n title=f\"RPS in #{self.channel.name}\", description=info)\n await self.players[1].send(embed=embed)", "def start(update, context):\n context.bot.send_message(\n chat_id=update.effective_chat.id, text=\"Hi human\")", "async def _bot(ctx):\n await ctx.send('Yes, the bot is cool.')", "async def infectme(self, ctx):\n await ctx.send(await self.infect_user(ctx, ctx.author))", "async def monster(member : monster):\n await bot.say (result)", "def start_command_handler(update, context):\n add_typing(update, context)\n buttons = MultiItems(\"What would you like to receive?\", [\"Text\", \"File\", \"GoogleDoc\", \"Gallery\"])\n add_suggested_actions(update, context, buttons)", "async def give(message):\n # get information about the user\n uid = message['from']['id']\n name = message['from']['first_name']\n\n if not re.fullmatch(r'/give [1-9]+[0-9]*( @\\w+)+', message['text']):\n # message fails to parse\n return 'Message does not match the required format. Check rules in /help.'\n\n # get amount of give money\n money = int(message['text'].split(' ')[1])\n # get list of aliases from message\n aliases_raw = message['text'].replace('@', '').split(' ')[2:]\n\n # get parsed aliases and aliases that were failed to validate\n aliases, fail_verification, fail_verification_str = await auxiliary.check_presence_users(aliases_raw)\n\n if message['from']['username'] in aliases:\n return 'You cannot lend money to yourself twice!'\n\n if len(fail_verification) != 0:\n # some aliases fail validation\n return 'User with alias(es) ' + fail_verification_str + 'do(es) not registered in bot.'\n\n # amount on money per each user\n share_give = money / (len(aliases) + 1)\n # string with aliases of debtors\n aliases_str = ''\n for alias_ in aliases:\n aliases_str += '@' + alias_ + ' '\n # get user id of debtor\n debtor_uid = (await queries.get_user_by_alias(alias_))['uid']\n # update table debts\n await _change_debts_dictionary(debtor_uid, uid, share_give)\n message_give = name + ', you have given ' + str(share_give) + ' to ' + aliases_str\n return message_give", "def send_message(self, text):\n self.__telegram_info.message.reply_text(text)", "def start_typing(self):\n response = requests.post(\n self._server_url + _TYPING_URL,\n data={\"id\": self._chat_id}\n )", "async def send_keyboard(application: Application):\n reply_markup = ReplyKeyboardMarkup(\n [\n [\n KeyboardButton(\"/help\"),\n KeyboardButton(\"/status\"),\n ],\n [\n KeyboardButton(\"/alarm\"),\n KeyboardButton(\"/stop\"),\n ],\n [\n KeyboardButton(\"/last\"),\n KeyboardButton(\"/lines\"),\n ],\n [\n KeyboardButton(\"/last_vid\"),\n KeyboardButton(\"/last_vids\"),\n ],\n ]\n )\n await application.bot.sendMessage(\n chat_id=QKZKID,\n text=\"qdoor 🚪 alarm bot started ! 
🚨\",\n reply_markup=reply_markup,\n )\n await application.bot.sendMessage(\n chat_id=MESKOID,\n text=\"qdoor 🚪 alarm bot started ! 🚨\",\n reply_markup=reply_markup,\n )", "def send(self, text):\n log.msg('me %s' % (text))\n self.sendLine(text)", "def __send(self) -> None:\n # region Docstring\n # endregion\n\n if len(self.entryline.get_text().strip()) > 0:\n self.udp.transmission(\n \"CHA\", \"01\", self.username, self.entryline.get_text().strip()\n )\n self.__addmsg(f\"<b>(YOU): </b><br>{self.entryline.get_text().strip()}<br>\")\n self.entryline.set_text(\"\")", "async def invite(self, ctx):\n perms = discord.Permissions.text()\n perms.update(read_messages=True, manage_messages=True,\n mention_everyone=False, send_tts_messages=False)\n await ctx.send(f'Invite me here:\\n<{discord.utils.oauth_url(self.bot.user.id, perms)}>')", "async def on_message_activity(self, turn_context: TurnContext):\n reply = MessageFactory.list([])\n # Get the state properties from the turn context.\n welcome_user_state = await self.user_state_accessor.get(\n turn_context, WelcomeUserState\n )\n\n if not welcome_user_state.did_welcome_user:\n welcome_user_state.did_welcome_user = True\n\n text = turn_context.activity.text.lower()\n\n if text in (\"hello\", \"hi\",\"intro\",\"help\",\"menu\"):\n #await self.__send_intro_card(turn_context)\n reply.attachments.append(self.create_signin_card())\n await turn_context.send_activity(reply)\n\n \n else:\n # This example hardcodes specific utterances. You should use LUIS or QnA for more advance language\n # understanding.\n print(\"Printing action------\",turn_context.activity.text)\n print(\"Printing JSON------\",turn_context._activity.value)\n \n\n if turn_context._activity.value is not None:\n print(\"Printing type------\",turn_context._activity.value[\"type\"])\n print(\"Printing customer id------\",turn_context._activity.value[\"customerId\"])\n print(\"Printing password------\",turn_context._activity.value[\"password\"])\n\n customerId = turn_context._activity.value[\"customerId\"]\n password = turn_context._activity.value[\"password\"]\n terms = turn_context._activity.value[\"terms\"]\n isvalid = True\n if (customerId is None) or (str(customerId).strip()==\"\"):\n isvalid = False\n await turn_context.send_activity(\"Please enter valid Customer ID\")\n if (password is None) or (str(password).strip()==\"\"):\n isvalid = False\n await turn_context.send_activity(\"Please enter valid Password\")\n if (terms is None or terms in (\"false\")):\n isvalid = False\n await turn_context.send_activity(\"Please accept the terms and conditions.\")\n\n if (isvalid and turn_context._activity.value[\"type\"] in (\"Login\")):\n # defining a params dict for the parameters to be sent to the API\n PARAMS = {'userName': customerId, 'password': password}\n # sending get request and saving the response as response object\n r = requests.get(url=\"http://localhost:8080/login\", params=PARAMS)\n # extracting data in json format\n data = r.json()\n print(\"printing response \", data[\"loginStatus\"])\n if (data[\"loginStatus\"] is not None and data[\"loginStatus\"] in (\"success\")):\n await turn_context.send_activity(\"Login Succeded\")\n await turn_context.send_activity(\"An OTP is sent to your registered mobile number xxxxxxxx90.\")\n await turn_context.send_activity(\"Please enter the OTP.\")\n else:\n await turn_context.send_activity(\"Login Failed. 
Please try again\")\n # for key in turn_context._activity.value:\n # print(turn_context._activity.value[key])\n \n else:\n text = turn_context.activity.text.lower()\n \n if text in (\"369\"):\n await turn_context.send_activity(\"Thanks!!\")\n await self.__send_intro_card(turn_context)\n elif text in (\"sign-in\", \"login\"):\n await self.__login_otp_card_card(turn_context)\n elif text in (\"hello\", \"hi\",\"intro\",\"help\",\"menu\"):\n await self.__send_intro_card(turn_context)\n #await turn_context.send_activity(f\"You said { text }\")\n elif text in (\"account balance\"):\n await self.__send_accountbalance_card(turn_context)\n await turn_context.send_activity(\"Also, your deposit xxxxxxxxx9243 is closed pre-maturely as per your request and amount is credited to your third party account.\")\n elif text in (\"xxxxxxxxx4567\"):\n await self.__list_accountTransaction_card(turn_context)\n await self.__mobile_billDue_card(turn_context)\n elif text in (\"yes, pay my mobile bill\"):\n await self.__show_invoice_card(turn_context)\n await self.__show_selectAccountForBill_card(turn_context)\n elif text in(\"debit from xxxxxxxxx4567\"):\n await turn_context.send_activity(\"An OTP is sent to your registered mobile number xxxxxxxx90.\")\n await turn_context.send_activity(\"Please enter the OTP.\")\n elif text in (\"1234\"):\n await turn_context.send_activity(\"Transaction Successful !! Mobile bill paid for $100 from your account number xxxxxxxxx4567\")\n await turn_context.send_activity(\"As a loyal customer, we are happy to offer you one year free VISA card which comes with $25 movie voucher.\\n\\n Also your balance reward points 514 from card xxxxxxxxxxxx7653 will be added to the new card.\")\n await self.__show_congratulations_card(turn_context)\n elif text in (\"credit card\"):\n await turn_context.send_activity(\"Credit card xxxxxxxxxxxx7653 \\n\\n Current outstanding is $0.00 \\n\\n Card closed on 09/01/2020 \\n\\n Balance reward points are 514\")\n elif text in (\"service requests\"):\n await turn_context.send_activity(\"Currently there are no open service requests.\")\n elif text in (\"xxxxxxxxx4566\"):\n await turn_context.send_activity(\"Your current account xxxxxxxxx4566 is Active, but there are no transactions on it.\")\n elif text in (\"debit from xxxxxxxxx4566\"):\n await turn_context.send_activity(\"Insufficient account balance. Please choose another account\")\n await self.__show_selectAccountForBill_card(turn_context)\n #else:\n #await self.__send_intro_card(turn_context)", "def start(msg: telebot.types.Message):\n logger.info(f'New /start command from id: {msg.from_user.id}.')\n\n bot.send_message(\n msg.from_user.id,\n 'Hello, welcome to TicTacDrop!',\n reply_markup=buttons.get_play_markup()\n )\n\n utils.save_user(msg.from_user)", "async def on_ready(self):\n self.send_message = self.bot.get_cog('Text').send_message", "def start(bot, update):\n\n if update.message.chat.type == 'private':\n location_keyboard = telegram.KeyboardButton(text=\"Send current location\", request_location=True)\n postal_code = telegram.KeyboardButton(text=\"Input a postal code\")\n custom_keyboard = [[location_keyboard, postal_code]]\n chat_reply = \"Hello hello! You want to send me your current location or input a postal code?\"\n reply_markup = telegram.ReplyKeyboardMarkup(custom_keyboard, one_time_keyboard=True, resize_keyboard=True)\n bot.send_message(chat_id=update.message.chat_id, text=chat_reply, reply_markup=reply_markup)\n else:\n chat_reply = \"Hello hello! 
Please type /find@SGParkingBot and the postal code of the place you want to check (e.g. /find@SGParkingBot 098585). If you want to directly send me your location, talk to me in private ;)\"\n bot.send_message(chat_id=update.message.chat_id, text=chat_reply)", "def on_message(msg, server):\n global MY_INFO\n\n if MY_INFO is None:\n MY_INFO = server.slack.login_data['self']\n # MY_INFO['id']\n\n pprint.pprint(msg)\n text = msg.get(\"text\", \"\").lower()\n text += msg.get(\"file\", {}).get(\"preview\", \"\")\n recommendation = room_recommender(text)\n if recommendation:\n trigger_string, room_name = recommendation\n room_id = CHANNELS[room_name]['id']\n response_text = \"Hi, I noticed you were talking about “{trigger_string}”\\n You may have better luck posting this in <#{room_id}|{room_name}>\"\n response_msg = response_text.format(\n trigger_string=trigger_string,\n room_id=room_id,\n room_name=room_name\n )\n dm_user(server, msg.get('user'), response_msg)", "async def mention(self, ctx, value: bool=None):\n\t\tif value is None:\n\t\t\tv = await self.config.guild(ctx.guild).doMention()\n\t\t\tif v:\n\t\t\t\tawait ctx.send('Players are being mentioned when their turn begins.')\n\t\t\telse:\n\t\t\t\tawait ctx.send('Players are not being mentioned when their turn begins.')\n\t\telse:\n\t\t\tawait self.config.guild(ctx.guild).doMention.set(value)\n\t\t\tif value:\n\t\t\t\tawait ctx.send('Players will be mentioned when their turn begins.')\n\t\t\telse:\n\t\t\t\tawait ctx.send('Players will not be mentioned when their turn begins.')", "def start(update, context):\n context.bot.send_message(\n chat_id=update.effective_chat.id,\n text=\"Hi! I'm a dictionary bot!\\n\"\n \"Send me a word, and I'll send you back its Oxford meaning.\")", "def start(update: Update, context: CallbackContext) -> None:\n if update.effective_chat:\n context.bot.send_message(chat_id=update.effective_chat.id,\n text=\"I'm a bot, please talk to me!\")", "def send(self, irc, msg, args, user, targets, text):\n # Let's get the from user.\n public = irc.isChannel(msg.args[0])\n sent = []\n for target in targets:\n id = self.db.send(user.id, target.id, public, text)\n s = format('note #%i sent to %s', id, target.name)\n sent.append(s)\n irc.reply(format('%L.', sent).capitalize())", "async def wire(self, ctx, target: discord.Member = None) -> None:\n\n attacker = ctx.author\n\n if ctx.channel.id != bots_and_commands_channel_id:\n return await ctx.send(f\"**{attacker.mention}, you can only use this command in {self.bots_txt.mention}!**\")\n\n if await self.is_user_knocked_out(attacker.id):\n return await ctx.send(f\"**{attacker.mention}, you can't use your skill, because you are knocked-out!**\")\n\n if not target:\n return await ctx.send(f\"**Please, inform a target member, {attacker.mention}!**\")\n\n if attacker.id == target.id:\n return await ctx.send(f\"**{attacker.mention}, you cannot wire yourself!**\")\n\n if target.bot:\n return await ctx.send(f\"**{attacker.mention}, you cannot wire a bot!**\")\n\n target_currency = await self.get_user_currency(target.id)\n if not target_currency:\n return await ctx.send(f\"**You cannot wire someone who doesn't have an account, {attacker.mention}!**\")\n\n if target_currency[7] == 'default':\n return await ctx.send(f\"**You cannot wire someone who has a `default` Sloth class, {attacker.mention}!**\")\n\n if await self.is_user_protected(target.id):\n return await ctx.send(f\"**{attacker.mention}, {target.mention} is protected, you can't wire them!**\")\n\n if await 
self.is_user_wired(target.id):\n return await ctx.send(f\"**{attacker.mention}, {target.mention} is already wired!**\")\n\n confirmed = await ConfirmSkill(f\"**{attacker.mention}, are you sure you want to wire {target.mention}?**\").prompt(ctx)\n if not confirmed:\n return await ctx.send(\"**Not hacking them, then!**\")\n\n await self.check_cooldown(user_id=attacker.id, skill_number=2)\n\n try:\n current_timestamp = await self.get_timestamp()\n await self.update_user_is_wired(target.id, 1)\n await self.insert_skill_action(\n user_id=attacker.id, skill_type=\"wire\", skill_timestamp=current_timestamp,\n target_id=target.id, channel_id=ctx.channel.id\n )\n await self.update_user_action_skill_two_ts(attacker.id, current_timestamp)\n # Updates user's skills used counter\n await self.update_user_skills_used(user_id=attacker.id)\n\n except Exception as e:\n print(e)\n return await ctx.send(f\"**For some reason I couldn't wire your target, {attacker.mention}!**\")\n\n else:\n wire_embed = await self.get_wire_embed(\n channel=ctx.channel, perpetrator_id=attacker.id, target_id=target.id)\n await ctx.send(embed=wire_embed)", "def type_command(ctx, name_from, name_to):", "async def suggest(self, ctx, *, string=None):\n\n if not string:\n await ctx.send(\"Give a suggestion.\")\n ctx.command.reset_cooldown(ctx)\n return\n\n channel = ctx.bot.get_channel(520909751681548307)\n await channel.send(embed=discord.Embed(color=self.bot.embed_color,\n description=string)\n .set_author(name=ctx.author.name, icon_url=ctx.author.avatar_url)\n .set_footer(text=f\"From {ctx.guild.name}\")\n )\n\n await ctx.send(f\"Your suggestion has been sent!\")", "def idme(bot, update):\n update.message.reply_text(\"Your ID is: \" + str(update.message.from_user.id))", "async def wouldyourather(message: discord.Message, opt: options=None):\n # If there are no options, the bot will ask the questions (if there are any to choose from)\n if opt is None:\n assert message.channel.id not in sessions, \"**A would you rather session is already in progress.**\"\n sessions.add(message.channel.id)\n\n assert db.data[\"questions\"], \"**There are ZERO questions saved. 
Ask me one!**\"\n\n question = random.choice(db.data[\"questions\"])\n choices = question[\"choices\"]\n await client.say(message, \"Would you rather **{}** or **{}**?\".format(*choices))\n\n timeout = db.data[\"timeout\"]\n replied = []\n\n # Wait for replies from anyone in the channel\n while True:\n reply = await client.wait_for_message(timeout=timeout, channel=message.channel,\n check=lambda m: m.author not in replied)\n # Break on timeout\n if reply is None:\n break\n\n # Check if the choice is vlaid\n choice = get_choice(choices, reply.content)\n if choice is None:\n continue\n\n # Register that this author has replied\n replied.append(reply.author)\n\n # Update the answers in the DB\n # We don't care about multiples, just the amount (yes it will probably be biased)\n question[\"answers\"][choice] += 1\n\n name = reply.author.display_name\n response = random.choice(db.data[\"responses\"]).format(name=name, NAME=name.upper(), choice=choices[choice])\n await client.say(message, response)\n\n # Say the total tallies\n await client.say(message, \"A total of {0} would **{2}**, while {1} would **{3}**!\".format(\n *question[\"answers\"], *choices))\n db.save()\n sessions.remove(message.channel.id)\n\n # Otherwise, the member asked a question to the bot\n else:\n db.data[\"questions\"].append(dict(\n choices=list(opt),\n answers=[0, 0]\n ))\n db.save()\n\n answer = random.choice(opt)\n await client.say(message, \"**I would {}**!\".format(answer))", "async def test_typing(doof, repo_info, event_loop, mocker):\n typing_sync = mocker.Mock()\n\n async def typing_async(*args, **kwargs):\n \"\"\"Wrap sync method to allow mocking\"\"\"\n typing_sync(*args, **kwargs)\n\n mocker.patch.object(doof, 'typing', typing_async)\n await doof.run_command(\n manager='mitodl_user',\n channel_id=repo_info.channel_id,\n words=['hi'],\n loop=event_loop,\n )\n assert doof.said(\"hello!\")\n typing_sync.assert_called_once_with(repo_info.channel_id)", "async def dm(self, ctx, member: discord.Member = None, *, message=None):\n\n await ctx.message.delete()\n\n if not message:\n return await ctx.send(\"**Inform a message to send!**\", delete_after=3)\n\n if not member:\n return await ctx.send(\"**Inform a member!**\", delete_after=3)\n\n check_member = ctx.guild.get_member(member.id)\n if check_member:\n return await member.send(message)\n await ctx.send(f\"**Member: {member} not found!\", delete_after=3)", "def text(message, user=None):\n\n def clear_status(user_id):\n db.set_user_status(0, user_id)\n return True\n\n if not user:\n teleBot.send_message(message.chat.id, \"Для использования бота необходимо зарегистрироваться /start.\")\n return\n\n if user.status < 1:\n if user.status == -222:\n teleBot.send_message(message.chat.id, \"Спасибо, ваша жалоба будет рассмотрена в ближайшее время.\")\n teleBot.send_message(\"337804063\", f\"Пользователь {message.chat.id} отправил жалобу: {message.text}\")\n db.set_user_status(0, user.id)\n return\n\n if user.status == -1:\n site = db.new_site(user.id, message.text)\n # href = 'https://0fc752a06314.ngrok.io'\n href = ServerConfiguration.HOST\n db.select_site(user.id, site.id)\n teleBot.send_message(message.chat.id,\n f\"Создан сайт {site.title}. Вот ссылочка: {href}/site/{site.slug}\",\n reply_markup=ACTION_KEYBOARD)\n db.set_user_status(0, user.id)\n return\n teleBot.send_message(message.chat.id, \"И куда это записывать? Выберите действие! \")\n return\n\n column = code_to_action(user.status)\n teleBot.send_message(message.chat.id, f\"Успешно! 
Можете вызвать меню /set ещё раз или \"\n f\"воспользоваться предыдущим для продолжения работы с сайтом \")\n\n if column == -1 or not column:\n teleBot.send_message(message.chat.id, \"Неизвестная ошибка сервера.\")\n return\n\n def prepear_text(text):\n return text.replace('$', ':dol:')\n\n data = {\n column: prepear_text(message.text),\n 'last_update': datetime.now()\n }\n\n db.update_site_data(user.selected, **data)\n\n # teleBot.send_message(message.chat.id)", "def send_command(update: Update, context: CallbackContext):\n playerName = update.message.chat.username.lower()\n\n if players[playerName].username is None:\n update.message.reply_text(messages.NOT_REGISTERED)\n return ConversationHandler.END\n\n if players[playerName].chat_id is None:\n update.message.reply_text(messages.ERROR_CHAT_ID)\n return ConversationHandler.END\n\n send_menu = [[InlineKeyboardButton(config.ANGEL_ALIAS, callback_data='angel')],\n [InlineKeyboardButton(config.MORTAL_ALIAS, callback_data='mortal')]]\n reply_markup = InlineKeyboardMarkup(send_menu)\n update.message.reply_text(messages.SEND_COMMAND, reply_markup=reply_markup)\n\n return CHOOSING", "def _do_mein_spruch(self, chat_id, user_id, args, update):\n spruch = self.db.get_active_spruch(user_id)\n user_name = update[\"message\"][\"from\"][\"first_name\"]\n \n if not spruch:\n self.tclient.send_message('Ich habe noch keinen Nasenspruch von dir gespeichert, {}.'.format(user_name), chat_id)\n else:\n self.tclient.send_message('{}: <i>{}</i>'.format(user_name, spruch.text), chat_id)", "def handle(msg):\n # Get text or data from the message\n text = msg.get(\"text\", None)\n data = msg.get(\"data\", None)\n\n if data is not None:\n # This is a message from a custom keyboard\n chat_id = msg[\"message\"][\"chat\"][\"id\"]\n content_type = \"data\"\n elif text is not None:\n # This is a text message from the user\n chat_id = msg[\"chat\"][\"id\"]\n content_type = \"text\"\n else:\n # This is a message we don't know how to handle\n content_type = \"unknown\"\n \n if content_type == \"text\":\n message = msg[\"text\"]\n logging.info(\"Received from chat_id={}: {}\".format(chat_id, message))\n\n if message == \"/start\":\n # Check against the server to see\n # if the user is new or not\n # TODO\n payload = {'chat_id':chat_id}\n r = requests.post(host_addr+'/register', json=payload)\n response = json.loads(r.content)\n if response['exists']:\n message = \"Welcome back!\"\n else:\n message = \"Welcome!\"\n bot.sendMessage(chat_id, message)\n\n \n elif message == \"/rate\":\n # Ask the server to return a random\n # movie, and ask the user to rate the movie\n # You should send the user the following information:\n # 1. Name of the movie\n # 2. 
A link to the movie on IMDB\n # TODO\n\n # Create a custom keyboard to let user enter rating\n payload = {'chat_id':chat_id}\n r = requests.post(host_addr+'/get_unrated_movie', json=payload)\n response = json.loads(r.content)\n movieid = response['id']\n movieinfo = '%s: %s' % (response['title'], response['url'])\n bot.sendMessage(chat_id, movieinfo)\n my_inline_keyboard = [[\n InlineKeyboardButton(text='1', callback_data=str(movieid)+' rate_movie_1'),\n InlineKeyboardButton(text='2', callback_data=str(movieid)+' rate_movie_2'),\n InlineKeyboardButton(text='3', callback_data=str(movieid)+' rate_movie_3'),\n InlineKeyboardButton(text='4', callback_data=str(movieid)+' rate_movie_4'),\n InlineKeyboardButton(text='5', callback_data=str(movieid)+' rate_movie_5')\n ]]\n keyboard = InlineKeyboardMarkup(inline_keyboard=my_inline_keyboard )\n bot.sendMessage(chat_id, \"How do you rate this movie?\", reply_markup=keyboard)\n\n \n elif message == \"/recommend\":\n # Ask the server to generate a list of\n # recommended movies to the user\n payload = {'chat_id':chat_id, 'top_n':3}\n r = requests.post(host_addr+'/recommend', json=payload)\n response = json.loads(r.content)\n # print(response)\n if response['movies']==[]:\n message = 'You have not rated enough movies, we cannot generate recommendation for you.'\n bot.sendMessage(chat_id, message)\n else:\n bot.sendMessage(chat_id, \"My recommendations:\")\n for item in response['movies']:\n movieinfo = '%s: %s' % (item['title'], item['url'])\n bot.sendMessage(chat_id, movieinfo)\n\n\n else:\n # Some command that we don't understand\n bot.sendMessage(chat_id, \"I don't understand your command.\")\n\n elif content_type == \"data\":\n # This is data returned by the custom keyboard\n # Extract the movie ID and the rating from the data\n # and then send this to the server\n # TODO\n # print(data)\n info = str.split(data)\n movieid = int(info[0])\n rate = info[1][-1]\n logging.info(\"Received rating: {}\".format(rate))\n bot.sendMessage(chat_id, \"Your rating is received!\")\n # logging.info('Movie id = %d' % movieid)\n payload = {'chat_id':chat_id, 'movie_id': movieid, 'rating': rate}\n r = requests.post(host_addr+'/rate_movie', json=payload)\n response = json.loads(r.content)\n logging.info('Update status: '+response['status'])", "def do_listen(self, args):\n poem = args.contains('poem')\n deletebot = args.contains('deletebot')\n\n if deletebot:\n if poem:\n self.deletebot.generate_poem()\n else:\n self.deletebot.generate_email()\n else:\n if poem:\n self.keepbot.generate_poem()\n else:\n self.keepbot.generate_email()", "async def katıl(con,channel=None):\r\n check = str(con.message.channel)\r\n\r\n if check == 'Direct Message with {}'.format(con.message.author.name):#COMMAND IS IN DM\r\n await bot.send_message(con.message.channel, \"**You must be in a `server voice channel` to use this command**\")\r\n\r\n if check != 'Direct Message with {}'.format(con.message.author.name):#COMMAND NOT IN DM\r\n voice_status = bot.is_voice_connected(con.message.server)\r\n\r\n if voice_status == False:#VOICE NOT CONNECTED\r\n await bot.join_voice_channel(con.message.author.voice.voice_channel)\r\n\r\n if voice_status == True:#VOICE ALREADY CONNECTED\r\n await bot.send_message(con.message.channel, \"**Bot zaten bir kanala bağlı !**\")", "async def get_user(self):\n\n def check_if_dm(message: discord.Message):\n\n if isinstance(message.channel, discord.channel.DMChannel) and message.author != self.bot.user:\n return True\n else:\n return False\n\n token = 
''.join(str(random.randint(1,10)) for i in range(7))\n\n log.warn(f\"To use the bot, you need to link your Discord account to the bot. To link your Discord account to the bot, open a new direct message with the bot and send it the following code: {token}\")\n\n while True:\n\n message = await self.bot.wait_for('message',check=check_if_dm)\n\n if message.content == token:\n \n await message.channel.send(\"Linking account...\")\n log.info(\"Token recieved, linking account\")\n\n conf.user = message.author.id\n\n await message.channel.send(\"Account Linked\")\n log.info('Account linked.')\n break\n\n else:\n\n log.info(\"That is not the correct token, please send the correct token printed previously\")", "async def send_sentence(self):\n await client.send_message(self.channel, \"**Type**: \" + self.sentence)", "async def addme(self, ctx):\n invite_url = discord.utils.oauth_url(self.bot.user.id, permissions=discord.Permissions(8))\n embed = self.bot.embeds.embed_builder(title='Add this bot to your own Discord server',\n description=invite_url,\n has_footer=False)\n await ctx.send(embed=embed)" ]
[ "0.6927895", "0.67939085", "0.67668486", "0.6541166", "0.63509613", "0.62982935", "0.629352", "0.61479294", "0.61420435", "0.6082516", "0.60807395", "0.6071102", "0.59702575", "0.59686375", "0.5959844", "0.59589547", "0.5958455", "0.5929721", "0.58878124", "0.5874848", "0.58734095", "0.5786945", "0.5780502", "0.5775661", "0.57688767", "0.5714562", "0.56907815", "0.56813", "0.56749064", "0.56716764", "0.56592345", "0.5654886", "0.5653935", "0.56442344", "0.56421036", "0.56419224", "0.56377864", "0.5599923", "0.5598582", "0.55912787", "0.5588981", "0.5587447", "0.5578327", "0.5576345", "0.55742955", "0.55640113", "0.55640113", "0.5548122", "0.553718", "0.5531367", "0.55226934", "0.55217224", "0.5512241", "0.55033493", "0.5499575", "0.5499236", "0.54953337", "0.54901123", "0.54901123", "0.54895127", "0.5488481", "0.5488416", "0.54780513", "0.5469675", "0.54649156", "0.5463867", "0.5460192", "0.545624", "0.5450327", "0.54501724", "0.54494315", "0.54485375", "0.5444808", "0.5443465", "0.54429996", "0.5438138", "0.54362226", "0.54218686", "0.54171395", "0.541438", "0.5413573", "0.54102385", "0.54096955", "0.5403881", "0.5403824", "0.54035366", "0.54028064", "0.5402784", "0.5399251", "0.539299", "0.5392119", "0.5387913", "0.5387074", "0.53855604", "0.538345", "0.53829855", "0.5378541", "0.5375879", "0.5372831", "0.53726715", "0.537254" ]
0.0
-1
Method finds the button to follow or unfollow users. It filters the follow elements to find buttons. The default method looks for only follow buttons.
def find_buttons(self, button_txt):
    button = self.driver.find_elements_by_xpath("//*[text()='{}']".format(button_txt))
    return button
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def click_to_follow(browser):\n # browser.find_elements_by_css_selector(\"button\")\n # my_button_xpath: str = \"//button\"\n # browser.find_elements_by_xpath(my_button_xpath)\n\n # <button>\n my_follow_btn_xpath: str = \"//button[contains(text(), 'Follow')][not(contains(text(), 'Following'))]\"\n follow_btn_elements: t.List = browser.find_elements_by_xpath(\n my_follow_btn_xpath\n )\n\n # <a>\n # my_follow_btn_xpath: str = \"//a[contains(text(), 'Follow')][not(contains(text(), 'Following'))]\"\n # follow_btn_elements: t.List = browser.find_elements_by_xpath(\n # my_follow_btn_xpath\n # )\n\n # All elements\n # my_follow_btn_xpath: str = \"//*[contains(text(), 'Follow')][not(contains(text(), 'Following'))][not(contains(text(), 'Followers'))]\"\n # follow_btn_elements: t.List = browser.find_elements_by_xpath(\n # my_follow_btn_xpath\n # )\n\n for btn in follow_btn_elements:\n # Attempt to click each follow button on the page\n time.sleep(2) # self-throttle\n try:\n btn.click()\n except:\n pass", "def getUserFollowers(user):\n sleep(5)\n first = user+\"communaute/\"\n driver.get(first)\n sleep(5)\n followers = set()\n followers_page = []\n div_links = []\n\n nb_follower_div = driver.find_element_by_xpath(\"//div[@class='inner-nav-item current ']\").is_displayed()\n if nb_follower_div:\n nb_follower_text = driver.find_element_by_xpath(\"//div[@class='inner-nav-item current ']\").text\n nb_follower = nb_follower_text.split(\"(\",2)[1].split(\")\",2)[0]\n nb_follower = int(nb_follower)\n print(\"NB Followers : \",nb_follower)\n\n pagination = driver.find_elements_by_xpath(\"//a[@class='xXx button button-md item']\")\n page_links = [elem.get_attribute('href') for elem in pagination]\n page_links.insert(0,first)\n\n if nb_follower > 0:\n for num in page_links:\n if page_links.index(num) > 0:\n driver.get(num)\n sleep(5)\n div_links = driver.find_elements_by_xpath(\"//a[@class='xXx']\")\n followers_page = [elem.get_attribute('href') for elem in div_links]\n for link in followers_page:\n if link_patterns in link:\n followers.add(link)\n return followers", "def main_action(self, follow) -> set:\n dr = self.driver\n to_collect = self.to_collect\n\n prev_users_found = 0\n count = 0\n while count < 3 and prev_users_found < self.total_users:\n user_els = dr.find_elements_by_xpath(\"//a[@title]\")\n\n for i, user in enumerate(user_els):\n f_username = user.get_attribute(\"title\")\n self.users_found.add(f_username)\n\n if follow:\n self.click_follow_users()\n self.check_users_followed()\n\n n_users_found = len(self.users_found)\n logging.info(f\"{n_users_found} {to_collect} found out of {self.total_users} from {self.context.name}\")\n # TODO: better logging progress (enlighten)\n\n if n_users_found == prev_users_found:\n count += 1\n else:\n count = 0\n\n if self.total_users + 2 > 5:\n self.check_modal()\n scroll(dr, down=True, intensity=2)\n prev_users_found = n_users_found\n sleep_random()\n else:\n break\n\n logging.info(f\"All {to_collect} from {self.context.name} collected successfully\")\n return self.users_found", "def follow_user(self, user):\n self.nav_user(user)\n follow_button = self.driver.find_element_by_xpath(\n \"//button[contains(text(), 'Follow')]\")\n follow_button.click()\n time.sleep(1)\n self.driver.get(self.base_url)", "def btn_follow_clicked(self, widget, data=None):\n print \"follow clicked\"\n #Going to put random stuff here.", "def follow(self, follower, followee):\n pass", "def scrapeFollowersFromAnAccount(mode=\"followers\"):\n global api\n 
api.s.driver.get(\"https://www.instagram.com/\" + Target_User) \n usernameToLook = scrapeAccountName() #The idea is by searching for hashtag, you want to do the scraping of any user\n api.s.transfer_driver_cookies_to_session()\n usernameToLook = api.castUsernameToUserID(usernameToLook) #Same as the previous comment\n username = \"tylie77221\"\n password = \"tylie772211\"\n device_id = generate_device_id(get_seed(username, password))\n uuid = generate_UUID(True)\n rank_token = \"{}_{}\".format(usernameToLook, uuid)\n return api.getUserFollowers(usernameToLook, rank_token, selection=mode)", "def unfollow_profile(self):\n self.find_clickable_element(self.ISFOLLOWED_BTN).click()", "def follow_user(cls, user, following):\r\n pass", "def btn_unfollow_clicked(self, widget, data=None):\n print \"unfollow clicked\"", "def get_queryset(self, *args, **kwargs):\n following_username = self.kwargs.get(self.look_url_kwarg)\n following_users = FollowUser.objects.filter(\n following_username=following_username)\n\n return following_users", "def follow_closely(api_, follow_username):\n big_list = True\n max_id = ''\n following = []\n\n while big_list:\n api_.getSelfUsersFollowing(maxid=max_id)\n followers_ = api_.LastJson\n for f in followers_['users']:\n following.append(f)\n big_list = followers_['big_list']\n if not big_list:\n break\n # this key only exists if there is more pages\n max_id = followers_['next_max_id']\n\n for f in following:\n if f['username'] == follow_username:\n return True, f", "def follow_user(cls, user, following):\n pass", "def get_facebook_login_button(self, dom):\n for node in dom.iter('node'):\n if \"Facebook\" in node.attrib[\"text\"] and node.attrib[\"clickable\"]:\n return node\n\n return None", "def resolve_following(self, info):\n user = info.context.user\n follow_request = FollowRequest.objects.filter(follower=user.id, pending=False)\n return [follow.following for follow in follow_request]", "def user_follow():\n data = request.get_json(force=True)\n follower = User.query.get(data['follower'])\n following = User.query.get(data['following'])\n follower.followcheck.append(following)\n db.session.commit()\n return {'followed': True}", "def follows(self):\r\n return relationships.Follows(self)", "async def follow(follow):\n await follow.edit(\n f\"`FOLLOW {DEFAULTUSER} ON` \\n\\n\"\n f\"[InstaGram](https://www.instagram.com/mayur_karaniya) \\n\\n\"\n f\"[FaceBook](https://www.facebook.com/mkaraniya) \\n\\n\"\n f\"[YouTube](https://www.youtube.com/channel/UCeKQxQK7XZ3jGi3541uWATg?sub_confirmation=1) \"\n )", "def follow_followers(self):\n self.logger.log(\"starting follow_followers...\")\n follow = perform_with_ran_delay(self.instagram.get_followers, self.account.identifier, 150, 15, delayed=True)\n for acc in follow[\"accounts\"]:\n try:\n try:\n # print(\"{} follows me, do I follow him ? 
> {} \".format(acc.username,self.is_user_following(acc.identifier)))\n if not self.is_user_following(acc.identifier):\n if self.add_following(acc.identifier):\n self.logger.log(\"following: {}\".format(acc.username))\n else:\n self.logger.log(\"follow not working at the moment\")\n except Exception as e:\n print(e)\n self.logger.log(str(e))\n continue\n finally:\n sleep(3)", "def follow_following_followers(self):\n self.logger.log(\"starting follow_following_followers...\")\n follows_accounts = self.following\n random.shuffle(follows_accounts)\n for acc in follows_accounts:\n try:\n try:\n followw = perform_with_ran_delay(self.instagram.get_followers, acc, 150, 15,\n delayed=True)\n accountstofollow = followw[\"accounts\"]\n random.shuffle(accountstofollow)\n if len(accountstofollow) > 10:\n accountstofollow = accountstofollow[:10]\n for ac in accountstofollow:\n if not self.is_user_following(ac.identifier):\n self.add_following(ac.identifier)\n self.logger.log(\"following: {}\".format(ac.username))\n except Exception as e:\n print(e)\n self.logger.log(str(e))\n finally:\n sleep(3)", "def getFollowers():\n\n cur, user_id = initialise(3)\n cur.execute(\"SELECT following FROM followers WHERE user = (SELECT username FROM users WHERE id = ?)\", [user_id])\n tempFollowers = cur.fetchall()\n followers = []\n for follower in tempFollowers:\n followers.append(follower[0])\n return followers", "def follows(self):\r\n request = http.Request('GET', '{0}/follows/'.format(self.get_url()))\r\n\r\n return request, parsers.parse_json", "def auto_follow_followers():\n\n following = set(t.friends.ids(screen_name=TWITTER_HANDLE)[\"ids\"])\n followers = set(t.followers.ids(screen_name=TWITTER_HANDLE)[\"ids\"])\n\n not_following_back = followers - following\n\n for user_id in not_following_back:\n try:\n t.friendships.create(user_id=user_id, follow=False)\n except Exception as e:\n print(\"error: %s\" % (str(e)))", "def select_user_following(args):\n is_parameter_exists([\n constants.ID\n ], args)\n\n # Request User\n request_user = args[constants.USER]\n\n # Requested User ID\n requested_user_id = args[constants.ID]\n\n # Page Number\n page_number = 1 if constants.PAGE_NUMBER not in args else int(args[constants.PAGE_NUMBER])\n\n # Check User Id\n if not User.objects.filter(id=requested_user_id).exists():\n raise ApiError(constants.NOT_EXIST_OBJECT)\n\n # Following QuerySet\n queryset = UserFollow.objects.filter(following_user=requested_user_id).values_list('followed_user', flat=True)\n\n # User Ids\n user_ids = get_results_from_queryset(queryset, 10, page_number)\n\n # is_finished\n is_finished = not user_ids.has_next()\n\n # Filter Query\n filter_query = Q(id__in=user_ids)\n\n # Users\n users, _, _ = __get_users(filter_query, request_user, 10)\n\n return users, page_number, is_finished", "def follows_target_check(twitter,top_followers_list):\n yes_follow_list = []\n not_follow_list = []\n following_dict = {}\n target = 'HillaryClinton'\n \n for user in top_followers_list:\n params = {'source_id':user, 'target_screen_name':target}\n response = twitter.request('friendships/show', params)\n data = response.json()\n #print(\"DATAAA::\",data)\n if response.status_code == 200:\n #print(\"IN BIGG IFFFFF:::\")\n following_dict = data['relationship']['source']\n #print(\"following_dict::\",following_dict)\n check = following_dict['following']\n #print(\"check::\",check)\n if check:\n #print(\"IN IFFFFF:::\")\n yes_follow_list.append(user)\n \n else:\n #print(\"IN ELSEEEE:::\")\n not_follow_list.append(user)\n \n 
else:\n print('Got error %s \\nsleeping for 15 minutes.' % response.text)\n sys.stderr.flush()\n time.sleep(61 * 15)\n \n print(\"YES_LIST:::\",yes_follow_list) \n print(\"NO_LIST:::\",not_follow_list) \n return not_follow_list", "def find_new_followers(self):\n api = self.api\n geocode = self.geocode\n queries = self.queries\n hits_per_query = self.hits_per_query\n\n self.log.debug(\"Initialize\")\n self.log.debug(\"[ ********* FIND NEW FOLLOWERS *********** ]\")\n\n if self.strategy == UserProfile.FOLLOW or self.strategy == UserProfile.TWEET:\n # Find statuses that match our interests\n self.log.debug(\"Strategy set to FOLLOW or TWEET\")\n n = hits_per_query\n search_dict = dict()\n search_dict['lang'] = \"en\"\n if not geocode is None:\n search_dict['geocode'] = geocode\n statuses = list()\n self.log.debug(\"Queries:\")\n for q in queries:\n search_dict['q'] = q\n results = [c for c in Cursor(api.search, **search_dict).items(n)]\n self.log.debug(\" => %s: %s hits\" % (q, len(results)))\n statuses.extend(results)\n #self.log.debug(\"Statuses: %s\" % \"\\n\".join([str(s.__dict__) for s in statuses]))\n # Get all the screen names of senders and receivers\n screen_names = ([t.from_user for t in statuses] +\n [t.to_user for t in statuses if t.to_user])\n\n # Convert the strings to Tweepy user objects\n users, remainder = lookup_users_by_screen_name(self.api, screen_names)\n\n elif self.strategy == UserProfile.STEAL:\n users = []\n stolen_from = {}\n for competitor in list(self.competitors):\n self.log.debug(\"[ ********* STEAL %s *********** ]\" % competitor)\n try:\n competitor_friends_ids = self.api.friends_ids(competitor)\n competitor_followers_ids = self.api.followers_ids(competitor)\n\n filter_known_users_to_reduce_api_hits = False\n\n if filter_known_users_to_reduce_api_hits is True:\n new_competitor_friends_ids = [id for id in competitor_friends_ids if not len(TwitterAccount.objects.filter(twitter_id=id)) > 0 ]\n old_competitor_friends_ids = [id for id in competitor_friends_ids if len(TwitterAccount.objects.filter(twitter_id=id)) > 0 ]\n new_competitor_followers_ids = [id for id in competitor_followers_ids if not len(TwitterAccount.objects.filter(twitter_id=id)) > 0 ]\n old_competitor_followers_ids = [id for id in competitor_followers_ids if len(TwitterAccount.objects.filter(twitter_id=id)) > 0 ]\n # print new_competitor_friends_ids\n # print old_competitor_friends_ids\n # print new_competitor_followers_ids\n # print old_competitor_followers_ids\n\n print \"start lookups\"\n new_competitor_friends, remaining_friends = utils.lookup_users_by_id(self.api, new_competitor_friends_ids)\n new_competitor_followers, remaining_followers = utils.lookup_users_by_id(self.api, new_competitor_followers_ids)\n print \"end lookups\"\n else:\n # get all the tweepy users\n print \"start lookups\"\n new_competitor_friends, remaining_friends = utils.lookup_users_by_id(self.api, competitor_friends_ids)\n new_competitor_followers, remaining_followers = utils.lookup_users_by_id(self.api, competitor_followers_ids)\n print \"end lookups\"\n\n print \"%s has %s friends\" % (competitor, len(new_competitor_friends))\n print \"%s has %s followers\" % (competitor, len(new_competitor_followers))\n\n # holy crap this is so fucked up i'm ashamed that this code is getting written like this!\n\n for u in new_competitor_friends + new_competitor_followers:\n stolen_from.update({u.screen_name.lower(): competitor})\n\n except Exception, e:\n print e\n # didn't get all the users, don't remove the competitor\n # from the 
competitor list\n pass\n else:\n # got all the competitors friends and followers and converted them\n # to tweepy users.\n users += new_competitor_friends\n users += new_competitor_followers\n # add them to the users list to be processed in the next block (for user in users)\n # then pop the name off the competitors list in the UserProfile\n # fuck it for now i'm going to just cycle the item to the bottom of the competitor list so we can start getting maximal coverage within api constraints by making sure the top person is new every time\n self.competitors.append(self.competitors.pop(0))\n # return # for now\n self.profile.competitors = \"\\r\\n\".join(self.competitors)\n self.profile.save()\n\n # use the profile competitors list\n # for each name in competitors list\n # add all friends\n\n # should filter out garbage users. something like:\n users = [u for u in users if not Target.objects.filter(hunter=self.user, hunted__screen_name=u.screen_name.lower())]\n\n for user in users:\n twitter_account, created = utils.get_or_create_twitter_account(user)\n target, created = Target.objects.get_or_create(\n hunter=self.user, hunted=twitter_account)\n print target.hunted.screen_name, created\n if created:\n try:\n screen_name = user.screen_name.lower()\n match = lambda x: screen_name in \\\n (x.from_user.lower(), x.to_user and x.to_user.lower())\n if not self.strategy == UserProfile.STEAL:\n trigger_tweet = filter(match, statuses)[0].text\n else:\n try:\n trigger_tweet = \"Steal from user: %s\" % stolen_from.get(screen_name.lower(), \"someone. i lost it. sorry.\")\n except Exception, e:\n print \"YUCK. ERRORS.\"\n print \"YUCK. ERRORS.\"\n print e\n print e\n except Exception, e:\n self.log.exception(\"Could not get trigger tweet for %s\" %\n user.screen_name.lower())\n trigger_tweet = \"Error: Couldn't retrieve tweet.\"\n self.log.debug(\"Saved twitter account %s (trigger: %r)\" %\n (twitter_account.screen_name,\n trigger_tweet[:50]))\n target.reason = trigger_tweet\n target.status = Target.ON_DECK\n target.save()\n else:\n pass\n # print \" - Previously followed this dudesicle: %s\" % user.screen_name", "def follow(self, followerId: int, followeeId: int) -> None:\n if followeeId not in self.followList.get(followerId, [followerId]):\n self.followList[followerId] = self.followList.get(followerId, [followerId]) + [followeeId]\n # print(self.followList)", "def follows(self):\n return relationships.Follows(self)", "def get_followings_of_a_user(tx: Transaction, email: str) -> BoltStatementResult:\n query = f\"\"\"\n MATCH (:Person {{email: '{email}'}})-[:FOLLOWS]->(follower) RETURN follower.full_name AS full_name, follower.email AS email, follower.profile_image AS profile_image\n \"\"\"\n return tx.run(query)", "def follow(request):\n user_id = request.POST.get('user_id')\n action = request.POST.get('action')\n if user_id and action:\n try:\n user = User.objects.get(id=user_id)\n if request.user == user:\n return JsonResponse({'status':0,'msg':'You can\\'t follow yourself'})\n if action == 'follow':\n if request.user.is_following(user):\n return JsonResponse({'status':0,'msg':'already following'})\n else:\n request.user.follow(user)\n return JsonResponse({'status':1,'msg':'followed'})\n else:\n if request.user.is_following(user):\n request.user.unfollow(user)\n return JsonResponse({'status':1,'msg':'un-followed'})\n else:\n return JsonResponse({'status':0,'msg':'not following'})\n except User.DoesNotExist:\n return JsonResponse({'status':0,'msg':'user not found'})\n return JsonResponse({'status':0})", 
"def follow_by_id(self, uid: int) -> None:\n self.api.follow(uid)", "def get_followings(request):\n user_id = request.GET.get(\"user_id\")\n if not user_id:\n return {\"error\": \"User Id should be provided\"}, 400\n following_data = Following.objects.filter(user_profile_id=user_id, is_active=True).first()\n return following_data, 200", "def resolve_followers(self, info):\n user = info.context.user\n follow_request = FollowRequest.objects.filter(following=user.id, pending=False)\n return [follow.follower for follow in follow_request]", "def remote_follow_page(request):\n user = get_user_from_username(request.user, request.GET.get(\"user\"))\n data = {\"user\": user}\n return TemplateResponse(request, \"ostatus/remote_follow.html\", data)", "def is_follower(self, you, them):\n if self.filter(from_user=them, to_user=you).count() > 0:\n return True\n return False", "def get_follow(self):\n return self.follow", "def process_like_and_follow(self):\n follow, media, unfollow = self.prepare_process_like_and_follow()\n follow_acts, media_acts, unfollow_acts = len(follow), len(media), len(unfollow)\n all_acts = round(self.limits_per_hour.get('follow') + self.limits_per_hour.get('like') +\n self.limits_per_hour.get('unfollow'))\n wait_time = 3600 // all_acts + 1\n while follow_acts or media_acts or unfollow_acts:\n time.sleep(wait_time + trunc_gauss(0, 10, -40, 40))\n rc = random.choices(['f', 'l', 'u'], [follow_acts, media_acts, unfollow_acts])[0]\n\n if rc == 'f':\n fo = follow.pop(0)\n self.following_and_storing(fo)\n follow_acts -= 1\n elif rc == 'l':\n mo = media.pop(0)\n self.liking(mo)\n media_acts -= 1\n elif rc == 'u':\n uo = unfollow.pop(0)\n self.unfollowing_and_removing(uo)\n unfollow_acts -= 1", "def auto_unfollow_nonfollowers():\n\n following = set(t.friends.ids(screen_name=TWITTER_HANDLE)[\"ids\"])\n followers = set(t.followers.ids(screen_name=TWITTER_HANDLE)[\"ids\"])\n\n # put user IDs here that you want to keep following even if they don't\n # follow you back\n users_keep_following = set([])\n\n not_following_back = following - followers\n\n # make sure the \"already followed\" file exists\n if not os.path.isfile(ALREADY_FOLLOWED_FILE):\n with open(ALREADY_FOLLOWED_FILE, \"w\") as out_file:\n out_file.write(\"\")\n\n # update the \"already followed\" file with users who didn't follow back\n already_followed = set(not_following_back)\n af_list = []\n with open(ALREADY_FOLLOWED_FILE) as in_file:\n for line in in_file:\n af_list.append(int(line))\n\n already_followed.update(set(af_list))\n del af_list\n\n with open(ALREADY_FOLLOWED_FILE, \"w\") as out_file:\n for val in already_followed:\n out_file.write(str(val) + \"\\n\")\n\n for user_id in not_following_back:\n if user_id not in users_keep_following:\n t.friendships.destroy(user_id=user_id)\n print(\"unfollowed %d\" % (user_id))", "def follows(self, name):\r\n url = '{0}/{1}/{2}'.format(self.get_url(), 'following', name)\r\n\r\n return http.Request('GET', url), resource.parse_boolean", "def get_follows(self):\n return [c.id for c in self.conf.follows]", "def follower(account):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n if not account:\n if \"default_account\" in mph.config:\n account = [mph.config[\"default_account\"]]\n for a in account:\n a = Account(a, morphene_instance=stm)\n print(\"\\nFollowers statistics for @%s (please wait...)\" % a.name)\n followers = a.get_followers(False)\n followers.print_summarize_table(tag_type=\"Followers\")", "def get_posts_of_followings_of_a_user(tx: 
Transaction, email: str) -> BoltStatementResult:\n query = f\"\"\"\n MATCH (:Person {{email: '{email}'}})\n -[:FOLLOWS]->(user:Person)\n -[:POSTED]->(post:Post)\n RETURN DISTINCT {{content:post.content, modified:post.modified, created:post.created, uuid:post.uuid, user_email:user.email}} AS posts\"\"\"\n return tx.run(query)", "def scrapeFollowingFromAnAccount():\n global api", "def follow(self, followerId, followeeId):\n if followerId in self.follows:\n self.follows[followerId].add(followeeId)\n else:\n self.follows[followerId] = set([followeeId])", "def getFollowers():\n followers = []\n next_max_id = True\n while next_max_id:\n # first iteration hack\n if next_max_id is True:\n next_max_id = ''\n\n _ = GetInstagramAnswer.igApi.getUserFollowers(GetInstagramAnswer.igApi.username_id, maxid=next_max_id)\n followers.extend(GetInstagramAnswer.igApi.LastJson.get('users',[]))\n next_max_id = GetInstagramAnswer.igApi.LastJson.get('next_max_id','')\n return \"You have currently \"+str(len(followers))+\" Followers on Instagram.\"", "def followed_by(self):\r\n return relationships.FollowedBy(self)", "def unfollow(self, followerId: int, followeeId: int) -> None:\n if followerId != followeeId and followeeId in self.followList.get(followerId, []):\n self.followList[followerId].remove(followeeId)\n # print(self.followList)", "def get_followers(self):\n rsp = self.session.get(self.url + \"/followers\")\n soup = self.getSoup(rsp.content)\n followers = soup.find_all('div', class_ = 'zm-person-item')\n if not followers:\n return\n i, follower = 0, None\n for follower in followers:\n i += 1\n yield follower.find('a', recursive = False)['href']\n while not i % Page_Items_Num:\n data = {\n 'offset' : i,\n 'start' : follower['id'].split('-')[-1],\n '_xsrf' : self.session.getCookie()['_xsrf']\n }\n rsp = self.session.post(self.url + \"/followers\", data = data)\n if rsp.json()['r'] == 0:\n followers = self.getSoup(rsp.json()['msg'][1]).find_all('div', class_ = 'zm-person-item')\n for follower in followers:\n i += 1\n yield follower.find('a', recursive = False)['href']\n else:\n return", "def set_follow(self, follow):\n self.follow = follow", "def resolve_follow_requests(self, info):\n user = info.context.user\n return FollowRequest.objects.filter(following=user.id, pending=True).order_by(\"-request_date\")", "def by_followee_id(cls, followee_id, request):\n\t\treturn request.dbsession.query(Follow).filter_by(followee_id=followee_id).all()", "def get_to_unfollow(self, to_unfollow, n_unfollow):\n if self.monitored_users:\n current_monitored = \\\n list(filter(lambda x: datetime.fromtimestamp(float(x['followDate'])) + timedelta(days=14)\n < datetime.now() and x['username'] not in self.white_list, self.monitored_users))\n to_unfollow.extend([u['user'] for u in current_monitored])\n\n if len(to_unfollow) < n_unfollow:\n if not self.self_followings:\n self.self_followings = self.get_followings()\n add_followings = [f['pk'] for f in self.self_followings.get('users', [])\n if f['username'] not in self.white_list]\n\n if add_followings:\n if len(add_followings) > n_unfollow - len(to_unfollow):\n to_unfollow.extend([random.choice(add_followings) for _ in range(n_unfollow - len(to_unfollow))])\n else:\n to_unfollow.extend(add_followings)\n else:\n to_unfollow = [random.choice(to_unfollow) for _ in range(n_unfollow)]\n\n return to_unfollow", "def follow(request, usertofollow):\n to_follow = Member.objects.get(user__username=usertofollow)\n user = Member.objects.get(user=request.user)\n user.following.add(to_follow)\n 
user.save()\n return redirect(request.META['HTTP_REFERER'])", "def add_untracked_followers(self):\n\n self.log.debug(\"CHECK FOR UNTRACKED FOLLOWERS\")\n followers_ids_api = self.api.followers_ids()\n target = Target.objects.filter(hunter=self.user)\\\n .filter(status=Target.FOLLOWER)\n followers_ids_django = [t.hunted.twitter_id for t in target]\n\n untracked_followers_ids = filter(\n lambda x: unicode(x) not in followers_ids_django,\n followers_ids_api)\n\n untracked_followers, remainder = lookup_users_by_id(self.api,\n untracked_followers_ids)\n for untracked_follower in untracked_followers:\n twitter_account, created = \\\n utils.get_or_create_twitter_account(untracked_follower)\n target, created = Target.objects.get_or_create(\n hunter=self.user, hunted=twitter_account)\n if target.status == Target.PURGATORY:\n # Yay someone we targeted reciprocated follow\n self.follow_reciprocated(target)\n else:\n print target.status\n # Either a totally external follow, an ingrate changed mind,\n # or someone who we chatted became interested and followed\n # Either way the action is the same, follow him\n target.status = Target.FOLLOWER\n target.save()\n self.log.debug(\" => Add follower: %s\" % twitter_account.screen_name)", "def doesfollow(user):\n return jsonify({\n 'follows': isFollowed(g.user,user)\n })", "def follow(self, followerId, followeeId):\r\n if followerId != followeeId:\r\n self.follows[followerId].add(followeeId)", "def follow(user, people):\n api = get_api(user)\n current_screen_name = api.VerifyCredentials().GetScreenName()\n\n # don't let a user follow themselves\n screen_names = [person.twitter_screen_name for person in people]\n if current_screen_name in screen_names: screen_names.remove(current_screen_name)\n\n followed = []\n not_followed = []\n\n for screen_name in screen_names:\n try:\n api.CreateFriendship(screen_name=screen_name)\n followed.append(screen_name)\n except twitter.TwitterError:\n not_followed.append(screen_name)\n\n return 'followed %s people' % len(followed)", "def users_being_followed_tweets():\n username = request.authorization.username\n tweets = []\n\n user_id = get_user_id(username);\n tuples = query_db('''\n select message.*, user.* from message, user\n where message.author_id = user.user_id and (\n user.user_id = ? 
or\n user.user_id in (select whom_id from follower\n where who_id = ?))\n order by message.pub_date desc limit ?''',\n [user_id, user_id, PER_PAGE])\n\n for tuple in tuples:\n tweet = {}\n tweet[\"message_id\"] = tuple['message_id']\n tweet[\"author_id\"] = tuple['author_id']\n tweet[\"text\"] = tuple['text']\n tweet[\"pub_date\"] = tuple['pub_date']\n tweet[\"username\"] = tuple['username']\n tweet[\"email\"] = tuple['email']\n tweets.append(tweet)\n\n return jsonify({'tweets': tweets}), 200", "def following(account):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n if not account:\n if \"default_account\" in mph.config:\n account = [mph.config[\"default_account\"]]\n for a in account:\n a = Account(a, morphene_instance=stm)\n print(\"\\nFollowing statistics for @%s (please wait...)\" % a.name)\n following = a.get_following(False)\n following.print_summarize_table(tag_type=\"Following\")", "def auto_follow(q, count=1, result_type=\"recent\"):\n\n result = search_tweets(q, count, result_type)\n following = set(t.friends.ids(screen_name=TWITTER_HANDLE)[\"ids\"])\n do_not_follow = get_do_not_follow_list()\n\n for tweet in result[\"statuses\"]:\n try:\n if (tweet[\"user\"][\"screen_name\"] != TWITTER_HANDLE and\n tweet[\"user\"][\"id\"] not in following and\n tweet[\"user\"][\"id\"] not in do_not_follow):\n\n t.friendships.create(user_id=tweet[\"user\"][\"id\"], follow=False)\n following.update(set([tweet[\"user\"][\"id\"]]))\n\n print(\"followed %s\" % (tweet[\"user\"][\"screen_name\"]))\n\n except TwitterHTTPError as e:\n print(\"error: %s\" % (str(e)))\n\n # quit on error unless it's because someone blocked me\n if \"blocked\" not in str(e).lower():\n quit()", "def author_following(self):\n\t\tpass", "def follows(self, other):\n\t\treturn self.followed.filter(followers.c.followed_by == other.id).count() > 0", "def is_following(self, you, them):\n if self.filter(from_user=you, to_user=them).count() > 0:\n return True\n return False", "def is_following_by_username(self, id):\n return self.followed.filter(followers.c.followed_id == id).count() > 0", "def find_buttons(self, button_text):\n\n buttons = self.bot.find_elements_by_xpath(\"//*[text()='{}']\".format(button_text))\n\n return buttons", "def unfollow(self, followerId: int, followeeId: int) -> None:\n if followerId not in self.users.keys() or followeeId not in self.users.keys():\n return\n if followeeId not in self.users[followerId].followees.keys():\n return\n self.users[followerId].followees.pop(followeeId)\n\n\n\n # Your Twitter object will be instantiated and called as such:\n # obj = Twitter()\n # obj.postTweet(userId,tweetId)\n # param_2 = obj.getNewsFeed(userId)\n # obj.follow(followerId,followeeId)\n # obj.unfollow(followerId,followeeId)", "def follow(self, user):\n if not self.is_following(user):\n self.followed.append(user)\n return self", "def follow(self, user):\n if not self.is_following(user):\n self.followed.append(user)\n return self", "async def get_new_followers(self, bearer_token:str, user_id:str, after:str) -> list:\n\n headers = {\"Client-Id\": self.TWITCH_PARAMS['client_id'], \"Authorization\": f\"Bearer {bearer_token}\"}\n params = {\"to_id\": user_id, \"first\": 100}\n if after:\n params[\"after\"] = after\n output = []\n while True:\n async with self.bot.session.get(self.TWITCH_USER_FOLLOWS_URL, params=params, headers=headers) as r:\n data = await r.json()\n # self.logger.info(data)\n output.extend(data.get('data', list()))\n if len(data.get('data', list())) < 100:\n break\n 
params['after'] = data.get('pagination', {}).get('cursor', None)\n return output, data.get('pagination', {}).get('cursor', None)", "def userFollowers(nick):\n if (len(nick) != 1):\n print \"Has d'introduir només un nick\"\n return\n i.userFollow(nick[0])", "def followed_by(self):\n return relationships.FollowedBy(self)", "def user_unfollow():\n data = request.get_json(force=True)\n follower = User.query.get(data['follower'])\n following = User.query.get(data['following'])\n follower.followcheck.remove(following)\n db.session.commit()\n return {'unfollowed': True}", "def follow(self, followerId: int, followeeId: int) -> None:\n self.user_followed[followerId].append(followeeId)", "def user_follow_users(self, ids=None, **kwargs):\n return self._put(\n API.MY_FOLLOWING.value, type=\"user\", ids=\",\".join(ids or []), **kwargs\n )", "def get_auth_button(self, dom):\n\n # sign up\n for node in dom.iter('node'):\n text = node.attrib[\"text\"].lower()\n if \"sign\" in text and \"up\" in text and node.attrib[\"clickable\"]:\n return node\n\n # sign in\n for node in dom.iter('node'):\n text = node.attrib[\"text\"].lower()\n if ((\"sign\" in text and \"up\" in text) or (\"log\" in text and \"in\" in text)) and node.attrib[\"clickable\"]:\n return node\n\n return None", "def unfollowUser(following):\n \n cur, user_id, con = initialise(3, True)\n cur.execute(\"DELETE FROM followers WHERE user = (SELECT username FROM users WHERE id = ?) AND following = ?\", (user_id, following))\n finish(con)", "def set_followups(self, elem_name, item_name):\n self.elems[elem_name].followup = self.items[item_name]", "def get_followers_of_a_user(tx: Transaction, email: str) -> BoltStatementResult:\n query = f\"\"\"\n MATCH (follower)-[:FOLLOWS]->(:Person {{email: '{email}'}}) RETURN follower.full_name AS full_name, follower.email AS email, follower.profile_image AS profile_image\n \"\"\"\n return tx.run(query)", "async def get_following_by_page(db, account: str, page: int, page_size: int, follow_type: str):\n account_id = await _get_account_id(db, account)\n state = (2, 3) if follow_type == 'ignore' else (1, 3)\n\n sql = \"\"\"\n SELECT name,reputation,state FROM hive_follows hf\n LEFT JOIN hive_accounts ON hf.following = id\n WHERE hf.follower = :account_id\n AND state IN :state\n ORDER BY hf.created_at DESC\n LIMIT :limit OFFSET :offset\n \"\"\"\n\n return await db.query_all(sql, account_id=account_id,\n state=state, limit=page_size, offset=page*page_size)", "def followers(self, page=None, per_page=None):\r\n url = '{0}/{1}'.format(self.get_url(), 'followers')\r\n params = base.get_params(('page', 'per_page'), locals())\r\n\r\n return http.Request('GET', url, params), parsers.parse_json", "def following(self, page=None, per_page=None):\r\n url = '{0}/{1}'.format(self.get_url(), 'following')\r\n params = base.get_params(('page', 'per_page'), locals())\r\n\r\n return http.Request('GET', url, params), parsers.parse_json", "def prepare_process_like_and_follow(self):\n follow = []\n media = []\n unfollow = []\n\n coef = self.users_to_follow / self.limits_per_hour.get('follow', 1)\n media_to_like = round(coef*self.limits_per_hour.get('like'))\n num_to_unfollow = round(coef*self.limits_per_hour.get('unfollow'))\n\n feed_likes = media_to_like // 2\n feed_likes_list = []\n following_likes = round((media_to_like // 2) * 3 / 4)\n following_likes_list = []\n followers_likes = media_to_like - feed_likes - following_likes\n\n monitored_ids = [i[\"user\"] for i in self.monitored_users]\n\n for posts in 
self.hashtag_feed_list(self.search_hashtags):\n if len(follow) < self.users_to_follow:\n for m in posts:\n if self.check_if_suit(m):\n user_id, username = self.get_user_from_post(m)\n if user_id and user_id not in [i[\"user\"] for i in follow] \\\n and user_id not in monitored_ids:\n follow.append({'user': user_id, 'username': username})\n following_likes_list.append(m)\n\n if len(follow) >= self.users_to_follow:\n break\n\n for p in following_likes_list:\n if p in posts:\n posts.remove(p)\n\n if feed_likes > 0:\n if len(posts) > feed_likes:\n feed_likes_list.extend([i['id'] for i in (random.choice(posts) for _ in range(feed_likes))\n if i['id'] not in feed_likes_list])\n else:\n feed_likes_list.extend([i['id'] for i in posts[:feed_likes] if i['id'] not in feed_likes_list])\n feed_likes -= len(feed_likes_list)\n if feed_likes <= 0:\n if len(follow) >= self.users_to_follow:\n break\n if len(follow) >= self.users_to_follow and feed_likes <= 0:\n break\n\n media.extend(feed_likes_list)\n\n if len(following_likes_list) < following_likes:\n followings = []\n get_n_followings = following_likes - len(following_likes_list)\n if following_likes_list:\n following_likes_list = [self.get_media_id_from_post(i) for i in following_likes_list]\n following_likes_list.extend([i for i in self.get_following_likes(followings, get_n_followings)\n if i and i not in media])\n media.extend(following_likes_list)\n else:\n media.extend([self.get_media_id_from_post(i) for i in following_likes_list[:following_likes]])\n\n media.extend([i for i in self.get_followers_likes(followers_likes) if i and i not in media])\n\n unfollow = self.get_to_unfollow(unfollow, num_to_unfollow)\n\n return follow, media, unfollow", "def followers(self):\r\n request = http.Request('GET', '{0}/followers/'.format(self.get_url()))\r\n\r\n return request, parsers.parse_json", "def followers(self):\r\n request = http.Request('GET', '{0}/followers/'.format(self.get_url()))\r\n\r\n return request, parsers.parse_json", "def followers(self):\r\n request = http.Request('GET', '{0}/followers/'.format(self.get_url()))\r\n\r\n return request, parsers.parse_json", "def by_follower_id(cls, follower_id, request):\n\t\treturn request.dbsession.query(Follow).filter_by(follower_id=follower_id).all()", "def test_notify_following(self):\n # We'll use users[0] as the \"followed\"\n\n self.users[1].follow(self.users[0], \"test\")\n self.users[2].follow(self.users[0], \"test\")\n self.users[3].follow(self.users[0], \"test\")\n\n notify_following(self.users[0], \"test\", CancelledTicketNotification,\n {\"ticket\": \"test\", \"event\": {\"id\": \"1\"},\n \"event_name\": \"test\"},\n ignore=[self.users[1]])\n\n for user in self.staff + self.admins + [self.users[0],\n self.users[1],\n self.users[4]]:\n self.assertEquals(Notification.objects.get_for_user(\n user).count(), 0)\n self.assertEquals(Notification.objects.get_for_user(\n self.users[2]).count(), 1)\n self.assertEquals(Notification.objects.get_for_user(\n self.users[3]).count(), 1)", "def search_helper(name_list, operation, twitter_dict): \r\n return_list = []\r\n \r\n for name in name_list:\r\n if operation == 'following':\r\n search_specified_list = twitter_dict[name]['following']\r\n for following_names in search_specified_list: \r\n if following_names not in return_list: \r\n return_list.append(following_names) \r\n \r\n elif operation == 'followers':\r\n followers = all_followers(twitter_dict, name)\r\n for followers_name in followers: \r\n if followers_name not in return_list: \r\n 
return_list.append(followers_name) \r\n \r\n return return_list", "def like_following(self):\n self.logger.log(\"starting like_following...\")\n count_following = self.account.follows_count\n follows_accounts = self.following\n random.shuffle(follows_accounts)\n for acc in follows_accounts:\n acc = perform_with_ran_delay(self.instagram.get_account_by_id, acc)\n self.logger.log(\" {} > {} posts\".format(acc.username, acc.media_count))\n if acc.media_count > 0:\n\n posts = perform_with_ran_delay(self.instagram.get_medias, acc.username, 50)\n if posts:\n for m in posts:\n try:\n perform_with_ran_delay(self.instagram.like, m.identifier)\n self.logger.log(\"liking 1 post from \"+acc.username)\n random_delay()\n except Exception as e:\n self.logger.log(\"skipping 1 post from \"+acc.username)\n self.logger.log(e)\n random_delay()\n continue", "def follow(self, followerId, followeeId):\n if followerId not in self.follow_map:\n self.follow_map[followerId] = set()\n \n self.follow_map[followerId].add(followeeId)", "def get_follows_route(request):\n\n db_conn = request['db_conn']\n current_user = get_current_user(request)\n user_id = request['params'].get('user_id')\n if user_id:\n user = get_user({'id': user_id}, db_conn)\n if not user:\n return abort(404)\n if (user != current_user and\n user['settings']['view_follows'] != 'public'):\n return abort(403)\n else:\n user = current_user\n if not user:\n return abort(401)\n params = dict(**request['params'])\n params['user_id'] = user['id']\n follows = list_follows(params, db_conn)\n return 200, {\n 'follows': [deliver_follow(follow, access='private')\n for follow in follows]\n }", "def getFollowings(self,id=None,**kwargs):\n # GET /followings [/$id]\n debugMain('getEntitiesIFollow')\n if id is None:\n return self._genericGet('/followings',**kwargs)\n else:\n return self._genericGet('/followings/%s'%id,**kwargs)", "def follow(self, followerId, star):\n self.followstar[followerId] = self.followstar.get(followerId, set()) | set([star])", "async def get_followers_by_page(db, account: str, page: int, page_size: int, follow_type: str):\n account_id = await _get_account_id(db, account)\n state = (2,3) if follow_type == 'ignore' else (1,3)\n\n sql = \"\"\"\n SELECT name,reputation,state FROM hive_follows hf\n LEFT JOIN hive_accounts ON hf.follower = id\n WHERE hf.following = :account_id\n AND state IN :state\n ORDER BY hf.created_at DESC\n LIMIT :limit OFFSET :offset\n \"\"\"\n\n return await db.query_all(sql, account_id=account_id,\n state=state, limit=page_size, offset=page*page_size)", "def followers(self):\r\n url = '{0}/followers'.format(self.get_url())\r\n return http.Request('GET', url), parsers.parse_json", "def followers(self):\r\n url = '{0}/followers'.format(self.get_url())\r\n return http.Request('GET', url), parsers.parse_json", "def followers(self):\r\n url = '{0}/followers'.format(self.get_url())\r\n return http.Request('GET', url), parsers.parse_json", "def followers(self):\r\n url = '{0}/followers'.format(self.get_url())\r\n return http.Request('GET', url), parsers.parse_json", "def _user_following_info(self, uid: int = 0) -> List[_InstagramUser]:\n # If no uid was specified, use the authenticated user's uid\n if uid == 0:\n uid = self.uid\n\n followings: List[Dict[str, Any]] = self.api.getTotalFollowings(uid)\n user_followings = list([_InstagramUser(x) for x in followings])\n return user_followings", "def getListButton(self, id, myScrolledList):\n for button in myScrolledList.buttonsList:\n if button['extraArgs'][1] == id:\n return button" ]
[ "0.7135644", "0.66539246", "0.6400043", "0.63385546", "0.6108239", "0.598225", "0.5863128", "0.57678187", "0.5675595", "0.56031513", "0.5592132", "0.556185", "0.55301255", "0.5489775", "0.5486279", "0.54782057", "0.547135", "0.5470326", "0.54567957", "0.54306024", "0.5411247", "0.5388454", "0.5383007", "0.53790605", "0.53639376", "0.53591156", "0.53468657", "0.5324181", "0.5314061", "0.53139895", "0.5306114", "0.52946067", "0.5293571", "0.52908236", "0.5289865", "0.5278619", "0.5255268", "0.52481455", "0.52174044", "0.521524", "0.52135044", "0.5209717", "0.5204586", "0.5198057", "0.5175899", "0.516785", "0.51658773", "0.5145736", "0.51454294", "0.513851", "0.5135316", "0.5130875", "0.512322", "0.5121224", "0.5119053", "0.5110669", "0.50972414", "0.5094387", "0.5093525", "0.50907457", "0.5084646", "0.5079838", "0.50618213", "0.5061573", "0.505881", "0.5058161", "0.5054078", "0.5054078", "0.50492144", "0.5047863", "0.5041963", "0.50408155", "0.5033171", "0.50311494", "0.5030554", "0.5029252", "0.50263953", "0.4995767", "0.49865246", "0.49828157", "0.49805534", "0.49770755", "0.4973736", "0.4973736", "0.4973736", "0.4968724", "0.49658534", "0.49619323", "0.4952998", "0.49419892", "0.49320894", "0.49256265", "0.49146783", "0.4913896", "0.48969078", "0.48969078", "0.48969078", "0.48969078", "0.48890454", "0.48861083" ]
0.51461345
47
Method likes a specific number of a user's posts.
def latest_likes(self, user, number_posts, likes):
    WAIT = 1
    if likes:
        action = 'Like'
    else:
        action = 'Unlike'
    self.nav_user(user)
    image_container = []
    image_container.extend(self.driver.find_elements_by_class_name('_9AhH0'))
    for image in image_container[:number_posts]:
        image.click()
        time.sleep(WAIT)
        try:
            self.driver.find_element_by_xpath("//*[@aria-label='{}']".format(action).click())
        except Exception as e:
            print(e)
            self.driver.find_elements_by_xpath('/html/body/div[4]/div[2]/div/article/div[3]/section[1]/span[1]/button')[0].click()  # clicks the heart symbol
            time.sleep(WAIT)
        self.driver.find_elements_by_xpath('/html/body/div[4]/div[3]/button')[0].click()  # Makes sure to close out of current picture
        time.sleep(WAIT)

# Tested
users_list = []

def get_likes_list(self, username):
    """ Method gets a list of users who like a post """
    api = self.api
    api.searchUsername(username)
    result = api.LastJson
    username_id = result['user']['pk']  # Gets the user ID
    user_posts = api.getUserFeed(username_id)  # gets the user feed
    result = api.LastJson
    media_id = result['items'][0]['id']  # gets the most recent post
    api.getMediaLikers(media_id)  # gets users who liked
    users = api.LastJson('users')
    for user in users:  # appends the users to the list
        users_list.append({'pk':user['pk'], 'username':user['username']})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def like_user_posts(self, user:str, n_posts:int, like:bool=True):\n\n action = 'Like' if like else 'Unlike'\n\n self._nav_user(user)\n\n imgs = []\n elements = self._find_element(EC.presence_of_all_elements_located((By.CLASS_NAME, '_9AhH0')))\n imgs.extend(elements)\n\n for img in imgs[:n_posts]:\n img.click() \n time.sleep(1) \n try:\n self.driver.find_element_by_xpath(\"//*[@aria-label='{}']\".format(action)).click()\n except Exception as e:\n LOGGER.error(e)\n\n self.driver.find_elements_by_class_name('ckWGn')[0].click()", "def like(self, n: int) -> None:\n\n # YOUR CODE HERE\n self.likes += 1", "def like(self, data_base, user):\n cursor = data_base.cursor()\n cursor.execute(f\"UPDATE post SET likes = likes + 1 WHERE id = '{self.id}'\") # Increments the likes\n cursor.execute(f\"INSERT INTO user_like (user_id, post_id) VALUES ({user.id}, {self.id})\")\n if self.commit_to_db:\n data_base.commit()\n cursor.close()", "def like(request, post_id):\n if request.method == \"PUT\":\n liked = None\n user = request.user\n post = Post.objects.get(id=post_id)\n # If user already liked, decrement the like count and remove as 'liker'\n if user in post.liked_by.all():\n post.liked_by.remove(user)\n post.likes -= 1\n post.save()\n liked = False\n # Else increase like count and add user\n else:\n post.liked_by.add(user)\n post.likes += 1\n post.save()\n liked = True\n # Return data for updating dynamically with javascript\n return JsonResponse({\"total_likes\": post.likes, \"liked\": liked})", "def post_likes(request):\n if request.method == 'POST':\n if not request.user.is_authenticated:\n raise PermissionDenied\n pk = request.POST.get('post_id')\n try:\n post = PostsModel.objects.get(id=pk)\n except PostsModel.DoesNotExist:\n post = None\n try:\n likes = LikesModel.objects.get(user=request.user, post=post)\n except LikesModel.DoesNotExist:\n likes = None\n\n\n if likes == None:\n likes = LikesModel.objects.create(user=request.user, post=post)\n post.likes_count = F('likes_count') + 1\n post.save(update_fields=[\"likes_count\"])\n class_likes = 'fa fa-heart mx-2'\n else:\n if likes.is_liked == True:\n likes.is_liked = False\n likes.save()\n post.likes_count = F('likes_count') - 1\n post.save(update_fields=[\"likes_count\"])\n class_likes = 'fa fa-heart-o mx-2'\n elif likes.is_liked == False:\n likes.is_liked = True\n likes.save()\n post.likes_count = F('likes_count') + 1\n post.save(update_fields=[\"likes_count\"])\n class_likes = 'fa fa-heart mx-2'\n post.refresh_from_db()\n response_data = {'_code' : 0, '_status' : 'ok', '_likes': post.likes_count, '_class_likes': class_likes}\n else:\n response_data = {'_code' : 1, '_status' : 'no' }\n\n return JsonResponse(response_data)", "def like(self):\n self.like_count = self.like_count + 1 if self.like_count else 1", "def order_posts_to_likes(sender, instance, created=False, **kwargs):\n user = instance.user\n post = instance.post\n tags = post.tags.all()\n\n likes = Like.objects.filter(user=user, post=post)\n liked_posts = [ i.post for i in likes if i.liked_status==True]\n\n for post in liked_posts:\n liked_tags = post.tags.all()\n for tag in liked_tags:\n if not tag in tags:\n tags.append(tag)\n\n similar_posts = Post.objects.filter(tags__in=tags).distinct()\n try:\n UserPostWeight.objects.filter(user=user).delete()\n except:\n pass\n weight = 2\n for post in similar_posts:\n UserPostWeight.objects.create(user=user, post=post, weight=weight)\n weight+=2\n \n\n\n pass", "def users_like_changed(sender, instance, **kwargs):\n instance.total_likes = 
instance.users_like.count()\n instance.save()", "def get_user_likes(self) -> int:\n return -1", "def get_number_of_likes_for_user(self, user):\n\t\tfrom pins.models import Pin\n\t\tpin_ctype = ContentType.objects.get_for_model(Pin)\n\t\tpin_list = Pin.objects.active_pins().filter(board__user=user).values_list('pk', flat=True)\n\t\treturn self.filter(content_type=pin_ctype, object_id__in=pin_list).count()", "def like(request, pk):\n update_posts_expiration()\n post = 0;\n post = Post.objects.get(id=pk)\n #if the post is expired you user can't like it\n if post.is_expired == True:\n return Response(\"You can't interact with a expired post\")\n else:\n #if the post is not expired then increment the likes count by 1 and save a serializer of like to the database with user and post information\n if request.method == \"POST\":\n if request.user != post.user:\n post.likes_count += 1\n post.save()\n serializer = LikeSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save(user=request.user, liked_post=post)\n return Response(\"you liked the post with title: \" + post.title)\n else:\n return Response(serializer.errors)\n else:\n return Response(\"you can't like your own post\")\n \n\n return Response(\"you didn't like the post yet\")", "def like(self, request, pk=None):\n\n user_wall_post = self.get_object()\n user_wall_post.likes.add(self.request.user)\n to_user = user_wall_post.owner\n from_user = request.user\n\n UserNotification.create_post_friend_liked_notification(from_user, to_user, 'Right', id=pk)\n return Response(status=201)", "def show_likes(user_id):\n\n\n user = User.query.get_or_404(user_id)\n\n return render_template('users/likes.html', user=user)", "def post(self):\n liked = self.request.get('like')\n unliked = self.request.get('unlike')\n post_id = self.request.get('post_id')\n post = Posts.get_by_id(int(post_id))\n user = self.get_active_user()\n user_id = int(user.key().id())\n\n if liked:\n if user_id in post.liked_by:\n self.render_improper_endpoint_access(\"like\")\n else:\n if post.submitter_id != user_id:\n post.liked_by.append(user.key().id())\n post.put()\n self.redirect('/%s' % str(post.key().id()))\n else:\n self.error(403)\n elif unliked:\n if user_id in post.liked_by:\n index = post.liked_by.index(user_id)\n del post.liked_by[index]\n post.put()\n self.redirect('/%s' % str(post.key().id()))\n else:\n self.error(500)", "def post(self, request, *args, **kwargs):\n\n user_wall_post_comment = self.get_object()\n user_wall_post_comment.likes.add(self.request.user)\n return Response(status=201)", "def get_user_posts(self, request):\n post_objects = Post.objects.filter(liked_users__id=request.user.id)\n avg_user_liked_post_weight = self.__avg_user_interested_post_weight(post_objects)\n queryset = self.__user_interested_post_filter(avg_user_liked_post_weight)\n context = {'user':request.user}\n serializer = PostSerializer(queryset, many=True, context=context)\n return Response({'data': serializer.data}, status=status.HTTP_200_OK)", "def post_liker(a, args):\n if args.likes.filter(user=a).exists():\n exists = True\n else:\n exists = False\n return exists", "def d_like(request):\n token = request.GET.get('token','')\n\n if not token:\n return HttpResponse('error in user validation')\n \n try:\n api = ApiKey.objects.get(key=token)\n user = api.user\n except ApiKey.DoesNotExist:\n return HttpResponse('error in user validation')\n if request.method == 'POST' and user.is_active:\n try:\n post_id = int(request.POST['post_id'])\n except ValueError:\n return 
HttpResponse('erro in post id')\n try:\n post = Post.objects.get(pk=post_id)\n except Post.DoesNotExist:\n return HttpResponse('post not found')\n\n like, created = Likes.objects.get_or_create(user=user, post=post)\n if created:\n post.like=post.like+1\n post.save()\n return HttpResponse('+1')\n elif like:\n like.delete()\n post.like=post.like-1\n post.save()\n return HttpResponse('-1')\n\n return HttpResponse('error in parameters')", "def like_post(request, pk):\r\n post = get_object_or_404(Post, id=request.POST.get('id'))\r\n liked = False\r\n\r\n # If the user already liked the post\r\n if post.likes.filter(id=request.user.id).exists():\r\n # unlike\r\n post.likes.remove(request.user)\r\n liked = False\r\n Likes.objects.filter(user=request.user, post=post).delete()\r\n else:\r\n if post.dislikes.filter(id=request.user.id).exists():\r\n post.dislikes.remove(request.user)\r\n post.likes.add(request.user)\r\n liked = True\r\n like = Likes.objects.create(user=request.user, post=post)\r\n\r\n context = {\r\n 'post': post,\r\n 'is_liked': liked,\r\n }\r\n\r\n if request.is_ajax():\r\n html = render_to_string('blog/like_section.html', context, request=request)\r\n return JsonResponse({'form': html, \r\n 'total_likes': post.likes.count(),\r\n 'total_dislikes': post.dislikes.count(),\r\n 'liked': liked})", "def update_likes(self):\n self.nb_likes = self.likes.count()\n self.save()", "def like(self, event, number):\n\n if not hasattr(self, 'user_id') or not hasattr(self, 'top_id'):\n vk_common.dprint(2, vk_common.func_name(), f\"Не заданы атрибуты user_id = или top_id.\")\n return\n\n item_id = self.item_ids[self.top_id-1]\n if item_id is None:\n vk_common.dprint(2, vk_common.func_name(), f\"Атрибут item_id = None.\")\n return\n else:\n vk_common.dprint(2, vk_common.func_name(), f\"number = {number}; item_id = {item_id}\")\n\n vk_common.dprint(1, f\"имя файла с фото = \\\"{self.user_id}_{self.top_id}\\\"; \"\n f\"ссылка = https://vk.com/id{self.user_id}\")\n\n err, msg, likes = vk_common.like_photo(self.user_id, item_id)\n if err != 0:\n vk_common.dprint(2, \"Не смогли поставить лайк.\")\n mb.showinfo(\"Информация\", f\"Не смогли поставить лайк фото пользователя c id = {self.user_id}.\")\n return\n vk_common.dprint(2, f\"Поставили лайк: owner_id = {self.user_id}; item_id = {item_id}\")\n mb.showinfo(\"Информация\", f\"Поставили лайк фото пользователя c id = {self.user_id}.\")\n # err, msg, likes = user.like_photo(234068204, 345376029)", "def on_created_like(sender, instance: dillo.models.mixins.Likes, created, **kwargs):\n\n if (\n isinstance(instance.content_object, dillo.models.comments.Comment)\n and instance.content_object.parent_comment\n ):\n target = instance.content_object.parent_comment\n else:\n target = None\n\n log.debug('Generating like activity')\n # TODO(fsiddi) Prevent duplicate activity.\n # If the user likes a post or comment after having unliked it, do not generate activity.\n action.send(instance.user, verb='liked', action_object=instance.content_object, target=target)\n\n # Increase likes_count for profile of content owner their content is liked.\n if not created:\n return\n target_user = instance.content_object.user\n dillo.models.profiles.Profile.objects.filter(user=target_user).update(\n likes_count=F('likes_count') + 1\n )\n log.debug('Increased like count for user %s' % target_user)", "def get_likes_count(self, instance):\n return instance.likes.count()", "def get_likes_count(self, instance):\n return instance.likes.count()", "def get_likes_count(self, instance):\n 
return instance.likes.count()", "def calc_likes_from_post(self, post):\n postlikes = len(post['likes'])\n comments = post['comments']\n commentslikes = 0\n if comments: # not empty\n # DataFrame to avoid iterating over each comment\n commentsdf = pd.DataFrame(comments)\n commentslikes = commentsdf['like_count'].sum()\n likes = {'post_likes': postlikes, 'comments_likes': commentslikes}\n return likes", "def user_response_to_post(self, request, pk):\n post_objects_count = Post.objects.filter(id=pk, liked_users__id=request.user.id).count()\n post_objects = Post.objects.get(id=pk)\n if post_objects_count !=0:\n post_objects.liked_users.remove(request.user)\n response_msg = \"You disliked the post\"\n else:\n post_objects.liked_users.add(request.user)\n response_msg = \"You have liked the post\"\n return Response({'data': response_msg}, status=status.HTTP_200_OK)", "def send_like(request):\n if request.method == \"POST\":\n if \"token\" in request.data and request.data[\"token\"] != \"\" and request.data[\"token\"] is not None:\n if Token.objects.filter(key=request.data[\"token\"]).exists():\n token = get_object_or_404(Token, key=request.data[\"token\"])\n if Post.objects.filter(pk=request.data[\"post_id\"]).exists():\n post = Post.objects.get(pk=request.data[\"post_id\"])\n if Like.objects.filter(post=post, user_id=token.user_id).exists():\n return Response({\"error\": 31})\n else:\n post.count_likes += 1\n post.save()\n like = Like.objects.create(post=post, user_id=token.user_id)\n serializer = PostSerializer(post, context={'user_id': token.user_id})\n UserFeed.objects.create(user=post.author,\n action_user=token.user,\n like=like,\n action=\"Like\")\n if post.author != token.user:\n message = \"{} likes your post\".format(token.user.username)\n custom = {\n \"post_id\": post.id,\n \"avatar\": UserProfile.objects.get(user=token.user).avatar.url\n }\n\n user_notification = UserNotification.objects.get(user=post.author)\n send_notification(custom, message, user_notification)\n\n return Response({\"success\": 30,\n \"post\": serializer.data})\n else:\n return Response({\"error\": 32})\n else:\n return Response({\"error\": 17})", "def post_like(self, entry, **args):\n args.update(entry=entry)\n return self.fetch(\"/like\", post_args=args)", "def on_deleted_like(sender, instance: dillo.models.mixins.Likes, **kwargs):\n if not instance.content_object:\n return\n target_user = instance.content_object.user\n profile_likes_count_decrease(target_user)\n log.debug('Decreased like count for user %s' % target_user)", "def get_likes_list(self, username):\n api = self.api\n api.searchUsername(username) \n result = api.LastJson\n username_id = result['user']['pk'] #Gets the user ID\n user_posts = api.getUserFeed(username_id) # gets the user feed\n result = api.LastJson\n media_id = result['items'][0]['id'] #gets the most recent post\n api.getMediaLikers(media_id) #gets users who liked\n users = api.LastJson('users')\n for user in users: #appends the users to the list\n users_list.append({'pk':user['pk'], 'username':user['username']})", "def rank(post):\n score = post.postlike_set.filter(post=post).count()\n order = log(max(score, 1), 10)\n\n seconds = seconds_since_epoch(post.creation_date) - SECONDS_SINCE_START\n\n post.rating = round(order + seconds / 45000, 7)", "def unlike(self, data_base, user):\n cursor = data_base.cursor()\n cursor.execute(f\"UPDATE post SET likes = likes - 1 WHERE id = '{self.id}'\") # Decrements the likes\n cursor.execute(f\"DELETE FROM user_like WHERE user_id = {user.id} AND 
post_id = {self.id}\")\n if self.commit_to_db:\n data_base.commit()\n cursor.close()", "def like_comment(request, pk, pk1):\r\n \r\n comment = get_object_or_404(Comment, id=request.POST.get('id'))\r\n liked = False\r\n\r\n if comment.likes.filter(id=request.user.id).exists():\r\n comment.likes.remove(request.user)\r\n liked = False\r\n else:\r\n comment.likes.add(request.user)\r\n liked = True\r\n\r\n context = {\r\n 'is_liked': liked,\r\n 'total_likes': comment.likes.count()\r\n }\r\n\r\n if request.method == 'POST':\r\n return HttpResponse(\r\n json.dumps(context),\r\n content_type=\"application/json\")", "def executeLikesOnPhotos(quantity):\n global api\n x = 0 \n time.sleep(2)\n element = api.s.driver.find_element_by_xpath(\"//*[contains(@href, '/?tagged=rugby')]\")\n element.click()\n while x != int(quantity):\n #elements = context.webdriver.find_elements_by_xpath(\"//*[@class='_mck9w _gvoze _tn0ps']\")\n time.sleep(2)\n try:\n api.s.driver.find_element_by_css_selector(\".coreSpriteHeartOpen\").click()\n api.logger.info(\"Successfully liked the photo\")\n followUser()\n #Enter follow function here\n except Exception:\n api.logger.error(\"No action has been done. Perhaps the photo has been liked before? Continue...\")\n ActionChains(api.s.driver).send_keys(Keys.RIGHT).perform()\n continue\n x+=1\n ActionChains(api.s.driver).send_keys(Keys.RIGHT).perform()\n time.sleep(2)", "def count_likes(self):\n likes = self.event_likes\n num_likes = len(likes)\n return num_likes", "def like(request, pk):\n try:\n like = Likes()\n like.user = request.user\n like.post = Post.objects.filter(pk=pk).first()\n like.save()\n except Exception:\n like = Likes.objects.filter(user=request.user.pk, post=pk).first()\n like.delete()\n return HttpResponseRedirect(reverse(\"mainapp:post\", kwargs={\"pk\":pk}))", "def addLikeCount(self,count):\n self.interactionCount += count\n return None", "def users_likes(user_id):\n\n if not g.user:\n flash(\"Access unauthorized.\", \"danger\")\n return redirect(\"/\")\n\n users_blocking = [block.user_blocking_id for block in Blocks.query.all() if block.user_being_blocked_id == g.user.id]\n # likes = Message.query.filter(Message.user_id.notin_(users_blocking)).all()\n user = User.query.get_or_404(user_id)\n likes = [message for message in user.likes if message.user_id not in users_blocking]\n return render_template('users/likes.html', user=user, likes=likes)", "def get_user_likes(self, data_base):\n cursor = data_base.cursor(dictionary=True)\n cursor.execute(f\"SELECT user_id FROM user_like WHERE post_id = {self.id}\")\n user_likes = tuple(map(lambda x: str(x['user_id']), cursor.fetchall()))\n if not user_likes:\n return []\n cursor.execute(f\"SELECT username FROM user WHERE id IN ({', '.join(user_likes)})\")\n users = cursor.fetchall()\n cursor.close()\n return list(map(lambda x: x['username'], users))", "def get(self, post_id):\n Post.add_like(int(post_id), self.user.get_id())\n self.redirect('/blog')", "def count_karma(user):\r\n\r\n karma = 0\r\n posts = Post.objects.filter(author=user).all()\r\n for post in posts:\r\n karma += (int(post.likes.count()) - int(post.dislikes.count()))\r\n \r\n return karma", "def like_comment(username):\n\n user_ID = before_request()\n user_ID = None\n if user_ID != None:\n user_ID = str(g.user['_id'])\n\n message_id = request.args.get('message_id')\n profile_usr = request.args.get('profile_usr')\n count = float(1)\n\n redis_obj.zincrby('add_like', message_id, count)\n\n print \"message ID\", message_id\n\n if redis_obj.get(user_ID):\n return 
redirect(url_for('timeline', username=profile_usr, userId=pickle.loads(redis_obj.get(user_ID))))\n else:\n \"\"\" Invalidating cache \"\"\"\n redis_obj.delete('leaderboard-key')\n redis_obj.delete(session['user_id'])\n print \"Invalidating cache after Like\"\n return redirect(url_for('timeline', username=profile_usr))", "def like_following(self):\n self.logger.log(\"starting like_following...\")\n count_following = self.account.follows_count\n follows_accounts = self.following\n random.shuffle(follows_accounts)\n for acc in follows_accounts:\n acc = perform_with_ran_delay(self.instagram.get_account_by_id, acc)\n self.logger.log(\" {} > {} posts\".format(acc.username, acc.media_count))\n if acc.media_count > 0:\n\n posts = perform_with_ran_delay(self.instagram.get_medias, acc.username, 50)\n if posts:\n for m in posts:\n try:\n perform_with_ran_delay(self.instagram.like, m.identifier)\n self.logger.log(\"liking 1 post from \"+acc.username)\n random_delay()\n except Exception as e:\n self.logger.log(\"skipping 1 post from \"+acc.username)\n self.logger.log(e)\n random_delay()\n continue", "def get_user_has_liked(self, instance):\n request = self.context.get(\"request\")\n return instance.likes.filter(pk=request.user.pk).exists()", "def get_user_has_liked(self, instance):\n request = self.context.get(\"request\")\n return instance.likes.filter(pk=request.user.pk).exists()", "def put_like(self, object_id):\n return self.put_object(object_id, \"likes\")", "def put_like(self, object_id):\n return self.put_object(object_id, \"likes\")", "def posts_count(self):\n return Post.objects.filter(user__username = self.user.username).count()", "def create_pagination(request, user=None):\n if user:\n posts = Post.objects.filter(author=user).order_by(\"-timestamp\")\n else:\n posts = Post.objects.all().order_by(\"-timestamp\")\n paginator = Paginator(posts, 5)\n current_page = request.GET.get(\"page\", 1)\n page_obj = paginator.get_page(current_page)\n context = {\"page_obj\": page_obj}\n if request.user.is_authenticated:\n liked_posts = request.user.liked_posts.all()\n context.update({\"liked_posts\": liked_posts})\n return context", "def get_likes(self, obj):\n return QuestionPersonLike.objects.filter(question=obj,\n like=True).count()", "def liked_by(self, user):\n return Likes.objects.filter(recipe=self, chef=user).exists()", "def profile_likes_count_decrease(target_user: User):\n try:\n with transaction.atomic():\n dillo.models.profiles.Profile.objects.filter(user=target_user).update(\n likes_count=F('likes_count') - 1\n )\n except IntegrityError:\n log.warning('Integrity error when incrementing likes count for user %i' % target_user.id)\n target_user.profile.recalculate_likes()", "def get_likes(self):\n source, edge = self.id, \"likes\"\n return User.graph().get_connections(source, edge, limit=100000)[\"data\"]", "def get_num_likes_daily(self, date, like):\n cursor = self.get_cursor()\n end_date = date + relativedelta(days=1)\n query = 'SELECT count(*) AS num ' \\\n 'FROM vote ' \\\n 'WHERE voted_on > %s AND voted_on < %s AND upvote = %s'\n cursor.execute(query, (date, end_date, like))\n count = cursor.fetchall()\n return count[0]['num']", "def add_unsaved_likes_to_user(sender, user, request, **kwargs):\n session_likes = request.session.get('likes')\n if session_likes:\n user.userprofile.liked_products.add(*session_likes)", "def meal_liked(request, meal_pk):\n meal_pk = int(meal_pk)\n meal = Meal.objects.get(pk=meal_pk)\n like = True\n member = request.user\n\n try:\n rating = 
Rating.objects.get(member=member, meal=meal)\n r_percent = meal.percent()\n except ObjectDoesNotExist:\n Rating.objects.create_rating(member, meal, like)\n r_percent = meal.percent()\n return HttpResponse(r_percent)\n\n rating.like = like\n rating.save()\n r_percent = meal.percent()\n return HttpResponse(r_percent)", "def likes(self):\n return self.get_queryset().filter(vote__gt=0)", "def test_timeline_by_likes_paginated_same_likes_span(self):\n from .mockers import user_status_context\n from .mockers import subscribe_context, create_context\n\n # Store the ids of all created activities. First is the oldest\n activities = []\n self.create_context(create_context)\n\n # Create 10 users, subscribe to a context and write a post for each one\n for i in range(1, 11):\n username = 'user{}'.format(i)\n self.create_user(username)\n self.admin_subscribe_user_to_context(username, subscribe_context)\n res = self.create_activity(username, user_status_context)\n activities.append(res.json['id'])\n\n # Like activities so activities with 3 likes spans trough pages 1,2 and 3\n\n self.like_activity('user1', activities[0])\n self.like_activity('user2', activities[0])\n self.like_activity('user3', activities[0])\n self.like_activity('user4', activities[0])\n self.like_activity('user5', activities[0])\n\n self.like_activity('user1', activities[1])\n self.like_activity('user2', activities[1])\n self.like_activity('user3', activities[1])\n self.like_activity('user4', activities[1])\n\n time.sleep(1)\n self.like_activity('user1', activities[2])\n self.like_activity('user2', activities[2])\n self.like_activity('user3', activities[2])\n\n time.sleep(1)\n self.like_activity('user1', activities[3])\n self.like_activity('user2', activities[3])\n self.like_activity('user3', activities[3])\n\n time.sleep(1)\n self.like_activity('user1', activities[4])\n self.like_activity('user2', activities[4])\n self.like_activity('user3', activities[4])\n\n time.sleep(1)\n self.like_activity('user1', activities[5])\n self.like_activity('user2', activities[5])\n self.like_activity('user3', activities[5])\n\n time.sleep(1)\n self.like_activity('user1', activities[6])\n self.like_activity('user2', activities[6])\n self.like_activity('user3', activities[6])\n\n self.like_activity('user1', activities[7])\n self.like_activity('user2', activities[7])\n\n self.like_activity('user1', activities[8])\n\n firstpage = self.testapp.get('/people/%s/timeline?sort=likes&limit=3' % \"user1\", \"\", oauth2Header(\"user1\"), status=200)\n\n self.assertEqual(len(firstpage.json), 3)\n self.assertEqual(firstpage.json[0]['likesCount'], 5)\n self.assertEqual(firstpage.json[0]['id'], activities[0])\n self.assertEqual(firstpage.json[1]['likesCount'], 4)\n self.assertEqual(firstpage.json[1]['id'], activities[1])\n self.assertEqual(firstpage.json[2]['likesCount'], 3)\n self.assertEqual(firstpage.json[2]['id'], activities[6])\n\n secondpage = self.testapp.get('/people/%s/timeline?sort=likes&limit=3&before=%s' % (\"user1\", activities[6]), \"\", oauth2Header(\"user1\"), status=200)\n\n self.assertEqual(len(secondpage.json), 3)\n self.assertEqual(secondpage.json[0]['likesCount'], 3)\n self.assertEqual(secondpage.json[0]['id'], activities[5])\n self.assertEqual(secondpage.json[1]['likesCount'], 3)\n self.assertEqual(secondpage.json[1]['id'], activities[4])\n self.assertEqual(secondpage.json[2]['likesCount'], 3)\n self.assertEqual(secondpage.json[2]['id'], activities[3])\n\n thirdpage = self.testapp.get('/people/%s/timeline?sort=likes&limit=3&before=%s' % (\"user1\", 
activities[3]), \"\", oauth2Header(\"user1\"), status=200)\n\n self.assertEqual(len(thirdpage.json), 3)\n self.assertEqual(thirdpage.json[0]['likesCount'], 3)\n self.assertEqual(thirdpage.json[0]['id'], activities[2])\n self.assertEqual(thirdpage.json[1]['likesCount'], 2)\n self.assertEqual(thirdpage.json[1]['id'], activities[7])\n self.assertEqual(thirdpage.json[2]['likesCount'], 1)\n self.assertEqual(thirdpage.json[2]['id'], activities[8])\n\n fourthpage = self.testapp.get('/people/%s/timeline?sort=likes&limit=3&before=%s' % (\"user1\", activities[8]), \"\", oauth2Header(\"user1\"), status=200)\n self.assertEqual(len(fourthpage.json), 1)\n self.assertEqual(fourthpage.json[0]['likesCount'], 0)\n self.assertEqual(fourthpage.json[0]['id'], activities[9])", "def is_liked(value, user: User):\n return value.is_liked(user)", "def show_likes(user_id):\n\n if CURRENT_USER_KEY not in session:\n raise Unauthorized()\n\n # define user whose favorites are being viewed\n profuser = User.query.get_or_404(user_id)\n # define logged-in user for navbar details\n user = User.query.get(session[CURRENT_USER_KEY])\n if session[CURRENT_USER_KEY] == user_id:\n like_active = 'active'\n else:\n like_active = ''\n\n return render_template('likes.html', user=user, profuser=profuser, likes=profuser.likes, like_active=like_active)", "def get_likes_count():\n return Flag.objects.filter(flag=Flag.LIKE_FLAG).count()", "def save_model(self, request, obj, form, change):\n obj.post.likeNumIncrease()\n obj.save()", "def like_tweet(self, tag):\n self.bot.get('https://twitter.com/search?q=' + tag + '&src=typed')\n self.__wait(3, 3)\n for i in range(1, 3):\n self.bot.execute_script('window.scrollTo(0,document.body.scrollHeight)')\n self.__wait(2, 3)\n tweets = self.bot.find_elements_by_tag_name('article')\n\n links = []\n for tweet in tweets:\n sub_links = tweet.find_elements_by_tag_name('a')\n links += [sub_link.get_attribute('href')\n for sub_link in sub_links if 'status' in sub_link.get_attribute('href')]\n\n print('Started to like {} tweets'.format(len(links)))\n\n for link in links:\n self.bot.get(link)\n self.__wait(3, 5)\n likes = self.bot.find_elements_by_css_selector('div[data-testid=\"like\"')\n for like in likes:\n like.click()\n self.__wait(3, 5)", "def LikeArticle(request):\n try:\n if not request.user.is_authenticated():\n return HttpResponse(json.dumps({\n \"result\": \"1\"\n }))\n\n like = request.GET.get('like', '')\n article_id = request.GET.get('article_id', '')\n\n if str(like).lower() != 'true':\n like = 'false'\n else:\n like = 'true'\n \n record = UserLikedArticles.objects.filter(user=request.user.id, article=article_id)\n\n if like == 'false':\n if record != None and len(record) > 0:\n record[0].delete()\n else:\n if record == None or len(record) == 0:\n like = UserLikedArticles()\n like.user = request.user.id\n like.article=Article.objects.filter(id=article_id)[0]\n like.save()\n \n article = Article.objects.filter(id=article_id)[0]\n article.likes = len(UserLikedArticles.objects.filter(article=article_id))\n article.save()\n \n return HttpResponse(json.dumps({\n \"result\": 0\n }))\n except Exception, e:\n return HttpResponse(json.dumps({\n \"result\": e\n }))", "def add_like(id):\r\n username = request.json['username']\r\n duplicate_likes_query = models.Like.query.filter(models.Like.username == username, models.Like.post_id == id)\r\n # if like from this user to this tweet already exist\r\n if duplicate_likes_query.count() > 0:\r\n return {'like_id': duplicate_likes_query.first().like_id}\r\n\r\n # 
if original tweet does not exist -> 404\r\n models.Tweet.query.get_or_404(id)\r\n\r\n like = models.Like(post_id=id, username=username, timestamp=datetime.datetime.now())\r\n db.session.add(like)\r\n db.session.commit()\r\n\r\n return {'like_id': like.like_id}", "async def love(ctx, user: discord.Member):\r\n author = ctx.message.author\r\n if user.id == ctx.bot.user.id:\r\n await ctx.send(\"I am not capable of loving like you can. I'm sorry.\" )\r\n else:\r\n await ctx.send(author.mention + \" is capable of loving \" + user.mention + \" a whopping \" +\r\n str(randint(0, 100)) + \"%!\")\r\n ctx.counter(n)", "def likes(self):\r\n return Likes(self)", "def likes(self, media_id):\n\n url = \"https://api.instagram.com/v1/media/{0}/likes?access_token={1}\".format(media_id, self.access_token)\n request = requests.get(url)\n\n return request.json()", "def ListLikes(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_likes_message(self, mid):\n cursor = self.get_cursor()\n query = 'SELECT count(*) FROM vote WHERE mid = %s AND upvote = TRUE'\n cursor.execute(query, (mid,))\n return cursor.fetchall()", "def vote(self, upvote, id, remote_addr):\n\n rv = self.db.fetchone(\n 'SELECT likes, dislikes, voters FROM comments WHERE id=%s', (id, ))\n\n if rv is None:\n return None\n\n likes, dislikes, votersPickle = rv\n voters = pickle.loads(votersPickle)\n if likes + dislikes >= 142:\n return {'likes': likes, 'dislikes': dislikes}\n\n bf = Bloomfilter(voters.array, likes + dislikes)\n if remote_addr in bf:\n return {'likes': likes, 'dislikes': dislikes}\n\n bf.add(remote_addr)\n self.db.commit([\n 'UPDATE comments SET',\n ' likes = likes + 1,' if upvote else 'dislikes = dislikes + 1,',\n ' voters = %s'\n 'WHERE id=%s;'], (pickle.dumps(bf.array), id))\n\n if upvote:\n return {'likes': likes + 1, 'dislikes': dislikes}\n return {'likes': likes, 'dislikes': dislikes + 1}", "def toggle_like(post_id):\n if CURRENT_USER_KEY not in session:\n raise Unauthorized()\n\n liked_post = Post.query.get_or_404(post_id)\n user = User.query.get(session[CURRENT_USER_KEY])\n # toggle the like by removing from user likes\n if liked_post in user.likes:\n user.likes.remove(liked_post)\n else:\n user.likes.append(liked_post)\n\n db.session.commit()\n\n return redirect(url_for('show_likes', user_id=user.id))", "def get_followers_likes(self, followers_likes):\n user_followers = []\n\n followers = self.get_new_followers()\n if len(followers) < followers_likes:\n user_followers = self.get_followers()\n self.self_followers = deepcopy(user_followers)\n user_followers = [i['pk'] for i in user_followers.get('users', []) if i['pk'] not in followers]\n\n if user_followers:\n if len(user_followers) > followers_likes - len(followers):\n followers.extend([random.choice(user_followers) for _ in range(followers_likes - len(followers))])\n else:\n followers.extend(user_followers)\n else:\n followers = [random.choice(followers) for _ in range(followers_likes)]\n\n followers_media_ids = [self.random_user_media(i) for i in followers]\n\n if len(followers_media_ids) < followers_likes and user_followers:\n while len(followers_media_ids) < followers_likes:\n u = random.choice(user_followers)\n rm = self.random_user_media(u)\n if rm and rm not in followers_media_ids:\n followers_media_ids.append(rm)\n\n return followers_media_ids", "def is_liked_by(self, user):\n return 
user.liked_articles.filter(pk=self.pk).exists()", "def unlike(self, request, pk=None):\n\n user_wall_post = self.get_object()\n user_wall_post.likes.remove(self.request.user)\n return Response(status=201)", "def is_liked(obj, user) ->bool:\n\tif not user.is_authenticated:\n\t\treturn False\n\tobj_type = ContentType.objects.get_for_model(obj):\n\tlikes = Like.objects.filter(\n\t\tcontent_type = obj_type, object_id=obj.id, user=user)\n\treturn likes.exists()\n\n\tdef get_all_likes(obj):\n\t\t\"\"\"\n\t\t\tGets all users, who liked object\n\t\t\"\"\"\n\t\tobj_type = ContentType.objects.get_for_model(obj)\n\t\treturn User.objects.filter(\n\t\t\tlikes_content_type=obj_type, likes_object_id=obj.id)", "def dislike(request, pk):\n update_posts_expiration()\n post = 0;\n post = Post.objects.get(id=pk)\n if post.is_expired == True:\n return Response(\"You can't interact with a expired post\")\n else:\n if request.method == \"POST\":\n if request.user != post.user:\n post.dislikes_count += 1\n post.save()\n serializer = DislikeSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save(user=request.user, disliked_post=post)\n return Response(\"you disliked the post with title: \" + post.title)\n else:\n return Response(serializer.errors)\n else:\n return Response(\"you can't dislike your own post\")\n \n\n return Response(\"you didn't dislike the post yet\")", "def recommend_for_user(self, R, user, n=10, filter_previously_seen=False,\n return_scores=True, **kwargs):", "def __user_interested_post_filter(self, avg_user_liked_post_weight):\n query = Post.objects.extra(select={'is_top': \"weight = \" + str(avg_user_liked_post_weight)})\n resultant_obj = query.extra(order_by = ['-is_top'])\n return resultant_obj", "def get_num_like_photos_daily(self, pid, date, like):\n cursor = self.get_cursor()\n end_date = date + relativedelta(days=1)\n query = 'SELECT count(*) AS num FROM vote WHERE vote.mid = %s ' \\\n 'AND vote.upvote = %s AND voted_on > %s AND voted_on < %s'\n cursor.execute(query, (pid, like, date, end_date,))\n count = cursor.fetchall()\n return count[0]['num']", "def likes():\n click.echo(chalk.blue(\"For whom you want to view likes for\"))\n friend_name = input().strip().lower()\n FRIENDS_FILE_PATH = get_friends_file_path(friend_name)\n\n if os.path.isfile(FRIENDS_FILE_PATH):\n with open(FRIENDS_FILE_PATH) as fin:\n contents = yaml.load(fin)\n entries = contents[\"entries\"]\n likes = []\n for entry in entries:\n if \"likes\" in entry:\n likes.extend(entry[\"likes\"])\n click.echo(\"Likes:\")\n for i, n in enumerate(likes):\n click.echo(str(i) + \": \" + n)\n else:\n click.echo(\n chalk.red(\n 'The Likes file path for this module does not exist. 
Please type \"yoda people like\" to create a new one'\n )\n )", "def postTweet(self, userId: int, tweetId: int) -> None:\n self.cnt += 1\n self.posts[userId].append((self.cnt, tweetId))", "def like_comment(request):\n\n # get data\n in_data = getRequestData(request)\n\n # increment comment score\n try:\n comment = Comment.objects.get(id=in_data.get('mycommentid'))\n comment.score += 1\n comment.save()\n except:\n return HttpResponseBadRequest('Error saving to database!')\n\n return JsonResponse(in_data)", "def post(self, request, *args, **kwargs):\n\n user_wall_post_comment = self.get_object()\n user_wall_post_comment.likes.remove(self.request.user)\n return Response(status=201)", "def upvote(self, request, pk=None):\n post = self.get_object()\n post.upvotes += 1\n post.save()\n serializer = self.get_serializer(post)\n return Response(serializer.data, status.HTTP_200_OK)", "def dislike_post(request, pk):\r\n post = get_object_or_404(Post, id=request.POST.get('id'))\r\n disliked = False\r\n if post.dislikes.filter(id=request.user.id).exists():\r\n post.dislikes.remove(request.user)\r\n disliked = False\r\n else:\r\n if post.likes.filter(id=request.user.id).exists():\r\n post.likes.remove(request.user)\r\n post.dislikes.add(request.user)\r\n disliked = True\r\n\r\n if request.is_ajax():\r\n return JsonResponse({'total_dislikes': post.dislikes.count(),\r\n 'total_likes': post.likes.count(),\r\n 'disliked': disliked})", "def test_likes_sorting_2(self):\n from .mockers import user_status_context\n from .mockers import subscribe_context, create_context\n\n page_size = 3\n\n # Store the ids of all created activities. First is the oldest\n activities = []\n self.create_context(create_context)\n\n # Create 6 users, post an activity with each one and self-like it\n for i in range(1, 7):\n username = 'user{}'.format(i)\n self.create_user(username)\n self.admin_subscribe_user_to_context(username, subscribe_context)\n res = self.create_activity(username, user_status_context)\n activities.append(res.json['id'])\n\n self.like_activity(username, activities[0])\n self.like_activity(username, activities[3])\n self.like_activity(username, activities[1])\n self.like_activity(username, activities[5])\n self.like_activity(username, activities[4])\n self.like_activity(username, activities[2])\n\n firstpage = self.testapp.get('/people/%s/timeline?limit=%d&sort=likes' % (\"user1\", page_size), \"\", oauth2Header(\"user1\"), status=200)\n\n self.assertEqual(len(firstpage.json), 3)\n self.assertEqual(firstpage.json[0]['likesCount'], 1)\n self.assertEqual(firstpage.json[0]['id'], activities[2])\n self.assertEqual(firstpage.json[1]['likesCount'], 1)\n self.assertEqual(firstpage.json[1]['id'], activities[4])\n self.assertEqual(firstpage.json[2]['likesCount'], 1)\n self.assertEqual(firstpage.json[2]['id'], activities[5])\n\n secondpage = self.testapp.get('/people/%s/timeline?sort=likes&limit=%d&before=%s' % (\"user1\", page_size, activities[5]), \"\", oauth2Header(\"user1\"), status=200)\n\n self.assertEqual(len(secondpage.json), 3)\n\n self.assertEqual(secondpage.json[0]['likesCount'], 1)\n self.assertEqual(secondpage.json[0]['id'], activities[1])\n self.assertEqual(secondpage.json[1]['likesCount'], 1)\n self.assertEqual(secondpage.json[1]['id'], activities[3])\n self.assertEqual(secondpage.json[2]['likesCount'], 1)\n self.assertEqual(secondpage.json[2]['id'], activities[0])", "def like(id):\n spinner = Halo(text=\"Liking photo...\", spinner=\"dots\").start()\n try:\n api.like(id)\n spinner.succeed(\"Liked\")\n except 
Exception:\n spinner.fail(\"Failed to like photo.\")", "def test_like(self):\n\n message = Message(text=\"hello world\", user_id=self.testuser2.id)\n \n db.session.add(message)\n db.session.commit()\n \n message_id = message.id\n\n with self.client as c:\n with c.session_transaction() as session:\n session[CURR_USER_KEY] = self.testuser.id\n \n response = c.post(f\"/messages/{message_id}/like\", follow_redirects=True)\n \n self.assertEqual(response.status_code, 200)\n\n likes = Likes.query.filter(Likes.message_id==message_id).all()\n self.assertEqual(len(likes), 1)\n self.assertEqual(likes[0].user_id, self.testuser.id)", "def up_vote(cls, user, message):\r\n pass", "def test_unauthenticated_user_liking(self):\n self.like_dislike(self.dislike_url(5))", "def _find_avg_likes(shared_data):\r\n\r\n td = datetime.timedelta(days=1)\r\n today = datetime.datetime.today()\r\n\r\n # retrieve most recent media by user \r\n try:\r\n \tmedia_nodes = shared_data['entry_data']['ProfilePage'][0]['user']['media']['nodes']\r\n except Exception as e:\r\n \tlogging.error(e)\r\n \tlogging.error('Uh oh...something went wrong when trying to parse through the user profile. Continuing...')\r\n\r\n \treturn\r\n\r\n # Check each media, and add them into a list if they are over 1 day old\r\n media_num = 0\r\n like_num = []\r\n for node in media_nodes:\r\n \t\r\n \ttimestamp = int(node['date'])\r\n \tcreated_time = datetime.datetime.fromtimestamp(int(timestamp))\r\n\r\n \tif today - created_time >= td:\r\n \t\tlike_num.append(int(node['likes']['count']))\r\n\r\n \tmedia_num += 1\r\n\r\n # find the average like\r\n if len(like_num) == 0:\r\n avg_like = 0\r\n elif len(like_num) >= 10:\r\n like_num.sort()\r\n l = like_num[1:-1]\r\n avg_like = sum(l) / len(l)\r\n else:\r\n avg_like = sum(like_num) / len(like_num)\r\n\r\n return avg_like", "def get_user_liked(user, status):\n return models.Favorite.objects.filter(user=user, status=status).exists()", "def increase_count(self, number=1):\n self.count += number", "def like_comment_API(id):\n\n count = float(1)\n redis_obj.zincrby('add_like', id, count)\n\n \"\"\" Invalidating cache \"\"\"\n redis_obj.delete('leaderboard-key')\n\n return jsonify(username=g.user[1], Message=\"Success! Message liked successfully. \", Status_code=status.HTTP_200_OK)", "def like_view(request, pk):\n cocktail = get_object_or_404(Cocktail, id=pk)\n liked = False\n if cocktail.likes.filter(id=request.user.id).exists():\n cocktail.likes.remove(request.user)\n liked = False\n else:\n cocktail.likes.add(request.user)\n liked = True\n\n return HttpResponseRedirect(reverse('cocktails:cocktail-details', args=[str(pk)]))", "def post_detail(request, pk):\n post = get_object_or_404(Post, pk=pk)\n post.views += 1\n post.save()\n return render(request, \"postdetail.html\", {\"post\": post})", "def like():\n if CURR_USER_KEY in session:\n cafe_id = int(request.json[\"cafe_id\"])\n print(type(cafe_id))\n user_id = g.user.id\n print(type(user_id))\n like = Like(\n cafe_id=cafe_id,\n user_id=user_id\n )\n print(like)\n db.session.add(like)\n print(\"====================================\")\n print(like)\n db.session.commit()\n return jsonify({\"liked\": cafe_id})\n return jsonify({\"error\": \"Not logged in\"})", "def up_vote(cls, user, message):\n pass" ]
[ "0.7468292", "0.7368493", "0.7155223", "0.70347476", "0.6748949", "0.67359304", "0.6665075", "0.6645786", "0.6584126", "0.65746444", "0.6570663", "0.64926827", "0.64644575", "0.64383584", "0.63599944", "0.6310028", "0.6255511", "0.62522185", "0.6192657", "0.6181631", "0.6179297", "0.61228096", "0.6122225", "0.6122225", "0.6122225", "0.61219585", "0.60776657", "0.6046964", "0.604594", "0.60105324", "0.59765184", "0.5970693", "0.5953125", "0.59501356", "0.5896209", "0.58751154", "0.58684444", "0.58556867", "0.58526456", "0.584434", "0.5799406", "0.576595", "0.5764498", "0.5759122", "0.5755701", "0.5755701", "0.5750669", "0.5750669", "0.5750052", "0.5725957", "0.57255167", "0.5711292", "0.5709991", "0.5701076", "0.5697607", "0.566967", "0.56516653", "0.56155777", "0.56149393", "0.56021", "0.55870605", "0.55728364", "0.55680627", "0.5560885", "0.55605376", "0.55553615", "0.55520356", "0.55483705", "0.55378413", "0.55200726", "0.5508181", "0.55064446", "0.55042577", "0.55039936", "0.5485238", "0.54759026", "0.54713327", "0.54691774", "0.5468719", "0.5446975", "0.54285246", "0.5391477", "0.5377984", "0.53546786", "0.53381914", "0.53360885", "0.53266674", "0.5323384", "0.5321014", "0.53155255", "0.53080076", "0.530659", "0.52762914", "0.52578974", "0.52517235", "0.5251419", "0.5250462", "0.523981", "0.52376676", "0.52296937" ]
0.67711866
4
Method gets a list of users who like a post
def get_likes_list(self, username):
    api = self.api
    api.searchUsername(username)
    result = api.LastJson
    username_id = result['user']['pk'] #Gets the user ID
    user_posts = api.getUserFeed(username_id) # gets the user feed
    result = api.LastJson
    media_id = result['items'][0]['id'] #gets the most recent post
    api.getMediaLikers(media_id) #gets users who liked
    users = api.LastJson('users')
    for user in users: #appends the users to the list
        users_list.append({'pk':user['pk'], 'username':user['username']})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_user_likes(self, data_base):\n cursor = data_base.cursor(dictionary=True)\n cursor.execute(f\"SELECT user_id FROM user_like WHERE post_id = {self.id}\")\n user_likes = tuple(map(lambda x: str(x['user_id']), cursor.fetchall()))\n if not user_likes:\n return []\n cursor.execute(f\"SELECT username FROM user WHERE id IN ({', '.join(user_likes)})\")\n users = cursor.fetchall()\n cursor.close()\n return list(map(lambda x: x['username'], users))", "def get_all_likes(obj):\n\t\tobj_type = ContentType.objects.get_for_model(obj)\n\t\treturn User.objects.filter(\n\t\t\tlikes_content_type=obj_type, likes_object_id=obj.id)", "def get_users_from_likes(self, type, owner_id, item_id):\n likes = self.vk.likes.getList(type=type, owner_id=owner_id, item_id=item_id, count=1)\n likes = self.vk.likes.getList(type=type, owner_id=owner_id, item_id=item_id, count=likes['count'])\n return likes['items']", "def like_user_posts(self, user:str, n_posts:int, like:bool=True):\n\n action = 'Like' if like else 'Unlike'\n\n self._nav_user(user)\n\n imgs = []\n elements = self._find_element(EC.presence_of_all_elements_located((By.CLASS_NAME, '_9AhH0')))\n imgs.extend(elements)\n\n for img in imgs[:n_posts]:\n img.click() \n time.sleep(1) \n try:\n self.driver.find_element_by_xpath(\"//*[@aria-label='{}']\".format(action)).click()\n except Exception as e:\n LOGGER.error(e)\n\n self.driver.find_elements_by_class_name('ckWGn')[0].click()", "def get_meals_user_liked(username):\n meals_user_liked = []\n user_liked = Rating.objects.filter(member__username=username, like=True)\n for ratting in user_liked:\n meals_user_liked.append(ratting.meal)\n return meals_user_liked", "def get_user_posts(self, request):\n post_objects = Post.objects.filter(liked_users__id=request.user.id)\n avg_user_liked_post_weight = self.__avg_user_interested_post_weight(post_objects)\n queryset = self.__user_interested_post_filter(avg_user_liked_post_weight)\n context = {'user':request.user}\n serializer = PostSerializer(queryset, many=True, context=context)\n return Response({'data': serializer.data}, status=status.HTTP_200_OK)", "def latest_likes(self, user, number_posts, likes):\n WAIT = 1\n if likes:\n action = 'Like'\n else:\n action = 'Unlike'\n self.nav_user(user)\n image_container = []\n image_container.extend(self.driver.find_elements_by_class_name('_9AhH0'))\n for image in image_container[:number_posts]:\n image.click()\n time.sleep(WAIT)\n try:\n self.driver.find_element_by_xpath(\"//*[@aria-label='{}']\".format(action).click())\n except Exception as e:\n print(e)\n self.driver.find_elements_by_xpath('/html/body/div[4]/div[2]/div/article/div[3]/section[1]/span[1]/button')[0].click() # clicks the heart symbol\n time.sleep(WAIT)\n self.driver.find_elements_by_xpath('/html/body/div[4]/div[3]/button')[0].click() #Makes sure to close out of current picture\n time.sleep(WAIT)\n \n # Tested\n users_list = []\n def get_likes_list(self, username):\n \"\"\"\n Method gets a list of users who like a post\n\n \"\"\"\n api = self.api\n api.searchUsername(username) \n result = api.LastJson\n username_id = result['user']['pk'] #Gets the user ID\n user_posts = api.getUserFeed(username_id) # gets the user feed\n result = api.LastJson\n media_id = result['items'][0]['id'] #gets the most recent post\n api.getMediaLikers(media_id) #gets users who liked\n users = api.LastJson('users')\n for user in users: #appends the users to the list\n users_list.append({'pk':user['pk'], 'username':user['username']})", "def get_users_who_liked_object(*, obj: 'Model'):\n 
ct = ContentType.objects.get_for_model(obj)\n\n return (\n User.objects\n .filter(\n likes__content_type=ct,\n likes__object_id=obj.pk\n )\n )", "def get_followers_likes(self, followers_likes):\n user_followers = []\n\n followers = self.get_new_followers()\n if len(followers) < followers_likes:\n user_followers = self.get_followers()\n self.self_followers = deepcopy(user_followers)\n user_followers = [i['pk'] for i in user_followers.get('users', []) if i['pk'] not in followers]\n\n if user_followers:\n if len(user_followers) > followers_likes - len(followers):\n followers.extend([random.choice(user_followers) for _ in range(followers_likes - len(followers))])\n else:\n followers.extend(user_followers)\n else:\n followers = [random.choice(followers) for _ in range(followers_likes)]\n\n followers_media_ids = [self.random_user_media(i) for i in followers]\n\n if len(followers_media_ids) < followers_likes and user_followers:\n while len(followers_media_ids) < followers_likes:\n u = random.choice(user_followers)\n rm = self.random_user_media(u)\n if rm and rm not in followers_media_ids:\n followers_media_ids.append(rm)\n\n return followers_media_ids", "def is_liked(obj, user) ->bool:\n\tif not user.is_authenticated:\n\t\treturn False\n\tobj_type = ContentType.objects.get_for_model(obj):\n\tlikes = Like.objects.filter(\n\t\tcontent_type = obj_type, object_id=obj.id, user=user)\n\treturn likes.exists()\n\n\tdef get_all_likes(obj):\n\t\t\"\"\"\n\t\t\tGets all users, who liked object\n\t\t\"\"\"\n\t\tobj_type = ContentType.objects.get_for_model(obj)\n\t\treturn User.objects.filter(\n\t\t\tlikes_content_type=obj_type, likes_object_id=obj.id)", "def like(request, post_id):\n if request.method == \"PUT\":\n liked = None\n user = request.user\n post = Post.objects.get(id=post_id)\n # If user already liked, decrement the like count and remove as 'liker'\n if user in post.liked_by.all():\n post.liked_by.remove(user)\n post.likes -= 1\n post.save()\n liked = False\n # Else increase like count and add user\n else:\n post.liked_by.add(user)\n post.likes += 1\n post.save()\n liked = True\n # Return data for updating dynamically with javascript\n return JsonResponse({\"total_likes\": post.likes, \"liked\": liked})", "def get_likes(self):\n source, edge = self.id, \"likes\"\n return User.graph().get_connections(source, edge, limit=100000)[\"data\"]", "def show_likes(user_id):\n\n\n user = User.query.get_or_404(user_id)\n\n return render_template('users/likes.html', user=user)", "def get_user_liked(user, status):\n return models.Favorite.objects.filter(user=user, status=status).exists()", "def getLikeCommentInfos(self, user, listado_infos, filtrar_like):\n listado_infos_likes = []\n if filtrar_like:\n for elemento in listado_infos:\n try:\n LikeInfo.objects.get(info=elemento, usuario=user)\n info = {}\n info[\"info\"] = elemento\n info[\"like\"] = LikeInfo.objects.filter(info=elemento).count()\n info[\"comment\"]=Comments.objects.filter(page=elemento).count()\n if not user.is_authenticated():\n LikeInfo.objects.get(info=elemento, usuario=user)\n info[\"likeInfo\"] = True\n else:\n info[\"likeInfo\"] = False\n listado_infos_likes.append(info)\n except ObjectDoesNotExist, e:\n pass\n else:\n for elemento in listado_infos:\n info = {}\n info[\"info\"] = elemento\n info[\"like\"] = LikeInfo.objects.filter(info=elemento).count()\n info[\"comment\"] = Comments.objects.filter(page=elemento).count()\n try:\n if user.is_authenticated():\n LikeInfo.objects.get(info=elemento, usuario=user)\n info[\"likeInfo\"] = True\n 
else:\n info[\"likeInfo\"] = False\n except ObjectDoesNotExist, e:\n info[\"likeInfo\"] = False\n listado_infos_likes.append(info)\n return listado_infos_likes", "def likes(self):\n return self.get_queryset().filter(vote__gt=0)", "def liked_by(self, user):\n return Likes.objects.filter(recipe=self, chef=user).exists()", "def get_queryset(self):\n user: User = self.request.user\n following_users = user.profile.following.all()\n return Post.objects.filter(author__in=following_users).order_by('created')", "def get_user_has_liked(self, instance):\n request = self.context.get(\"request\")\n return instance.likes.filter(pk=request.user.pk).exists()", "def get_user_has_liked(self, instance):\n request = self.context.get(\"request\")\n return instance.likes.filter(pk=request.user.pk).exists()", "def like(self, data_base, user):\n cursor = data_base.cursor()\n cursor.execute(f\"UPDATE post SET likes = likes + 1 WHERE id = '{self.id}'\") # Increments the likes\n cursor.execute(f\"INSERT INTO user_like (user_id, post_id) VALUES ({user.id}, {self.id})\")\n if self.commit_to_db:\n data_base.commit()\n cursor.close()", "def post(self):\n liked = self.request.get('like')\n unliked = self.request.get('unlike')\n post_id = self.request.get('post_id')\n post = Posts.get_by_id(int(post_id))\n user = self.get_active_user()\n user_id = int(user.key().id())\n\n if liked:\n if user_id in post.liked_by:\n self.render_improper_endpoint_access(\"like\")\n else:\n if post.submitter_id != user_id:\n post.liked_by.append(user.key().id())\n post.put()\n self.redirect('/%s' % str(post.key().id()))\n else:\n self.error(403)\n elif unliked:\n if user_id in post.liked_by:\n index = post.liked_by.index(user_id)\n del post.liked_by[index]\n post.put()\n self.redirect('/%s' % str(post.key().id()))\n else:\n self.error(500)", "def like_following(self):\n self.logger.log(\"starting like_following...\")\n count_following = self.account.follows_count\n follows_accounts = self.following\n random.shuffle(follows_accounts)\n for acc in follows_accounts:\n acc = perform_with_ran_delay(self.instagram.get_account_by_id, acc)\n self.logger.log(\" {} > {} posts\".format(acc.username, acc.media_count))\n if acc.media_count > 0:\n\n posts = perform_with_ran_delay(self.instagram.get_medias, acc.username, 50)\n if posts:\n for m in posts:\n try:\n perform_with_ran_delay(self.instagram.like, m.identifier)\n self.logger.log(\"liking 1 post from \"+acc.username)\n random_delay()\n except Exception as e:\n self.logger.log(\"skipping 1 post from \"+acc.username)\n self.logger.log(e)\n random_delay()\n continue", "def get_likes(self, obj):\n return QuestionPersonLike.objects.filter(question=obj,\n like=True).count()", "def users_likes(user_id):\n\n if not g.user:\n flash(\"Access unauthorized.\", \"danger\")\n return redirect(\"/\")\n\n users_blocking = [block.user_blocking_id for block in Blocks.query.all() if block.user_being_blocked_id == g.user.id]\n # likes = Message.query.filter(Message.user_id.notin_(users_blocking)).all()\n user = User.query.get_or_404(user_id)\n likes = [message for message in user.likes if message.user_id not in users_blocking]\n return render_template('users/likes.html', user=user, likes=likes)", "def like(self, request, pk=None):\n\n user_wall_post = self.get_object()\n user_wall_post.likes.add(self.request.user)\n to_user = user_wall_post.owner\n from_user = request.user\n\n UserNotification.create_post_friend_liked_notification(from_user, to_user, 'Right', id=pk)\n return Response(status=201)", "def fans(self, 
request, pk=None):\n\n obj = self.get_object()\n users_list = like_func.get_liked_users(obj)\n serializer = UserSerializer(users_list, context={'request': request}, many=True)\n return Response(serializer.data)", "def get_meals_user_disliked(username):\n meals_user_disliked = []\n user_disliked = Rating.objects.filter(member__username=username, like=False)\n for ratting in user_disliked:\n meals_user_disliked.append(ratting.meal)\n return meals_user_disliked", "def prepare_process_like_and_follow(self):\n follow = []\n media = []\n unfollow = []\n\n coef = self.users_to_follow / self.limits_per_hour.get('follow', 1)\n media_to_like = round(coef*self.limits_per_hour.get('like'))\n num_to_unfollow = round(coef*self.limits_per_hour.get('unfollow'))\n\n feed_likes = media_to_like // 2\n feed_likes_list = []\n following_likes = round((media_to_like // 2) * 3 / 4)\n following_likes_list = []\n followers_likes = media_to_like - feed_likes - following_likes\n\n monitored_ids = [i[\"user\"] for i in self.monitored_users]\n\n for posts in self.hashtag_feed_list(self.search_hashtags):\n if len(follow) < self.users_to_follow:\n for m in posts:\n if self.check_if_suit(m):\n user_id, username = self.get_user_from_post(m)\n if user_id and user_id not in [i[\"user\"] for i in follow] \\\n and user_id not in monitored_ids:\n follow.append({'user': user_id, 'username': username})\n following_likes_list.append(m)\n\n if len(follow) >= self.users_to_follow:\n break\n\n for p in following_likes_list:\n if p in posts:\n posts.remove(p)\n\n if feed_likes > 0:\n if len(posts) > feed_likes:\n feed_likes_list.extend([i['id'] for i in (random.choice(posts) for _ in range(feed_likes))\n if i['id'] not in feed_likes_list])\n else:\n feed_likes_list.extend([i['id'] for i in posts[:feed_likes] if i['id'] not in feed_likes_list])\n feed_likes -= len(feed_likes_list)\n if feed_likes <= 0:\n if len(follow) >= self.users_to_follow:\n break\n if len(follow) >= self.users_to_follow and feed_likes <= 0:\n break\n\n media.extend(feed_likes_list)\n\n if len(following_likes_list) < following_likes:\n followings = []\n get_n_followings = following_likes - len(following_likes_list)\n if following_likes_list:\n following_likes_list = [self.get_media_id_from_post(i) for i in following_likes_list]\n following_likes_list.extend([i for i in self.get_following_likes(followings, get_n_followings)\n if i and i not in media])\n media.extend(following_likes_list)\n else:\n media.extend([self.get_media_id_from_post(i) for i in following_likes_list[:following_likes]])\n\n media.extend([i for i in self.get_followers_likes(followers_likes) if i and i not in media])\n\n unfollow = self.get_to_unfollow(unfollow, num_to_unfollow)\n\n return follow, media, unfollow", "def like(request, pk):\n update_posts_expiration()\n post = 0;\n post = Post.objects.get(id=pk)\n #if the post is expired you user can't like it\n if post.is_expired == True:\n return Response(\"You can't interact with a expired post\")\n else:\n #if the post is not expired then increment the likes count by 1 and save a serializer of like to the database with user and post information\n if request.method == \"POST\":\n if request.user != post.user:\n post.likes_count += 1\n post.save()\n serializer = LikeSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save(user=request.user, liked_post=post)\n return Response(\"you liked the post with title: \" + post.title)\n else:\n return Response(serializer.errors)\n else:\n return Response(\"you can't like your own post\")\n 
\n\n return Response(\"you didn't like the post yet\")", "def order_posts_to_likes(sender, instance, created=False, **kwargs):\n user = instance.user\n post = instance.post\n tags = post.tags.all()\n\n likes = Like.objects.filter(user=user, post=post)\n liked_posts = [ i.post for i in likes if i.liked_status==True]\n\n for post in liked_posts:\n liked_tags = post.tags.all()\n for tag in liked_tags:\n if not tag in tags:\n tags.append(tag)\n\n similar_posts = Post.objects.filter(tags__in=tags).distinct()\n try:\n UserPostWeight.objects.filter(user=user).delete()\n except:\n pass\n weight = 2\n for post in similar_posts:\n UserPostWeight.objects.create(user=user, post=post, weight=weight)\n weight+=2\n \n\n\n pass", "def user_response_to_post(self, request, pk):\n post_objects_count = Post.objects.filter(id=pk, liked_users__id=request.user.id).count()\n post_objects = Post.objects.get(id=pk)\n if post_objects_count !=0:\n post_objects.liked_users.remove(request.user)\n response_msg = \"You disliked the post\"\n else:\n post_objects.liked_users.add(request.user)\n response_msg = \"You have liked the post\"\n return Response({'data': response_msg}, status=status.HTTP_200_OK)", "def get_user_likes(self) -> int:\n return -1", "def getLikedOkCupidUsers(self):\n\t\tself.logger.info(\"Get all liked OkCupid users\")\n\t\tusers = self.session.query(Models.Okcupid).filter(Models.Okcupid.liked==True).all()\n\t\treturn users", "def like_cafes():\n if CURR_USER_KEY in session:\n cafe_id = request.args.get(\"cafe_id\")\n user = g.user\n print(user.id)\n print(user.likes)\n for like in user.likes:\n if like.cafe_id == int(cafe_id or 0):\n print(\"get True\")\n return jsonify({\"likes\": True})\n\n return jsonify({\"likes\": False})\n\n return jsonify({\"error\": \"Not logged in\"})", "def get_queryset(self, *args, **kwargs):\n following_username = self.kwargs.get(self.look_url_kwarg)\n following_users = FollowUser.objects.filter(\n following_username=following_username)\n\n return following_users", "def likes(self):\r\n return Likes(self)", "def ListLikes(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def post_liker(a, args):\n if args.likes.filter(user=a).exists():\n exists = True\n else:\n exists = False\n return exists", "def getPosts():\n\n cur, user_id = initialise(3)\n cur.execute(\"SELECT username FROM users WHERE id = ?\", [user_id])\n name = cur.fetchall()[0][0]\n cur.execute(\"SELECT * FROM posts WHERE name IN (SELECT following FROM followers WHERE user = ?) 
OR name = ?\", (name, name))\n posts = cur.fetchall()\n return posts", "def send_like(request):\n if request.method == \"POST\":\n if \"token\" in request.data and request.data[\"token\"] != \"\" and request.data[\"token\"] is not None:\n if Token.objects.filter(key=request.data[\"token\"]).exists():\n token = get_object_or_404(Token, key=request.data[\"token\"])\n if Post.objects.filter(pk=request.data[\"post_id\"]).exists():\n post = Post.objects.get(pk=request.data[\"post_id\"])\n if Like.objects.filter(post=post, user_id=token.user_id).exists():\n return Response({\"error\": 31})\n else:\n post.count_likes += 1\n post.save()\n like = Like.objects.create(post=post, user_id=token.user_id)\n serializer = PostSerializer(post, context={'user_id': token.user_id})\n UserFeed.objects.create(user=post.author,\n action_user=token.user,\n like=like,\n action=\"Like\")\n if post.author != token.user:\n message = \"{} likes your post\".format(token.user.username)\n custom = {\n \"post_id\": post.id,\n \"avatar\": UserProfile.objects.get(user=token.user).avatar.url\n }\n\n user_notification = UserNotification.objects.get(user=post.author)\n send_notification(custom, message, user_notification)\n\n return Response({\"success\": 30,\n \"post\": serializer.data})\n else:\n return Response({\"error\": 32})\n else:\n return Response({\"error\": 17})", "def like_post(request, pk):\r\n post = get_object_or_404(Post, id=request.POST.get('id'))\r\n liked = False\r\n\r\n # If the user already liked the post\r\n if post.likes.filter(id=request.user.id).exists():\r\n # unlike\r\n post.likes.remove(request.user)\r\n liked = False\r\n Likes.objects.filter(user=request.user, post=post).delete()\r\n else:\r\n if post.dislikes.filter(id=request.user.id).exists():\r\n post.dislikes.remove(request.user)\r\n post.likes.add(request.user)\r\n liked = True\r\n like = Likes.objects.create(user=request.user, post=post)\r\n\r\n context = {\r\n 'post': post,\r\n 'is_liked': liked,\r\n }\r\n\r\n if request.is_ajax():\r\n html = render_to_string('blog/like_section.html', context, request=request)\r\n return JsonResponse({'form': html, \r\n 'total_likes': post.likes.count(),\r\n 'total_dislikes': post.dislikes.count(),\r\n 'liked': liked})", "def post_likes(request):\n if request.method == 'POST':\n if not request.user.is_authenticated:\n raise PermissionDenied\n pk = request.POST.get('post_id')\n try:\n post = PostsModel.objects.get(id=pk)\n except PostsModel.DoesNotExist:\n post = None\n try:\n likes = LikesModel.objects.get(user=request.user, post=post)\n except LikesModel.DoesNotExist:\n likes = None\n\n\n if likes == None:\n likes = LikesModel.objects.create(user=request.user, post=post)\n post.likes_count = F('likes_count') + 1\n post.save(update_fields=[\"likes_count\"])\n class_likes = 'fa fa-heart mx-2'\n else:\n if likes.is_liked == True:\n likes.is_liked = False\n likes.save()\n post.likes_count = F('likes_count') - 1\n post.save(update_fields=[\"likes_count\"])\n class_likes = 'fa fa-heart-o mx-2'\n elif likes.is_liked == False:\n likes.is_liked = True\n likes.save()\n post.likes_count = F('likes_count') + 1\n post.save(update_fields=[\"likes_count\"])\n class_likes = 'fa fa-heart mx-2'\n post.refresh_from_db()\n response_data = {'_code' : 0, '_status' : 'ok', '_likes': post.likes_count, '_class_likes': class_likes}\n else:\n response_data = {'_code' : 1, '_status' : 'no' }\n\n return JsonResponse(response_data)", "def get_posts_of_followings_of_a_user(tx: Transaction, email: str) -> BoltStatementResult:\n query = 
f\"\"\"\n MATCH (:Person {{email: '{email}'}})\n -[:FOLLOWS]->(user:Person)\n -[:POSTED]->(post:Post)\n RETURN DISTINCT {{content:post.content, modified:post.modified, created:post.created, uuid:post.uuid, user_email:user.email}} AS posts\"\"\"\n return tx.run(query)", "def likes():\n click.echo(chalk.blue(\"For whom you want to view likes for\"))\n friend_name = input().strip().lower()\n FRIENDS_FILE_PATH = get_friends_file_path(friend_name)\n\n if os.path.isfile(FRIENDS_FILE_PATH):\n with open(FRIENDS_FILE_PATH) as fin:\n contents = yaml.load(fin)\n entries = contents[\"entries\"]\n likes = []\n for entry in entries:\n if \"likes\" in entry:\n likes.extend(entry[\"likes\"])\n click.echo(\"Likes:\")\n for i, n in enumerate(likes):\n click.echo(str(i) + \": \" + n)\n else:\n click.echo(\n chalk.red(\n 'The Likes file path for this module does not exist. Please type \"yoda people like\" to create a new one'\n )\n )", "def show_likes(user_id):\n\n if CURRENT_USER_KEY not in session:\n raise Unauthorized()\n\n # define user whose favorites are being viewed\n profuser = User.query.get_or_404(user_id)\n # define logged-in user for navbar details\n user = User.query.get(session[CURRENT_USER_KEY])\n if session[CURRENT_USER_KEY] == user_id:\n like_active = 'active'\n else:\n like_active = ''\n\n return render_template('likes.html', user=user, profuser=profuser, likes=profuser.likes, like_active=like_active)", "def d_like(request):\n token = request.GET.get('token','')\n\n if not token:\n return HttpResponse('error in user validation')\n \n try:\n api = ApiKey.objects.get(key=token)\n user = api.user\n except ApiKey.DoesNotExist:\n return HttpResponse('error in user validation')\n if request.method == 'POST' and user.is_active:\n try:\n post_id = int(request.POST['post_id'])\n except ValueError:\n return HttpResponse('erro in post id')\n try:\n post = Post.objects.get(pk=post_id)\n except Post.DoesNotExist:\n return HttpResponse('post not found')\n\n like, created = Likes.objects.get_or_create(user=user, post=post)\n if created:\n post.like=post.like+1\n post.save()\n return HttpResponse('+1')\n elif like:\n like.delete()\n post.like=post.like-1\n post.save()\n return HttpResponse('-1')\n\n return HttpResponse('error in parameters')", "def getLikeInfo(self, info, user):\n like = None\n if user.is_authenticated():\n try:\n like = LikeInfo.objects.get(info=info, usuario=user)\n except ObjectDoesNotExist, e:\n pass\n return like", "def like(request, pk):\n try:\n like = Likes()\n like.user = request.user\n like.post = Post.objects.filter(pk=pk).first()\n like.save()\n except Exception:\n like = Likes.objects.filter(user=request.user.pk, post=pk).first()\n like.delete()\n return HttpResponseRedirect(reverse(\"mainapp:post\", kwargs={\"pk\":pk}))", "def get_likes_count():\n return Flag.objects.filter(flag=Flag.LIKE_FLAG).count()", "def get_profile_like(_driver, _list, _url_list):\n _profile_likes = {} # the dictionary that will be returned\n _profile_likes_link = {} # the dictionary that will be returned\n _iterator = 0 # simple integer to iterate through names list\n for i in _url_list:\n current_name = _list[_iterator] # get the name of the profile is scrapping now\n _driver.get(i) # load the profile\n time.sleep(1.0) # wait to load the page\n\n # check if the profile is a page or not\n try:\n _flags = _driver.find_element_by_xpath(\"//*[contains(text(), 'Reviews')]\") # its a page\n except NoSuchElementException:\n _flags = None # Its not a page\n\n if _flags == None:\n _about_page = 
WebDriverWait(_driver, 10).until(\n EC.presence_of_element_located((By.XPATH, \"//*[contains(text(), 'About')]\"))) # locate the about page\n _about_page.click() # clicking the about page\n fast_scroll(_driver=_driver) # scroll to reveal likes\n time.sleep(1.0) # wait to load the pages\n try:\n _likes = _driver.find_element_by_xpath(\"//*[contains(text(), 'Likes')]\")\n except NoSuchElementException or TimeoutException:\n _likes = None # likes button restricted or hidden by profile\n\n if _likes != None:\n _likes.find_element_by_xpath(\"../../..\").click() # locate the parent button to click\n time.sleep(2.0) # wait for the page to load\n\n _all_likes = WebDriverWait(_driver, 10).until(\n EC.presence_of_element_located((By.XPATH, \"//*[contains(text(), 'All Likes')]\"))) # locate the all likes\n _all_likes.find_element_by_xpath(\"../../../..\").click() # get the all likes parents and click\n time.sleep(1.0) # wait for the page to load\n fast_scroll(_driver=_driver) # scroll to reveal more\n time.sleep(1.0) # wait to load the page\n html_doc_likes = _driver.page_source # dump the page\n soup2 = BeautifulSoup(html_doc_likes, 'lxml') # make soup to navigate the html\n b = soup2.findAll('div', {'class': '_1a5p'}) # get all the liked item\n\n for j in b:\n _liked_item_text = j.find('div', {'class': '_1a5r'}).find('span').text # get the text\n if \".\" in _liked_item_text:\n _liked_item_text = _liked_item_text.replace(\".\", \"\")\n _link = j.find('a')['href']\n _link_processed = \"https://www.facebook.com\" + _link\n _profile_likes_link[_liked_item_text] = _link_processed\n _profile_likes[current_name] = _profile_likes_link # put it on dictionary\n else:\n _profile_likes[current_name] = {'Empty or Restricted': \"Empty or Restricted\"} # put it on dictionary in case not found\n else:\n pass\n time.sleep(1.0) # wait one seconds\n _iterator += 1 # increase the iterator\n print(_iterator)\n return _profile_likes # return the dictionary", "def toggle_like(post_id):\n if CURRENT_USER_KEY not in session:\n raise Unauthorized()\n\n liked_post = Post.query.get_or_404(post_id)\n user = User.query.get(session[CURRENT_USER_KEY])\n # toggle the like by removing from user likes\n if liked_post in user.likes:\n user.likes.remove(liked_post)\n else:\n user.likes.append(liked_post)\n\n db.session.commit()\n\n return redirect(url_for('show_likes', user_id=user.id))", "def ratings_usuarios(username, ratings):\n return list(filter(lambda x: x.username == username, ratings))", "def like():\n if CURR_USER_KEY in session:\n cafe_id = int(request.json[\"cafe_id\"])\n print(type(cafe_id))\n user_id = g.user.id\n print(type(user_id))\n like = Like(\n cafe_id=cafe_id,\n user_id=user_id\n )\n print(like)\n db.session.add(like)\n print(\"====================================\")\n print(like)\n db.session.commit()\n return jsonify({\"liked\": cafe_id})\n return jsonify({\"error\": \"Not logged in\"})", "def get_number_of_likes_for_user(self, user):\n\t\tfrom pins.models import Pin\n\t\tpin_ctype = ContentType.objects.get_for_model(Pin)\n\t\tpin_list = Pin.objects.active_pins().filter(board__user=user).values_list('pk', flat=True)\n\t\treturn self.filter(content_type=pin_ctype, object_id__in=pin_list).count()", "def is_liked_by(self, user):\n return user.liked_articles.filter(pk=self.pk).exists()", "def get_queryset(self):\r\n\r\n user = get_object_or_404(User, username=self.kwargs.get('username'))\r\n return Post.objects.filter(author=user).order_by('-date_posted')", "def LikeArticle(request):\n try:\n if not 
request.user.is_authenticated():\n return HttpResponse(json.dumps({\n \"result\": \"1\"\n }))\n\n like = request.GET.get('like', '')\n article_id = request.GET.get('article_id', '')\n\n if str(like).lower() != 'true':\n like = 'false'\n else:\n like = 'true'\n \n record = UserLikedArticles.objects.filter(user=request.user.id, article=article_id)\n\n if like == 'false':\n if record != None and len(record) > 0:\n record[0].delete()\n else:\n if record == None or len(record) == 0:\n like = UserLikedArticles()\n like.user = request.user.id\n like.article=Article.objects.filter(id=article_id)[0]\n like.save()\n \n article = Article.objects.filter(id=article_id)[0]\n article.likes = len(UserLikedArticles.objects.filter(article=article_id))\n article.save()\n \n return HttpResponse(json.dumps({\n \"result\": 0\n }))\n except Exception, e:\n return HttpResponse(json.dumps({\n \"result\": e\n }))", "def like_comment(request, pk, pk1):\r\n \r\n comment = get_object_or_404(Comment, id=request.POST.get('id'))\r\n liked = False\r\n\r\n if comment.likes.filter(id=request.user.id).exists():\r\n comment.likes.remove(request.user)\r\n liked = False\r\n else:\r\n comment.likes.add(request.user)\r\n liked = True\r\n\r\n context = {\r\n 'is_liked': liked,\r\n 'total_likes': comment.likes.count()\r\n }\r\n\r\n if request.method == 'POST':\r\n return HttpResponse(\r\n json.dumps(context),\r\n content_type=\"application/json\")", "def users_like_changed(sender, instance, **kwargs):\n instance.total_likes = instance.users_like.count()\n instance.save()", "def get_favorite(self, obj):\n article_fav_users = obj.favorite.all()\n return self.fetch_usernames(article_fav_users)", "def __list_all_users(self):\n\n usernames_dict = get_data.get_usernames_dict()\n if len(usernames_dict) > 0:\n first_str = 'user'\n second_str = 'posts scraped'\n descriptor = '{:<40} {}'\n print('')\n print(descriptor.format(first_str, second_str))\n print(descriptor.format(len(first_str) * '-', len(second_str) * '-'))\n for number, username in usernames_dict.items():\n space_str = ' ' if len(str(number)) > 1 else ' '\n first = '[' + space_str + str(number) + '] ' + username\n second = str(get_data.get_user_post_count(username))\n print(descriptor.format(first, second))\n else:\n print('no users found in the database')", "def like_tweet(self, tag):\n self.bot.get('https://twitter.com/search?q=' + tag + '&src=typed')\n self.__wait(3, 3)\n for i in range(1, 3):\n self.bot.execute_script('window.scrollTo(0,document.body.scrollHeight)')\n self.__wait(2, 3)\n tweets = self.bot.find_elements_by_tag_name('article')\n\n links = []\n for tweet in tweets:\n sub_links = tweet.find_elements_by_tag_name('a')\n links += [sub_link.get_attribute('href')\n for sub_link in sub_links if 'status' in sub_link.get_attribute('href')]\n\n print('Started to like {} tweets'.format(len(links)))\n\n for link in links:\n self.bot.get(link)\n self.__wait(3, 5)\n likes = self.bot.find_elements_by_css_selector('div[data-testid=\"like\"')\n for like in likes:\n like.click()\n self.__wait(3, 5)", "def users_being_followed_tweets():\n username = request.authorization.username\n tweets = []\n\n user_id = get_user_id(username);\n tuples = query_db('''\n select message.*, user.* from message, user\n where message.author_id = user.user_id and (\n user.user_id = ? 
or\n user.user_id in (select whom_id from follower\n where who_id = ?))\n order by message.pub_date desc limit ?''',\n [user_id, user_id, PER_PAGE])\n\n for tuple in tuples:\n tweet = {}\n tweet[\"message_id\"] = tuple['message_id']\n tweet[\"author_id\"] = tuple['author_id']\n tweet[\"text\"] = tuple['text']\n tweet[\"pub_date\"] = tuple['pub_date']\n tweet[\"username\"] = tuple['username']\n tweet[\"email\"] = tuple['email']\n tweets.append(tweet)\n\n return jsonify({'tweets': tweets}), 200", "def common_likes(self, user):\n\n self_like_ids = set(self.likes.keys()) if self.likes else set()\n other_like_ids = set(user.fb_profile.likes.keys()) if user.fb_profile.likes else set()\n\n common_like_ids = self_like_ids.intersection(other_like_ids)\n\n return common_like_ids", "def recommendations(self):\n return [user for user in self.tags.similar_objects() if user.status == UserStatus.APPROVED]", "def calc_likes_from_post(self, post):\n postlikes = len(post['likes'])\n comments = post['comments']\n commentslikes = 0\n if comments: # not empty\n # DataFrame to avoid iterating over each comment\n commentsdf = pd.DataFrame(comments)\n commentslikes = commentsdf['like_count'].sum()\n likes = {'post_likes': postlikes, 'comments_likes': commentslikes}\n return likes", "def add_like(id):\r\n username = request.json['username']\r\n duplicate_likes_query = models.Like.query.filter(models.Like.username == username, models.Like.post_id == id)\r\n # if like from this user to this tweet already exist\r\n if duplicate_likes_query.count() > 0:\r\n return {'like_id': duplicate_likes_query.first().like_id}\r\n\r\n # if original tweet does not exist -> 404\r\n models.Tweet.query.get_or_404(id)\r\n\r\n like = models.Like(post_id=id, username=username, timestamp=datetime.datetime.now())\r\n db.session.add(like)\r\n db.session.commit()\r\n\r\n return {'like_id': like.like_id}", "def displayAlsoLike(self):\n\n result = self.client.get(\"/view_favorites\")\n self.assertIn(b\"11925205\", result.data)", "def post_like(self, entry, **args):\n args.update(entry=entry)\n return self.fetch(\"/like\", post_args=args)", "def visible_posts_for(user):\n own = Post.objects.filter(author=user)\n public = Post.objects.filter(circles=PUBLIC_CIRCLE)\n my_circle = Post.objects.filter(circles__owner=user.pk)\n in_circle = Post.objects.filter(circles__members=user.pk)\n return own | public | my_circle | in_circle", "def get_followers(user):\n if user.has_key('followers_list'):\n pass\n else:\n if user.has_key('followers_count'):\n if user['followers_count'] > 4999:\n pages = user['followers_count'] / 5000\n f_list = []\n for page in range(pages):\n try:\n follower_set = api.GetFollowers(user_id=user['id'], cursor=page, count=5000)\n friends_list = []\n for follower in follower_set:\n twitter_users.update({'id':follower.GetId()},follower.AsDict(),upsert=True)\n friends_list.append(follower.GetId())\n f_list = friends_list + f_list\n time.sleep(60)\n user['followers_list'] = f_list\n twitter_users.update({'id': user['id']}, user)\n print \"\\n\\nGot %s followers out of %s listed\" % (len(f_list), user['followers_count'])\n except Exception, e:\n print str(e)\n time.sleep(60)\n else:\n try:\n follower_set = api.GetFollowers(user_id=user['id'], count=5000)\n friends_list = []\n for follower in follower_set:\n twitter_users.update({'id':follower.GetId()},follower.AsDict(),upsert=True)\n friends_list.append(follower.GetId())\n user['followers_list'] = friends_list\n twitter_users.update({'id': user['id']}, user)\n print \"\\n\\nGot %s 
followers out of %s listed\" % (len(friends_list), user['followers_count'])\n except Exception, e:\n print str(e)\n time.sleep(60)", "def get_likers(_driver):\n _likers_name_list = [] # the first list of names that will be returned\n _likers_profile_list = [] # the second list of profile links that will be returned\n _driver.find_element_by_class_name(\"_1g06\").click() # click on the all likes button\n time.sleep(2.0) # wait for the pages to load\n more = more_locator(_driver) # find the see more button\n while more != None:\n more.click() # click on more buttons\n time.sleep(2.0) # wait to load more\n more = more_locator(_driver) # find again the more button\n\n html_doc = _driver.page_source # dumping the page for scrapping data\n soup = BeautifulSoup(html_doc, 'lxml') # making soup for navigating through HTML\n block = soup.findAll(\"div\", {'class': '_4mn'}) # isolate all likers name and profile_link div\n\n for b in block:\n profile_link = b.find(\"a\")['href'] # finding the profile link\n full_profile_link = LOGIN_URL + profile_link # post process to absolute url\n _likers_profile_list.append(full_profile_link) # adding the URL to the list\n\n name = b.find(\"strong\").text # finding the name\n if \".\" in name:\n name = name.replace(\".\", \"\")\n _likers_name_list.append(name) # adding the name to the list\n\n return _likers_name_list, _likers_profile_list # return the two list", "def find_user_like(self, query):\n # if cache is empty, fill it\n if self.user_cache is None:\n self.user_cache = self.get_users()\n\n # if it's still empty, something's wrong\n if self.user_cache is not None:\n # search the names first\n for user in self.user_cache:\n if query in user[\"name\"]:\n return user\n # then search the emails\n for user in self.user_cache:\n if query in user[\"email\"]:\n return user\n return None", "def get_list_of_likers_message(self, mid):\n cursor = self.get_cursor()\n query = 'SELECT users.uid, users.first_name, users.last_name, users.username, ' \\\n 'vote.voted_on ' \\\n 'FROM users INNER JOIN vote ON users.uid = vote.uid AND vote.upvote = TRUE ' \\\n 'INNER JOIN messages ON messages.mid = vote.mid AND messages.mid = %s'\n cursor.execute(query, (mid,))\n return cursor.fetchall()", "def list(self, request):\n users = WhoYouUser.objects.all()\n\n # Filter content based on query parameters\n # e.g.: /user?name=\"fred\"\n search_Name = self.request.query_params.get('name', None)\n if search_Name is not None:\n # TODO: Implement a search that uses the django orm instead of iterating over\n # the list of users\n users = (user for user in users if search_Name.lower() in user.name.lower())\n\n\n serializer = WhoYouUserSerializer(\n users, many=True, context={'request': request}\n )\n return Response(serializer.data)", "def post(self, request, *args, **kwargs):\n\n user_wall_post_comment = self.get_object()\n user_wall_post_comment.likes.add(self.request.user)\n return Response(status=201)", "def like_tweets(pos_tweets):\n\n for tweet in pos_tweets:\n twitter.CreateFavorite(status_id=tweet.id)\n\n return", "def list_posts(request):\n if request.method == 'POST':\n category = request.POST.get('category', False)\n posts = Post.objects.select_related('author')\\\n .filter(category=category)\\\n .order_by('-modified')\n # import pdb; pdb.set_trace()\n return render(request, 'posts/index.html',\n {'posts': posts})\n\n posts = Post.objects.select_related('author').order_by('-modified')\n likes = Likes.objects.select_related('post')\n\n return render(request, 'posts/index.html',\n 
{'posts': posts})", "def user_list(request, user_name):\n bookmarks = get_list_or_404(Bookmark.objects.all().filter(human__username=user_name))\n return render(request, 'urly_bird/any_user_list.html', {'bookmarks': bookmarks})", "def getNewsFeed(self, userId: int):\n if userId not in self.followList:\n self.followList[userId] = [userId]\n res = []\n\n for user in self.followList[userId]:\n if self.tweetTimeLine.get(user, [user]):\n res += self.tweetTimeLine.get(user, [])\n res.sort()\n res = res[:10]\n # print(res)\n return [i[1] for i in res]", "def __user_interested_post_filter(self, avg_user_liked_post_weight):\n query = Post.objects.extra(select={'is_top': \"weight = \" + str(avg_user_liked_post_weight)})\n resultant_obj = query.extra(order_by = ['-is_top'])\n return resultant_obj", "def all_followers(twitter_dict, twitter_name): \r\n \r\n following_list = []\r\n for user in twitter_dict:\r\n f_list = twitter_dict[user]['following']\r\n if twitter_name in f_list:\r\n following_list.append(user) \r\n return following_list", "def get_dislikes(self, obj):\n return QuestionPersonLike.objects.filter(question=obj,\n like=False).count()", "def test_like(self):\n client = Client()\n slug = ['bryan-fox-snowboard-2017', 'some_slug'] # Correct and incorrect slug\n response = client.get('/products/{0}/'.format(slug[0]))\n self.assertEqual(response.status_code, 200)\n Like.objects.filter(product__slug=slug).count() # numbers like of post\n product = Product.objects.get(slug=slug[0])\n if response.context[\"user\"].is_authenticated():\n product.like_set.filter(user=response.context[\"user\"])", "def home(request):\n current_user = request.user\n\n # return_list = []\n # for image in all_images:\n # return_list.append((image, image.image_likes.filter(profile_owner=request.user)))\n\n return render(request,'main_templates/landing.html',{'user':current_user})", "def test_is_liked_by(self):\n\n u = self.user\n u2 = User(\n email=\"test2@test.com\" ,\n username=\"testuser2\" ,\n password=\"HASHED_PASSWORD\"\n )\n\n m = Message(\n user_id=u.id ,\n text='test content'\n )\n\n db.session.add_all([u2 , m])\n db.session.commit()\n\n #not like \n self.assertEqual(len(u2.likes) , 0)\n self.assertEqual(len(m.likes_users) , 0)\n self.assertEqual(u2.is_like(m) , False)\n self.assertEqual(m.is_liked_by(u2) , False)\n\n like = Likes(user_id=u2.id , message_id=m.id)\n db.session.add(like)\n db.session.commit()\n\n self.assertEqual(len(u2.likes) , 1)\n self.assertEqual(len(m.likes_users) , 1)\n self.assertEqual(u2.is_like(m) , True)\n self.assertEqual(m.is_liked_by(u2), True)", "def create_pagination(request, user=None):\n if user:\n posts = Post.objects.filter(author=user).order_by(\"-timestamp\")\n else:\n posts = Post.objects.all().order_by(\"-timestamp\")\n paginator = Paginator(posts, 5)\n current_page = request.GET.get(\"page\", 1)\n page_obj = paginator.get_page(current_page)\n context = {\"page_obj\": page_obj}\n if request.user.is_authenticated:\n liked_posts = request.user.liked_posts.all()\n context.update({\"liked_posts\": liked_posts})\n return context", "def list_user_tweets(username):\n userdata = query_db('select * from user where username = ?',\n [username], one=True)\n if userdata is None:\n abort(404)\n else:\n user_details = {\"username\": userdata['username'],\"user_id\":userdata['user_id']}\n\n followed = False\n if request.json.get('user_id') is not None:\n followed = query_db('''select 1 from follower where\n follower.who_id = ? 
and follower.whom_id = ?''',\n [request.json.get('user_id'), user_details.get('user_id')],\n one=True) is not None\n\n user_tweets = []\n if user_details is None:\n return jsonify({'message': 'User not found'}), 404\n tuples = query_db('''\n select message.*, user.* from message, user where\n user.user_id = message.author_id and user.user_id = ?\n order by message.pub_date desc limit ?''',\n [user_details['user_id'], PER_PAGE])\n\n for tuple in tuples:\n user_tweet = {}\n user_tweet[\"username\"] = tuple['username']\n user_tweet[\"email\"] = tuple['email']\n user_tweet[\"text\"] = tuple['text']\n user_tweet[\"pub_date\"] = tuple['pub_date']\n user_tweets.append(user_tweet)\n\n return jsonify({'user_tweets':user_tweets, 'followed' : followed, 'user_details':user_details}),200", "def findSuggestions():\n users = None\n if current_user.genderPreferences == \"any\":\n users = User.query.filter(or_(User.genderPreferences==current_user.gender, User.genderPreferences=='any'), User.state==current_user.state, User.city==current_user.city, User.id!=current_user.id).all()\n elif current_user.genderPreferences == \"male\":\n users = User.query.filter(or_(User.gender==\"male\", User.gender==\"other\"), or_(User.genderPreferences==current_user.gender, User.genderPreferences==\"any\"), User.state==current_user.state, User.city==current_user.city, User.id!=current_user.id).all()\n elif current_user.genderPreferences == \"female\":\n users = User.query.filter(or_(User.gender==\"female\", User.gender==\"other\"), or_(User.genderPreferences==current_user.gender, User.genderPreferences==\"any\"), User.state==current_user.state, User.city==current_user.city, User.id!=current_user.id).all()\n show_users = []\n print(users)\n for user in users:\n if (not user in current_user.likes) and (not user in current_user.dislikes):\n show_users.append(user)\n print(show_users)\n return show_users", "def dislike_post(request, pk):\r\n post = get_object_or_404(Post, id=request.POST.get('id'))\r\n disliked = False\r\n if post.dislikes.filter(id=request.user.id).exists():\r\n post.dislikes.remove(request.user)\r\n disliked = False\r\n else:\r\n if post.likes.filter(id=request.user.id).exists():\r\n post.likes.remove(request.user)\r\n post.dislikes.add(request.user)\r\n disliked = True\r\n\r\n if request.is_ajax():\r\n return JsonResponse({'total_dislikes': post.dislikes.count(),\r\n 'total_likes': post.likes.count(),\r\n 'disliked': disliked})", "def alsoLikesGraph(self, data, docID, visitorID = None):\n result = {}\n docVisitors, userDocuments = self.alsoLikesData(data)\n\n if docID in docVisitors:\n visitors = docVisitors[docID]\n else:\n print(\"Document %s not found in data set\" % str(docID))\n return\n\n for user in userDocuments:\n if user == visitorID:\n result[user] = [docID] # Don't want to include any other docs visitor has seen\n else:\n if user in visitors and not userDocuments[user] == [docID]: # Don't add visitors who contribute no other documents\n result[user] = userDocuments[user]\n return result", "def add_unsaved_likes_to_user(sender, user, request, **kwargs):\n session_likes = request.session.get('likes')\n if session_likes:\n user.userprofile.liked_products.add(*session_likes)", "def recommend_one_user(self, user_id: int) -> List[Item]:\n # Retrieve all the movies for this author\n already_seen_movies = self.database.interactions.get_user_interactions(user_id)\n return self.recommend_from_list(already_seen_movies)", "def unlike(self, data_base, user):\n cursor = data_base.cursor()\n 
cursor.execute(f\"UPDATE post SET likes = likes - 1 WHERE id = '{self.id}'\") # Decrements the likes\n cursor.execute(f\"DELETE FROM user_like WHERE user_id = {user.id} AND post_id = {self.id}\")\n if self.commit_to_db:\n data_base.commit()\n cursor.close()", "def feed(request):\n followers = request.user.profile.followers.values_list('pk', flat=True)\n posts = Post.objects.filter(author_id__in=followers)\n\n return render(request,\n 'posts/feed.html',\n {'posts': posts})", "def is_liked(value, user: User):\n return value.is_liked(user)", "def test_unauthenticated_user_liking(self):\n self.like_dislike(self.dislike_url(5))", "def get(self, post_id):\n Post.add_like(int(post_id), self.user.get_id())\n self.redirect('/blog')", "def users_followers(user_id):\n\n if not g.user:\n flash(\"Access unauthorized.\", \"danger\")\n return redirect(\"/\")\n\n user = User.query.get_or_404(user_id)\n users_blocking = [block.user_blocking_id for block in Blocks.query.all() if block.user_being_blocked_id == g.user.id]\n likes = [message for message in user.likes if message.user_id not in users_blocking]\n return render_template('users/followers.html', user=user, likes=likes)" ]
[ "0.80609584", "0.7395297", "0.69443375", "0.6895322", "0.68074024", "0.67923427", "0.6753025", "0.6566023", "0.6552493", "0.63974077", "0.639381", "0.6380745", "0.6334056", "0.6326958", "0.6302191", "0.62897587", "0.62885493", "0.62881505", "0.6275099", "0.6275099", "0.6238257", "0.62229425", "0.6217055", "0.6210115", "0.62097657", "0.6204797", "0.6191636", "0.6151198", "0.61173654", "0.6113968", "0.6097306", "0.60856855", "0.60824436", "0.60664886", "0.60581017", "0.60204506", "0.6010874", "0.5974446", "0.5954707", "0.5935603", "0.58982533", "0.5888669", "0.5876275", "0.5837567", "0.58344007", "0.5824619", "0.58235574", "0.5820995", "0.57546103", "0.5750249", "0.57476634", "0.5744902", "0.5728775", "0.5693421", "0.56924194", "0.568501", "0.5683507", "0.5673702", "0.5647824", "0.5633634", "0.5610169", "0.55927473", "0.5590257", "0.5579759", "0.557555", "0.55749255", "0.55651224", "0.55635023", "0.5563501", "0.55607647", "0.55605215", "0.5555576", "0.5539587", "0.55368865", "0.5533195", "0.55280507", "0.5524602", "0.5511516", "0.5509164", "0.5505244", "0.55023336", "0.5498549", "0.5494895", "0.549017", "0.54895175", "0.5468895", "0.5465871", "0.5461817", "0.544836", "0.5442112", "0.54326177", "0.54215765", "0.54168415", "0.54159606", "0.54131985", "0.540689", "0.5404368", "0.5398983", "0.5394137", "0.5386193" ]
0.76121527
1
Initialize a ``TFSPredictor``. See ``sagemaker.RealTimePredictor`` for more info about parameters.
def __init__(self, endpoint_name, sagemaker_session=None, serializer=json_serializer, deserializer=json_deserializer, content_type=None, model_name=None, model_version=None): super(Predictor, self).__init__(endpoint_name, sagemaker_session, serializer, deserializer, content_type) attributes = [] if model_name: attributes.append('tfs-model-name={}'.format(model_name)) if model_version: attributes.append('tfs-model-version={}'.format(model_version)) self._model_attributes = ','.join(attributes) if attributes else None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _build_predictor(self):\n try: \n predict_fn = tf.contrib.predictor.from_saved_model(self.saved_path)\n except OSError as err: \n print(f\"OSError: {err}\")\n self._predict_fn = predict_fn", "def init_tf(FLAGS):\n gpus = tf.config.experimental.list_physical_devices('GPU')\n\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n\n visible_gpus = []\n if gpus and not FLAGS.cpu:\n visible_gpus = gpus[hvd.local_rank()]\n tf.config.experimental.set_visible_devices(visible_gpus, 'GPU')\n\n if FLAGS.amp:\n policy = tf.keras.mixed_precision.Policy(\"mixed_float16\")\n tf.keras.mixed_precision.set_global_policy(policy)\n\n tf.config.run_functions_eagerly(FLAGS.run_eagerly)\n\n if FLAGS.tfdata_debug:\n tf.data.experimental.enable_debug_mode()\n\n if FLAGS.inter_op_parallelism:\n tf.config.threading.set_inter_op_parallelism_threads(FLAGS.inter_op_parallelism)\n\n if FLAGS.intra_op_parallelism:\n tf.config.threading.set_intra_op_parallelism_threads(FLAGS.intra_op_parallelism)\n\n tf.random.set_seed(hash((FLAGS.seed, hvd.rank())))", "def __init__(self, *args):\n _snap.TForestFire_swiginit(self, _snap.new_TForestFire(*args))", "def _get_tslearn_object(self):\n cls = self._get_tslearn_class()\n params = self.get_params()\n params[\"init\"] = params.pop(\"init_algorithm\")\n return cls(**params)", "def __init__(self, predictor_name, obj,\n predictor_scoring_fun=default_predictor_scoring_fun,\n score_cutoff_fun=default_score_cutoff_fun,\n n_features_dependent_kwargs=None,\n **kwargs):\n if n_features_dependent_kwargs is None:\n n_features_dependent_kwargs = {}\n self.n_features_dependent_kwargs = n_features_dependent_kwargs\n self.constant_kwargs = kwargs\n self.predictor_scoring_fun = predictor_scoring_fun\n self.score_cutoff_fun = score_cutoff_fun\n self.predictor_name = predictor_name\n sys.stdout.write(\n \"{}\\tPredictor {} is of type {}\\n\".format(timestamp(),\n self.predictor_name,\n obj))\n self._parent = obj\n self.__doc__ = obj.__doc__\n sys.stdout.write(\n \"{}\\tAdded {} to default predictors\\n\".format(timestamp(),\n self.predictor_name))", "def initialize(self):\n \n #initialize the variables\n init = tf.global_variables_initializer()\n self.session.run(init)\n \n #initialize the data iterators\n self.session.run(self.data_iterator.initializer)", "def __init__(self):\n self.sess = tf.Session()\n vocab_path = os.path.join(params.data_dir, \"vocab%d\" % params.vocab_size)\n self.vocab, self.rev_vocab = data_utils.initialize_vocabulary(vocab_path)\n self.model = model_utils.create_model(self.sess, True)\n self.model.batch_size = 1 # Respond 1 sentence at a time.", "def __init__(self, *args):\n _snap.TFltTr_swiginit(self, _snap.new_TFltTr(*args))", "def __init__(self, tfInputGraph=None, inputMapping=None, outputMapping=None, tfHParms=None):\n super(TFTransformer, self).__init__()\n kwargs = self._input_kwargs\n self.setParams(**kwargs)", "def __init__(self, *args):\n _snap.TFltPr_swiginit(self, _snap.new_TFltPr(*args))", "def __init__(self, *args):\n _snap.TFlt_swiginit(self, _snap.new_TFlt(*args))", "def __init__(self, checkpoint=None, predict_fn=None):\n if not checkpoint:\n logging.info(\"No checkpoint specified, defaulting to BLEURT-tiny.\")\n checkpoint = _get_default_checkpoint()\n\n logging.info(\"Reading checkpoint {}.\".format(checkpoint))\n self.config = checkpoint_lib.read_bleurt_config(checkpoint)\n max_seq_length = self.config[\"max_seq_length\"]\n vocab_file = self.config[\"vocab_file\"]\n do_lower_case = self.config[\"do_lower_case\"]\n sp_model = 
self.config[\"sp_model\"]\n\n logging.info(\"Creating BLEURT scorer.\")\n self.tokenizer = tokenizers.create_tokenizer(\n vocab_file=vocab_file, do_lower_case=do_lower_case, sp_model=sp_model)\n self.max_seq_length = max_seq_length\n self._predictor = _create_predictor(checkpoint, predict_fn)\n self._predictor.initialize()\n logging.info(\"BLEURT initialized.\")", "def __init__(self, *args):\n _snap.TFIn_swiginit(self, _snap.new_TFIn(*args))", "def __init__(self,\n ckpt: Text,\n max_embedding_batch_size: Optional[int] = 128,\n max_score_batch_size: Optional[int] = 128) -> None:\n super(TacDependentPredictor, self).__init__(\n ckpt,\n max_embedding_batch_size=max_embedding_batch_size,\n max_score_batch_size=max_score_batch_size)\n self.selected_tactic = -1", "def __init__(self, t_vector=None, lr_model=None, ncf=None):\r\n self.ncf = ncf", "def __init__(self) -> None:\n\n self.train_env = None # Training environment\n self.agent = None # The algorithm used to solve an RL problem is represented by a TF-Agent\n self.replay_buffer = None # The replay buffer keeps track of data collected from the environment\n self.dataset = None # The agent needs access to the replay buffer via an iterable tf.data.Dataset\n self.iterator = None # The iterator of self.dataset", "def initialize(self, finetune_lr=0.1):\n\n inputs = Tensor.matrix('inputs')\n outputs = Tensor.ivector('outputs')\n minibatch_index = Tensor.lscalar('minibatch_index')\n\n self.training_function = self.compiled_training_function(\n self.classifier,\n minibatch_index,\n inputs,\n outputs,\n finetune_lr\n )\n self.validation_eval_function = self.compiled_validation_function(\n self.classifier,\n minibatch_index,\n inputs,\n outputs\n )\n self.test_eval_function = self.compiled_test_function(\n self.classifier,\n minibatch_index,\n inputs,\n outputs\n )", "def __init__(\n self,\n data_signature: Dict[Text, Dict[Text, List[FeatureSignature]]],\n config: Dict[Text, Any],\n max_history_featurizer_is_used: bool,\n label_data: RasaModelData,\n entity_tag_specs: Optional[List[EntityTagSpec]],\n ) -> None:\n super().__init__(\"TED\", config, data_signature, label_data)\n\n self.max_history_featurizer_is_used = max_history_featurizer_is_used\n\n self.predict_data_signature = {\n feature_name: features\n for feature_name, features in data_signature.items()\n if feature_name in PREDICTION_FEATURES\n }\n\n self._entity_tag_specs = entity_tag_specs\n\n # metrics\n self.action_loss = tf.keras.metrics.Mean(name=\"loss\")\n self.action_acc = tf.keras.metrics.Mean(name=\"acc\")\n self.entity_loss = tf.keras.metrics.Mean(name=\"e_loss\")\n self.entity_f1 = tf.keras.metrics.Mean(name=\"e_f1\")\n self.metrics_to_log += [\"loss\", \"acc\"]\n if self.config[ENTITY_RECOGNITION]:\n self.metrics_to_log += [\"e_loss\", \"e_f1\"]\n\n # needed for efficient prediction\n self.all_labels_embed: Optional[tf.Tensor] = None\n\n self._prepare_layers()", "def __init__(self, facePredictor = None):\n self.detector = dlib.get_frontal_face_detector()\n if facePredictor != None:\n self.predictor = dlib.shape_predictor(facePredictor)\n else:\n self.predictor = None", "def build(self):\n if self.predictor_constructor is None:\n print('[ERROR] build_predictor_fn not set, skip.')\n else:\n if hasattr(self, 'predictor'):\n print(\n \"[WARNING] predictor is already set, predictor is overridden\")\n del self.predictor\n with self.init_scope():\n self.predictor = self.predictor_constructor(\n **self.filter_sk_params(self.predictor_constructor)\n )\n self.update_device(self.device)", 
"def __init__(self, training_steps=None, evaluation_steps=None, checkpoint_path=None, py_version='py2',\n framework_version=None, model_dir=None, requirements_file='', image_name=None,\n script_mode=False, distributions=None, **kwargs):\n if framework_version is None:\n LOGGER.warning(fw.empty_framework_version_warning(TF_VERSION, TF_VERSION))\n self.framework_version = framework_version or TF_VERSION\n\n super(TensorFlow, self).__init__(image_name=image_name, **kwargs)\n self.checkpoint_path = checkpoint_path\n self.py_version = py_version\n self.training_steps = training_steps\n self.evaluation_steps = evaluation_steps\n self.model_dir = model_dir\n self.script_mode = script_mode\n self.distributions = distributions or {}\n\n self._validate_args(py_version=py_version, script_mode=script_mode, framework_version=framework_version,\n training_steps=training_steps, evaluation_steps=evaluation_steps,\n requirements_file=requirements_file, checkpoint_path=checkpoint_path)\n self._validate_requirements_file(requirements_file)\n self.requirements_file = requirements_file", "def __init__(\n self,\n endpoint_name,\n sagemaker_session=None,\n serializer=RecordSerializer(),\n deserializer=RecordDeserializer(),\n ):\n super(RandomCutForestPredictor, self).__init__(\n endpoint_name,\n sagemaker_session,\n serializer=serializer,\n deserializer=deserializer,\n )", "def __init__(self, tf_session):\n self._sess = tf_session", "def _initialize(self) -> None:\n p = self.params\n # We make self.input public so that users can access its methods like\n # IdsToStrings if needed.\n with py_utils.infeed_context_scope(\n infeed_host_index=p.infeed_host_index,\n num_infeed_hosts=p.num_infeed_hosts):\n self.input = p.input.Instantiate()\n\n if hasattr(self.input, 'datasource') and isinstance(\n self.input.datasource, datasource.TFDatasetSource):\n # For the special case when the input is implemented by a tf.data.Dataset,\n # use it directly. 
Otherwise roundtrip adaptions may result in returning\n # duplciate batches.\n self._get_next_fn = self.input.datasource.GetNext\n else:\n self._get_next_fn = tf.function(self._get_batch)\n self._num_batches_produced = 0", "def __init__(self, vector_dim: int = 128, **kwargs):\n\n # Init vectorizer\n self.vectorizer = TfidfVectorizer(max_features=vector_dim, **kwargs)\n # Set is_trained = True if vectorizer is trained, is_trained = False\n self.is_trained = False", "def __init__(self, *args):\n _snap.TFltTrV_swiginit(self, _snap.new_TFltTrV(*args))", "def initiate(self):\n # if self.opt.checkpoint_encoder:\n # self.load(self.opt.checkpoint_encoder, self.opt.checkpoint_decoder)\n # else:\n # start fresh.\n self.model = Transformer(\n self.opt.src_vocab_size,\n self.opt.tgt_vocab_size,\n self.opt.max_token_seq_len,\n tgt_emb_prj_weight_sharing=self.opt.proj_share_weight,\n emb_src_tgt_weight_sharing=self.opt.embs_share_weight,\n d_k=self.opt.d_k,\n d_v=self.opt.d_v,\n d_model=self.opt.d_model,\n d_word_vec=self.opt.d_word_vec,\n d_inner=self.opt.d_inner_hid,\n n_layers=self.opt.layers,\n n_head=self.opt.n_head,\n dropout=self.opt.dropout).to(self.device)\n \n for p in self.model.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)", "def __init__(self):\n\n constant_extratrees_kwargs = {'bootstrap': True,\n 'random_state': 0,\n 'oob_score': True,\n 'verbose': True}\n\n self.predictor_config(\n 'ExtraTreesClassifier', obj=ExtraTreesClassifier,\n n_features_dependent_kwargs={\n 'max_features': PredictorConfigScalers.max_feature_scaler,\n 'n_estimators': PredictorConfigScalers.n_estimators_scaler,\n 'n_jobs': PredictorConfigScalers.n_jobs_scaler},\n **constant_extratrees_kwargs)\n\n self.predictor_config(\n 'ExtraTreesRegressor', obj=ExtraTreesRegressor,\n n_features_dependent_kwargs={\n 'max_features': PredictorConfigScalers.max_feature_scaler,\n 'n_estimators': PredictorConfigScalers.n_estimators_scaler,\n 'n_jobs': PredictorConfigScalers.n_jobs_scaler},\n **constant_extratrees_kwargs)\n\n constant_boosting_kwargs = {'n_estimators': 80, 'max_features': 1000,\n 'learning_rate': 0.2, 'subsample': 0.6, }\n\n self.predictor_config('GradientBoostingClassifier',\n obj=GradientBoostingClassifier,\n **constant_boosting_kwargs)\n\n self.predictor_config('GradientBoostingRegressor',\n obj=GradientBoostingRegressor,\n **constant_boosting_kwargs)", "def __init__(self, *args):\n _snap.TFltPrV_swiginit(self, _snap.new_TFltPrV(*args))", "def __init__(self, samples, theta, **kwargs):\n super().__init__(dynamic=True, **kwargs)\n self.samples = tf.Variable(initial_value=samples, trainable=True)\n self.theta = tf.Variable(initial_value=theta, trainable=True)", "def __call__(self, n_features):\n parameters = self.parameters(n_features)\n\n sys.stdout.write(\n \"{} Configuring predictor type: {} with {} features\".format(\n timestamp(), self.predictor_name, n_features))\n\n predictor = self._parent(**parameters)\n predictor.score_cutoff_fun = self.score_cutoff_fun\n predictor.predictor_scoring_fun = self.predictor_scoring_fun\n predictor.has_been_fit = False\n predictor.has_been_scored = False\n predictor._score_coefficient = SCORE_COEFFICIENT\n return predictor", "def __init__(self):\n\n self.result = None # To store the result\n self.predictor = None # To store the fit predictor", "def __init__(self, predictor, scaler):\n\n # Check arguments\n if predictor is None:\n raise ValueError('Cannot load genotyper predictor `None`')\n\n if scaler is None:\n raise ValueError('Cannot load feature scaler 
`None`')\n\n if isinstance(predictor, str):\n predictor = joblib.load(predictor)\n\n if isinstance(scaler, str):\n scaler = joblib.load(scaler)\n\n if not isinstance(predictor, SVC):\n raise ValueError('Predictor must be class sklearn.svm.SVC: Found \"{}\"'.format(type(predictor)))\n\n if not isinstance(scaler, StandardScaler):\n raise ValueError(\n 'Scaler must be class sklearn.preprocessing.StandardScaler: Found \"{}\"'.format(type(scaler))\n )\n\n # Set fields\n self.predictor = predictor\n self.scaler = scaler", "def __init__(\n self,\n config: Dict[Text, Any],\n model_storage: ModelStorage,\n resource: Resource,\n execution_context: ExecutionContext,\n model: Optional[RasaModel] = None,\n featurizer: Optional[TrackerFeaturizer] = None,\n fake_features: Optional[Dict[Text, List[Features]]] = None,\n entity_tag_specs: Optional[List[EntityTagSpec]] = None,\n ) -> None:\n super().__init__(\n config, model_storage, resource, execution_context, featurizer=featurizer\n )\n self.split_entities_config = rasa.utils.train_utils.init_split_entities(\n config[SPLIT_ENTITIES_BY_COMMA], SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE\n )\n self._load_params(config)\n\n self.model = model\n\n self._entity_tag_specs = entity_tag_specs\n\n self.fake_features = fake_features or defaultdict(list)\n # TED is only e2e if only text is present in fake features, which represent\n # all possible input features for current version of this trained ted\n self.only_e2e = TEXT in self.fake_features and INTENT not in self.fake_features\n\n self._label_data: Optional[RasaModelData] = None\n self.data_example: Optional[Dict[Text, Dict[Text, List[FeatureArray]]]] = None\n\n self.tmp_checkpoint_dir = None\n if self.config[CHECKPOINT_MODEL]:\n self.tmp_checkpoint_dir = Path(rasa.utils.io.create_temporary_directory())", "def __init__(self, config, **kwargs):\n super().__init__(config, **kwargs)\n\n self.pitch_predictor = FastSpeechVariantPredictor(\n config, dtype=tf.float32, name='pitch_predictor'\n )\n self.duration_predictor = FastSpeechVariantPredictor(\n config, dtype=tf.float32, name='duration_predictor'\n )\n\n self.pitch_embeddings = tf.keras.layers.Conv1D(\n filters=config.encoder_self_attention_params.hidden_size,\n kernel_size=3,\n padding='same',\n name='pitch_embeddings',\n )", "def __new__(cls, name, *args, **kwargs):\n instance = super(TFModel, cls).__new__(cls)\n instance.__scope_name = name\n instance.__graph = tf.Graph()\n instance.__phase = tf.placeholder(tf.bool)\n return instance", "def __init__(self, brain_spec, hparams):\n self._brain_spec = brain_spec\n self._validate_spec()\n self._hparams = hparams\n super().__init__(input_tensor_spec=brain_spec.observation_spec.tfa_spec)", "def __init__(self, latent_vars=None, data=None):\n if isinstance(latent_vars, list):\n with tf.variable_scope(None, default_name=\"posterior\"):\n latent_vars_dict = {}\n continuous = \\\n ('01', 'nonnegative', 'simplex', 'real', 'multivariate_real')\n for z in latent_vars:\n if not hasattr(z, 'support') or z.support not in continuous:\n raise AttributeError(\n \"Random variable {} is not continuous or a random \"\n \"variable with supported continuous support.\".format(z))\n batch_event_shape = z.batch_shape.concatenate(z.event_shape)\n loc = tf.Variable(tf.random_normal(batch_event_shape))\n scale = tf.nn.softplus(\n tf.Variable(tf.random_normal(batch_event_shape)))\n latent_vars_dict[z] = Normal(loc=loc, scale=scale)\n latent_vars = latent_vars_dict\n del latent_vars_dict\n\n super(elbo_optimizer, self).__init__(latent_vars, data)", 
"def __init__(\n self,\n model_dir,\n model_filename=\"model.pdmodel\",\n params_filename=\"model.pdiparams\",\n precision=\"fp32\",\n use_trt=False,\n use_mkldnn=False,\n batch_size=1,\n device=\"CPU\",\n min_subgraph_size=3,\n use_dynamic_shape=False,\n cpu_threads=1,\n ):\n self.rerun_flag = False\n if device != \"GPU\" and use_trt:\n raise ValueError(\n \"Predict by TensorRT mode: {}, expect device=='GPU', but device == {}\".format(precision, device)\n )\n config = Config(os.path.join(model_dir, model_filename), os.path.join(model_dir, params_filename))\n if device == \"GPU\":\n # initial GPU memory(M), device ID\n config.enable_use_gpu(200, 0)\n # optimize graph and fuse op\n config.switch_ir_optim(True)\n else:\n config.disable_gpu()\n config.set_cpu_math_library_num_threads(cpu_threads)\n config.switch_ir_optim()\n if use_mkldnn:\n config.enable_mkldnn()\n if precision == \"int8\":\n config.enable_mkldnn_int8({\"conv2d\", \"depthwise_conv2d\", \"pool2d\", \"transpose2\", \"elementwise_mul\"})\n if precision == \"bf16\":\n config.enable_mkldnn_bfloat16()\n\n if precision == \"bf16\":\n config.enable_mkldnn_bfloat16()\n\n if use_trt:\n if precision == \"bf16\":\n print(\"paddle trt does not support bf16, switching to fp16.\")\n precision = \"fp16\"\n\n precision_map = {\n \"int8\": Config.Precision.Int8,\n \"fp32\": Config.Precision.Float32,\n \"fp16\": Config.Precision.Half,\n }\n assert precision in precision_map.keys()\n\n if use_dynamic_shape:\n dynamic_shape_file = os.path.join(model_dir, \"dynamic_shape.txt\")\n if os.path.exists(dynamic_shape_file):\n config.enable_tuned_tensorrt_dynamic_shape()\n print(\"trt set dynamic shape done!\")\n else:\n # In order to avoid memory overflow when collecting dynamic shapes, it is changed to use CPU.\n config.disable_gpu()\n config.set_cpu_math_library_num_threads(10)\n config.collect_shape_range_info(dynamic_shape_file)\n print(\"Start collect dynamic shape...\")\n self.rerun_flag = True\n\n if not self.rerun_flag:\n config.enable_tensorrt_engine(\n workspace_size=1 << 30,\n max_batch_size=batch_size,\n min_subgraph_size=min_subgraph_size,\n precision_mode=precision_map[precision],\n use_static=True,\n use_calib_mode=False,\n )\n\n # enable shared memory\n config.enable_memory_optim()\n self.predictor = create_predictor(config)\n self.input_handles = [self.predictor.get_input_handle(name) for name in self.predictor.get_input_names()]\n self.output_handles = [self.predictor.get_output_handle(name) for name in self.predictor.get_output_names()]\n print(\"[Paddle Inference Backend] Completed PaddleInferenceEngine init ...\")", "def _setup_init(self):\n with tf.variable_scope(\"output\", reuse=True):\n assert self.q_values is not None\n self.policy_proba = tf.nn.softmax(self.q_values)", "def __init__(self, tensor, df):\n super().__init__()\n self.tensor = tensor\n self.df = df", "def __init__(self, dataset, more_augmentation=False):\n self.dataset = dataset\n self.generator_factory = GeneratorFactory(self.dataset,\n more_augmentation)\n phases = [\"train\", \"validation\", \"test\"]\n self.generators = {n: None for n in phases}\n self.iterator = None\n self.iterator_init_ops = {n: None for n in phases}\n self.clouds_tensor, self.cloud_slice_indices = \\\n self.create_cloud_constants()\n self.obj_ids_pl = tf.placeholder(tf.int32, shape=(None, 2),\n name=\"obj_ids\")\n self.translations_pl = tf.placeholder(tf.float32, shape=(None, 2, 3),\n name=\"translations\")\n self.rotations_pl = tf.placeholder(tf.float32, shape=(None, 2, 4),\n 
name=\"rotations\")\n self.rotations = None # stores the resulting rotations ...\n self.translations = None # ... and translations when generalizing\n self.translation_vars = []\n self.rotation_vars = []", "def __init__(self, *args):\n _snap.TFltVP_swiginit(self, _snap.new_TFltVP(*args))", "def init(self):\n inputs = self.inputs()\n outputs = self.outputs(inputs)\n self.model = tf.keras.Model(inputs=inputs, outputs=outputs)\n self.model.compile(optimizer=self.optimizer() or self.config.get('optimizer'),\n loss=self.loss() or None,\n metrics=self.metrics() or None,\n loss_weights=self.loss_weights() or None,\n weighted_metrics=self.weighted_metrics() or None,\n target_tensors=self.target_tensors() or None)\n if self.config.get('debug'):\n self.model.summary()", "def initialisation(self):\n self.create_variables()\n self.create_placeholders()\n self.build_model()\n self.reset_lr(None, True)\n self.build_loss()\n self.initialised = True", "def __init__(self, samples, **kwargs):\n super().__init__(dynamic=True, **kwargs)\n self.samples = tf.Variable(initial_value=samples, trainable=True)", "def __init__(self) -> None:\n\n self.config = TbSETConfig()\n self.session = PromptSession()\n self.commands = WordCompleter([\n \"train\",\n \"translate\"\n ])\n\n # Check if saved model is present for inference\n saved_path = self.config.TRN_HYPERP[\"save_path\"]\n\n if saved_path and tf.saved_model.contains_saved_model(saved_path):\n print(\"INFO: Trained model found. It will be used for inference.\\n\")\n self.saved_translator = tf.saved_model.load(saved_path)\n else:\n print(\"INFO: Trained model not found. Please train the model before making inference.\\n\")\n self.saved_translator = None", "def __init__(self, features, hparams, mode):\n valid_modes = [\n tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL,\n tf.estimator.ModeKeys.PREDICT\n ]\n if mode not in valid_modes:\n raise ValueError(\"Expected mode in {}. Got: {}\".format(valid_modes, mode))\n\n self.features = features\n self.hparams = hparams\n self.mode = mode\n\n self.ccf_data = self.features[\"ccf_data\"]\n self.label = self.features[\"label\"]\n self.total_loss = None", "def __init__(self, features, hparams, mode):\n valid_modes = [\n tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL,\n tf.estimator.ModeKeys.PREDICT\n ]\n if mode not in valid_modes:\n raise ValueError(\"Expected mode in {}. 
Got: {}\".format(valid_modes, mode))\n\n self.features = features\n self.hparams = hparams\n self.mode = mode\n\n self.ccf_data = self.features[\"ccf_data\"]\n self.label = self.features[\"label\"]\n self.total_loss = None", "def init_tensors(self, sample, *args):\n raise NotImplementedError", "def __init__(self, *args):\n _snap.TFOut_swiginit(self, _snap.new_TFOut(*args))", "def __init__(self,\n action_space,\n framework=\"tf\",\n static_params=None,\n time_dependent_params=None,\n **kwargs):\n assert framework is not None\n super().__init__(\n action_space=action_space, framework=framework, **kwargs)\n\n self.static_params = static_params or {}\n\n # TODO(sven): Support scheduled params whose values depend on timestep\n # and that will be passed into the distribution's c'tor.\n self.time_dependent_params = time_dependent_params or {}", "def initialize(self) -> None:\n self._step = self._start_step\n self._tebd_propagator = compute_tebd_propagator(\n system_chain=self._system_chain,\n time_step=self._parameters.dt/2.0,\n epsrel=self._parameters.epsrel,\n order=self._parameters.order)\n self._results = {}\n self._t_mps = PtTebdBackend(\n gammas=self._initial_augmented_mps.gammas,\n lambdas=self._initial_augmented_mps.lambdas,\n epsrel=self._parameters.epsrel,\n config=self._backend_config)\n self._init_results()\n self._apply_controls(step=self.step, post=False)\n self._append_results()", "def __init__(\n self,\n model_data: Union[str, PipelineVariable],\n role: Optional[str] = None,\n sagemaker_session: Optional[Session] = None,\n **kwargs\n ):\n sagemaker_session = sagemaker_session or Session()\n image_uri = image_uris.retrieve(\n RandomCutForest.repo_name,\n sagemaker_session.boto_region_name,\n version=RandomCutForest.repo_version,\n )\n pop_out_unused_kwarg(\"predictor_cls\", kwargs, RandomCutForestPredictor.__name__)\n pop_out_unused_kwarg(\"image_uri\", kwargs, image_uri)\n super(RandomCutForestModel, self).__init__(\n image_uri,\n model_data,\n role,\n predictor_cls=RandomCutForestPredictor,\n sagemaker_session=sagemaker_session,\n **kwargs\n )", "def __init__(self, args):\n self.lr = args.lr\n self.l1_reg = args.l1_reg\n self.l2_reg = args.l2_reg\n self.nb_epochs = args.nb_iter\n self.dropout_p = args.dropout_p\n self.input_size = args.input_size\n self.batch_size = args.batch_size\n self.nb_targets = args.nb_targets\n self.activation = args.activation\n self.optimizer = args.optimizer\n\n self.global_step = tf.Variable(initial_value=0, trainable=False, name=\"global_step\")\n self.sess = tf.InteractiveSession()", "def initialize_session(self):\n self.logger.info(\"Initializing tf session\")\n self.sess = tf.compat.v1.Session()\n self.sess.run(tf.compat.v1.global_variables_initializer())\n self.saver = tf.compat.v1.train.Saver()", "def __init__(self, *args):\n _snap.TFltV_swiginit(self, _snap.new_TFltV(*args))", "def __init__(self, tf_id: str, name: str, site_length: int = None):\n\n self.type = 'transcription_factor'\n self.id = tf_id\n self.name = name\n self.site_length = site_length\n\n # initialize the binding sites dictionary; we will always have the \"base\" case of binding sites for the\n # \"unmodified\" conformation (i.e. 
just the TF); so we can add that key now\n self.binding_sites = {name: []}\n self.regulons = []\n self.i_modulons = []", "def __init__(self,\n transition_parameters = None,\n initial_dist_logits = None,\n batch_dims = 0,\n name = 'FiniteStateMarkovModel'):\n super().__init__(name=name)\n self._maybe_set_static_parameters(\n transition_parameters=transition_parameters,\n initial_dist_logits=initial_dist_logits)\n self._batch_dims = batch_dims", "def __init__(\n self,\n feature_config: Union[tf.tpu.experimental.embedding.FeatureConfig,\n Iterable], # pylint:disable=g-bare-generic\n optimizer: Optional[Union[tf.tpu.experimental.embedding.SGD,\n tf.tpu.experimental.embedding.Adagrad,\n tf.tpu.experimental.embedding.Adam,\n tf.tpu.experimental.embedding.FTRL]],\n pipeline_execution_with_tensor_core: bool = False,\n batch_size: Optional[int] = None,\n embedding_feature: Optional[\n tf.tpu.experimental.HardwareFeature.EmbeddingFeature] = None):\n super().__init__()\n self._feature_config, self._table_config_map = (\n _clone_and_prepare_features(feature_config))\n self._optimizer = _normalize_and_prepare_optimizer(optimizer)\n\n self._strategy = tf.distribute.get_strategy()\n self._using_tpu = _is_tpu_strategy(self._strategy)\n\n self._embedding_feature = None\n if self._using_tpu:\n self._embedding_feature = self._strategy.extended.tpu_hardware_feature.embedding_feature\n # Override the embedding feature setting if passed.\n if embedding_feature is not None:\n if embedding_feature == _EMBEDDING_UNSUPPORTED:\n self._embedding_feature = _EMBEDDING_UNSUPPORTED\n if (embedding_feature != _EMBEDDING_UNSUPPORTED and\n self._embedding_feature != embedding_feature):\n raise ValueError(\n \"TPU only supports {} and {}, but got {} which is not supported.\"\n .format(_EMBEDDING_UNSUPPORTED, self._embedding_feature,\n embedding_feature))\n\n # Create TPU embedding mid level APIs according to the embedding feature\n # setting.\n self._tpu_embedding = self._create_tpu_embedding_mid_level_api(\n self._using_tpu, self._embedding_feature,\n pipeline_execution_with_tensor_core)\n\n self.batch_size = batch_size\n\n self._tpu_call_id = 0", "def __init__(self,\n name: str,\n input_sequence: TemporalStateful,\n save_checkpoint: str = None,\n load_checkpoint: str = None,\n initializers: InitializerSpecs = None) -> None:\n check_argument_types()\n ModelPart.__init__(self, name, save_checkpoint, load_checkpoint,\n initializers)\n\n self.input_sequence = input_sequence\n\n with self.use_scope():\n self.train_mode = tf.placeholder(tf.bool, [], \"train_mode\")\n self._input_mask = tf.expand_dims(\n self.input_sequence.temporal_mask, -1)\n self._masked_input = (\n self.input_sequence.temporal_states * self._input_mask)", "def __init__(self, *args, **kwargs):\n self.classes = [0,1] # (default to 0/1; replace during training)\n self.theta = np.array([]) # placeholder value before training\n\n if len(args) or len(kwargs): # if we were given optional arguments,\n self.train(*args,**kwargs) # just pass them through to \"train\"", "def from_time(cls, fs, tdata, **kwargs):\n tf = cls(fs, tdata=tdata, **kwargs)\n return tf", "def __init__(self, *args):\n _snap.TSStr_swiginit(self, _snap.new_TSStr(*args))", "def __init__(self, *args):\n _snap.TFltKd_swiginit(self, _snap.new_TFltKd(*args))", "def __init__(self):\r\n self.pub_tf = rospy.Publisher(\"/tf\", tf.msg.tfMessage, queue_size=1)\r\n\r\n #Loads the robot model, which contains the robot's kinematics information\r\n self.robot = URDF.from_parameter_server()\r\n\r\n #Subscribes to 
information about what the current joint values are.\r\n rospy.Subscriber(\"joint_states\", JointState, self.callback)", "def __init__(self):\n if not os.path.isfile(PREDICTOR_PATH):\n try:\n print ('Predictor not found. Downloading...this may take a while...')\n url = 'https://github.com/hriddhidey/visage/blob/master/visage/shape_predictor_68_face_landmarks.dat?raw=true'\n def dl_progress(count, block_size, total_size):\n \"\"\" Show download progress bar. \"\"\"\n percent = int(count*block_size*100/total_size)\n sys.stdout.write(\"\\r\" + 'Progress:' + \"...%d%%\" % percent)\n sys.stdout.flush()\n urlretrieve(\n url,\n PREDICTOR_PATH,\n reporthook=dl_progress\n )\n print ('Predictor downloaded.')\n except IOError:\n print ('Download failed. Try again with reliable network connection.')\n raise IOError\n self.predictor = dlib.shape_predictor(PREDICTOR_PATH)\n self.cascade = cv2.CascadeClassifier(CASC_PATH)\n self.detector = dlib.get_frontal_face_detector()", "def __init__(self, n_features, n_templates, n_template_nodes, n_classes, n_hidden_layers, feature_init_mean=0., feature_init_std=1.):\n super().__init__()\n\n self.n_templates = n_templates\n self.n_template_nodes = n_template_nodes\n self.n_hidden_layers = n_hidden_layers\n self.n_features = n_features\n\n self.conv = GCNConv(self.n_features, self.n_hidden_layers)\n\n self.TFGW = TFGWPooling(self.n_hidden_layers, self.n_templates, self.n_template_nodes, feature_init_mean=feature_init_mean, feature_init_std=feature_init_std)\n\n self.linear = Linear(self.n_templates, n_classes)", "def __init__(self, params, device_assigner=None, model_dir=None,\n graph_builder_class=tensor_forest.RandomForestGraphs,\n config=None, weights_name=None, keys_name=None,\n feature_engineering_fn=None,\n early_stopping_rounds=100,\n num_trainers=1, trainer_id=0,\n report_feature_importances=False,\n local_eval=False):\n super(TensorForestEstimator, self).__init__(\n model_fn=get_model_fn(\n params.fill(),\n graph_builder_class,\n device_assigner,\n weights_name=weights_name,\n keys_name=keys_name,\n early_stopping_rounds=early_stopping_rounds,\n num_trainers=num_trainers,\n trainer_id=trainer_id,\n report_feature_importances=report_feature_importances,\n model_dir=model_dir,\n local_eval=local_eval),\n model_dir=model_dir,\n config=config,\n feature_engineering_fn=feature_engineering_fn)", "def __init__(self, path_ft, q0, qfs, words):\n\n self.treat = Treater(path_ft)\n\n self.q0 = q0 \n self.qfs = qfs\n self.words = words\n self.process_example()", "def __init__(self, seed = None):\n self.data_dir = pkg_resources.resource_filename('logistic_control_variate', 'data/')\n self.generate_data(seed)\n # Holds logistic regression object for this example\n self.lr = None", "def init_weights_and_state(self, input_signature):\n if self._mode == 'predict':\n cache_signature = input_signature[4:6]\n self.state = self._fast_inference_init_state(cache_signature)", "def __init__(self, word, tf):\n self.word = word\n self.tf = tf", "def __init__(self, dim_x: int = 1, kf: Optional[Kalman] = None):\n # TODO: Add support for x_init. 
Needs reimplementation of NFourSID.\n\n super().__init__()\n\n if kf is None:\n self.kf = None\n self.dim_x = dim_x\n self._kf_provided = False\n else:\n self.kf = kf\n self.dim_u = kf.state_space.u_dim\n self.dim_x = kf.state_space.x_dim\n self.dim_y = kf.state_space.y_dim\n self._kf_provided = True\n if self.dim_u > 0:\n self._expect_covariates = True", "def initialize_trainer(self):\n self.initialize_matrices()\n self.initialize_model()\n self.initialize_optimizers()\n return self", "def __init__(self):\n\n rospy.init_node('mcl_tf')\n br = tf.TransformBroadcaster()\n self.tf_listener = tf.TransformListener()\n \n # Give the listener some time to accumulate transforms... \n rospy.sleep(1.0) \n\n rospy.Subscriber('amcl_pose', PoseStamped, self.pose_callback)\n\n self.transform_position = np.array([0., 0., 0.])\n self.transform_quaternion = np.array([0., 0., 0., 1.0])\n \n # Broadcast the transform at 10 HZ\n while not rospy.is_shutdown():\n br.sendTransform(self.transform_position,\n self.transform_quaternion,\n rospy.Time.now(),\n \"odom\",\n \"map\")\n rospy.sleep(.1)", "def __init__(self, model_name: str, label_file: str) -> None:\n\n # Append TFLITE extension to model_name if there's no extension\n _, ext = os.path.splitext(model_name)\n if not ext:\n model_name += '.tflite'\n\n # Initialize the TFLite model.\n interpreter = Interpreter(model_path=model_name, num_threads=4)\n interpreter.allocate_tensors()\n\n self._input_index = interpreter.get_input_details()[0]['index']\n self._output_index = interpreter.get_output_details()[0]['index']\n self._interpreter = interpreter\n\n self.pose_class_names = self._load_labels(label_file)", "def setup(self, context: ExecutionContext) -> BaseStep:\n if self.is_initialized:\n return self\n\n self.graph = tf.Graph()\n with self.graph.as_default():\n with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):\n self.session = tf.Session(config=tf.ConfigProto(log_device_placement=True), graph=self.graph)\n\n model = self.create_model(self, context)\n if not isinstance(model, tuple):\n tf.identity(model, name='output')\n else:\n tf.identity(model[0], name='output')\n tf.identity(model[1], name='inference_output')\n\n tf.identity(self.create_loss(self), name='loss')\n self.create_optimizer(self, context).minimize(self['loss'], name='optimizer')\n\n init = tf.global_variables_initializer()\n self.session.run(init)\n self.is_initialized = True", "def __init__(self, graph=None, *args, **kwargs):\n # Set TensorFlow graph. 
All TF code will work on this graph.\n self.graph = graph or tf.Graph()\n self.SetParams(*args, **kwargs)", "def _init_vars(self):\n print \"Initializing session\"\n self.x = tf.placeholder(tf.float32, shape=[None, 784])\n self.y = tf.placeholder(tf.float32, shape=[None, 10])", "def __init__(self, *args):\n _snap.TSFlt_swiginit(self, _snap.new_TSFlt(*args))", "def initialize_session(self):\r\n self.logger.info(\"Initializing tf session\")\r\n self.sess = tf.Session()\r\n self.sess.run(tf.global_variables_initializer())\r\n self.saver = tf.train.Saver()", "def init_training(self):\n\n if not os.path.exists(self._model_root_path):\n os.makedirs(self._model_root_path)\n\n # Only initialize once!\n if self._model is None:\n self._model = TrainableAimbotModel(self._config, self._fov,\n os.path.join(self._model_root_path, 'aimbot_model.tf'))\n\n if not os.path.isfile(self._train_data_tfrecord_path) and not os.path.isfile(self._test_data_tfrecord_path):\n # Only create if not existing\n images_labels = _get_annotations_and_images(self._image_path)\n images_labels_train, images_labels_test = train_test_split(images_labels, shuffle=True, test_size=0.20)\n\n self._model.create_tfrecords(self._train_data_tfrecord_path, images_labels_train)\n self._model.create_tfrecords(self._test_data_tfrecord_path, images_labels_test)\n\n self._train_data_set = self._model.create_dataset(self._train_data_tfrecord_path, augment=True, shuffle=True)\n self._test_data_set = self._model.create_dataset(self._train_data_tfrecord_path)", "def recent_model_init(model_args, task_infos, tokenizer):\n config = AutoConfig.from_pretrained(\n model_args.model_name_or_path,\n num_labels=task_infos.num_labels,\n cache_dir=model_args.model_cache_dir,\n id2label=task_infos.id2label,\n label2id=task_infos.label2id,\n )\n config.dense_type = model_args.dense_type\n config.act_type = model_args.act_type\n config.num_labels_per_head = [\n len(label_id) for label_id in task_infos.head_id_to_label_id\n ]\n config.head2label = task_infos.head_id_to_label_id\n model_cls = getattr(mod, model_args.architectures,\n RobertaForKlueRecent)\n model = model_cls.from_pretrained(\n model_args.model_name_or_path,\n config=config,\n cache_dir=model_args.model_cache_dir,\n )\n if model.config.vocab_size < len(tokenizer):\n print(\"resize...\")\n model.resize_token_embeddings(len(tokenizer))\n return model", "def basic_model_init(model_args, task_infos, tokenizer):\n config = AutoConfig.from_pretrained(\n model_args.model_name_or_path,\n num_labels=task_infos.num_labels,\n cache_dir=model_args.model_cache_dir,\n id2label=task_infos.id2label,\n label2id=task_infos.label2id,\n )\n model_cls = getattr(mod, model_args.architectures,\n AutoModelForSequenceClassification)\n model = model_cls.from_pretrained(\n model_args.model_name_or_path,\n config=config,\n cache_dir=model_args.model_cache_dir,\n )\n if model.config.vocab_size < len(tokenizer):\n print(\"resize...\")\n model.resize_token_embeddings(len(tokenizer))\n return model", "def _initial_setup(self, **train_kwargs):\n self._update(time_step=0., **train_kwargs)", "def __init__(self, num_features=NUM_FEATURES):\n self.w = tf.Variable(tf.random.normal(shape=[num_features, 1]))\n self.b = tf.Variable(tf.zeros(shape=[1, 1]))", "def __init__(self, batch_size, outputs, input_size, reduction_factor):\n with tf.name_scope(\"TacotronTrainingHelper\"):\n # Copy every r'th frame from the ground truth spectrogram.\n # => shape=(B, T_spec // reduction_factor, n_mels)\n self.outputs = outputs[:, reduction_factor - 
1::reduction_factor, :]\n\n self._input_size = input_size\n self._reduction_factor = reduction_factor\n self._batch_size = batch_size\n\n # Get the number of time frames the decoder has to produce.\n # Note that we will produce sequences over the entire length of the batch. Maybe this\n # way the network will learn to generate silence after producing the actual sentence.\n n_target_steps = tf.shape(self.outputs)[1]\n\n # Create a tensor of length batch_size with each field containing n_target_steps.\n self._sequence_length = tf.tile([n_target_steps], [self._batch_size])", "def _initialize_with_tune_context(self, context: \"TuneContext\") -> None:\n raise NotImplementedError", "def start(self):\n self.sess = tf.Session()\n tf.global_variables_initializer().run(session=self.sess)", "def __init__(self, camera, cats, dogs):\n super(CatsDogsPredictor, self).__init__(self)\n\n self.camera = camera\n self.dogs = dogs\n self.cats = cats\n\n # Get the model's input dimensions. We'll use this information later to\n # resize images appropriately.\n self.input_shape = self.GetInputShape()\n\n # Get the model-specific preprocessing metadata\n self.preprocessing_metadata = helpers.get_image_preprocessing_metadata(self)\n\n # Holds the image from the camera or other sources\n self.image = None", "def __init__(self, data=None, data_to_predict=None, target=None):\n self.data = data\n self.target_name = target\n self.model_dict = {'LinearRegression': lm.LinearRegression(),\n 'Lasso': lm.Lasso(),\n 'Ridge': lm.Ridge,\n 'RandomForestRegressor': en.RandomForestRegressor(),\n 'AdaBoostRegressor': en.AdaBoostRegressor(),\n 'GradientBoost': en.GradientBoostingRegressor(),\n 'BaggingRegressor': en.BaggingRegressor(),\n 'RandomForestClassifier': en.RandomForestClassifier()}\n self.features_ = []\n self.selected_features_ = []\n self.model = None\n self.cv_score_ = {}\n self.train_index = None\n self.test_index = None\n self.data_to_predict = data_to_predict\n self.predictions = None\n self.train_score_ = None\n self.test_score_ = None\n self.best_params_ = None\n # self.fill_models = {}\n # self.fill_features = {}", "def __init__(self):\n super().__init__()\n self.data_set_loc = conf.config_section_mapper(\"filePath\").get(\"data_set_loc\")\n self.data_extractor = DataExtractor(self.data_set_loc)\n self.sim_act_diff_mov_tf = SimilarActorsFromDiffMovies()", "def __init__(self):\n super().__init__()\n self.data_set_loc = conf.config_section_mapper(\"filePath\").get(\"data_set_loc\")\n self.data_extractor = DataExtractor(self.data_set_loc)\n self.sim_act_diff_mov_tf = SimilarActorsFromDiffMovies()", "def __init__(self, sess, network, learning_rate=0.1, discount_factor=0.99):\n self.sess = sess\n self.learning_rate = learning_rate\n self.discount_factor = discount_factor\n self.network = network\n self.defineUpdateOperations()\n self.init = tf.global_variables_initializer()\n self.initialize_variables()", "def __init__(self, params=None):\n if isinstance(params, SSDParams):\n self.params = params\n else:\n self.params = SSDNet.default_params\n # if cfgs.DATA_FORMAT == \"NHWC\":\n # self.images_batch = tf.compat.v1.placeholder(dtype=tf.float32, shape=[None, None, None, 3],\n # name=\"input_images\")\n # else:\n # self.images_batch = tf.compat.v1.placeholder(dtype=tf.float32, shape=[None, 3, None, None],\n # name=\"input_images\")\n\n # self.labels_batch = tf.placeholder(dtype=tf.int32, shape=[None, None, cfgs.NUM_CLASS+1], name=\"gt_labels\")\n # self.bboxes_batch = tf.placeholder(dtype=tf.float32, shape=(None, None, 
None, 4), name=\"gt_bboxes\")\n # self.scores_batch = tf.placeholder(dtype=tf.float32, shape=(None, None, 1), name=\"gt_scores\")\n\n self.global_step = tf.train.get_or_create_global_step()", "def __init__(\n self,\n n_timesteps: int = 80,\n n_features: int = 5,\n n_LSTM_layers: int = 2,\n LSTM_size: int = 64,\n random_seed: Optional[int] = None\n ):\n\n self.n_timesteps = n_timesteps\n self.n_features = n_features\n self.random_seed = random_seed\n\n self.model = self._define_model(n_LSTM_layers, LSTM_size)", "def __init__(self,args,train=True,reuse=None,model=None):\n self.max_seq_len = args.max_seq_len\n self.vocab_size = args.vocab_size\n self.hidden_size = args.hidden_size\n\n initialize = model is None # whether to initialize variables\n\n # evice = \"/cpu:0\" if args.cpu else \"\"\n self.graph = tf.Graph() if model is None else model.graph\n self.session = tf.Session(graph=self.graph) \\\n if model is None else model.session\n\n with self.graph.as_default(),\\\n tf.variable_scope(\"LanguageModel\") as vs:\n self._seq = tf.placeholder(\n tf.int64,[None,self.max_seq_len])\n self._len = tf.placeholder(\n tf.int64,[None,])\n\n cell = tf.nn.rnn_cell.BasicLSTMCell(\n self.hidden_size,state_is_tuple=True)\n\n # Running RNN through sequence\n logit, _ = self.rnn_with_embedding(\n cell,None,self._seq, self._len,reuse=reuse)\n\n logit_list = tf.unpack(tf.transpose(logit,[1,0,2]))\n seq_list = tf.unpack(tf.transpose(self._seq,[1,0]))\n seq_list = seq_list[1:]\n\n xent = self.softmax_xent_loss_sequence(\n logit_list,seq_list,self._len,self.max_seq_len)\n\n self._cost = xent\n\n if train:\n log(vs.name+\"/Adding optimizer\")\n with tf.variable_scope(\"AdamOptimizer\"):\n optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate)\n self._train_op = optimizer.minimize(self._cost)\n\n if initialize:\n log(vs.name+\"/Initializing variables\")\n self.session.run(tf.initialize_all_variables())\n\n log(\"Done with constructor.\")", "def __init__(self) :\n self.prediction_ = None", "def __init__(self) :\n self.prediction_ = None" ]
[ "0.6117408", "0.60252637", "0.5911408", "0.5851764", "0.5833206", "0.57801265", "0.5762533", "0.5741081", "0.57022196", "0.5699099", "0.569854", "0.5625027", "0.55992603", "0.5584619", "0.55735224", "0.55720514", "0.556565", "0.55640054", "0.55431277", "0.5542662", "0.5516902", "0.5500344", "0.54941684", "0.54816586", "0.5458162", "0.545317", "0.54511255", "0.5438116", "0.5419646", "0.5418444", "0.5408887", "0.5398406", "0.539672", "0.53800946", "0.5372955", "0.537064", "0.53167456", "0.53166497", "0.53130865", "0.52926034", "0.52833414", "0.52749515", "0.52708393", "0.52575696", "0.52317876", "0.5223583", "0.5216773", "0.51984143", "0.51984143", "0.5190326", "0.5179686", "0.5166907", "0.5159693", "0.5152436", "0.5130682", "0.51190174", "0.51182085", "0.51111424", "0.5110733", "0.51081973", "0.5107452", "0.5106996", "0.5094663", "0.5090766", "0.5089388", "0.50874406", "0.507483", "0.5070028", "0.5067287", "0.5061018", "0.50594276", "0.5056771", "0.5054885", "0.50518537", "0.50515616", "0.50484186", "0.5042316", "0.5040683", "0.5037691", "0.50375926", "0.5036522", "0.503619", "0.503358", "0.50247914", "0.5019352", "0.5015734", "0.50117123", "0.50048", "0.50044", "0.50028986", "0.50016284", "0.500016", "0.49948904", "0.49948904", "0.49939486", "0.49921173", "0.49836355", "0.49709532", "0.49576885", "0.49576885" ]
0.5464787
24
Load sample images for image manipulation. Loads both, ``china`` and ``flower``. Returns
def load_sample_images():
    # Try to import imread from scipy. We do this lazily here to prevent
    # this module from depending on PIL.
    try:
        try:
            from scipy.misc import imread
        except ImportError:
            from scipy.misc.pilutil import imread
    except ImportError:
        raise ImportError("The Python Imaging Library (PIL) "
                          "is required to load data from jpeg files")
    ROOT_Dir = os.getcwd()
    module_path = os.path.join(ROOT_Dir, "images")
    with open(os.path.join(module_path, 'README.txt')) as f:
        descr = f.read()
    filenames = [os.path.join(module_path, filename)
                 for filename in os.listdir(module_path)
                 if filename.endswith(".jpg")]
    # Load image data for each image in the source folder.
    images = [imread(filename) for filename in filenames]
    return Bunch(images=images, filenames=filenames, DESCR=descr)
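A minimal usage sketch for the loader above (illustrative only, not part of the dataset record): it assumes the function has been imported together with its os and Bunch dependencies, and that an "images" folder containing a README.txt plus .jpg files sits in the current working directory, as the code itself expects.

# Hypothetical usage of load_sample_images() as defined above.
dataset = load_sample_images()
print(len(dataset.images), "sample images loaded")   # e.g. china.jpg and flower.jpg
first = dataset.images[0]                            # ndarray of shape (height, width, 3)
print(dataset.filenames[0], first.shape)
print(dataset.DESCR[:80])                            # start of the README description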
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_images(self):\r\n self.standing_frame = [load_image(\"cat1.png\")]\r\n self.walk_frames_r = [load_image(\"cat2.png\"), load_image(\"cat3.png\"),\r\n load_image(\"cat4.png\")]", "def _preload_all_samples(self):\n if self.mode in ['train_noval', 'train_with_val']:\n\n self._images_train, self._labels_train = [], []\n desc = \"Loading train image pairs & flows\"\n with tqdm(total=len(self._img_trn_path), desc=desc, ascii=True, ncols=100) as pbar:\n for n, image_path in enumerate(self._img_trn_path):\n pbar.update(1)\n label_path = self._lbl_trn_path[n]\n image, label = self._load_sample(image_path, label_path)\n self._labels_train.append(label)\n self._images_train.append(image)\n\n if self.mode == 'train_with_val':\n self._images_val, self._labels_val = [], []\n desc = \"Loading val image pairs & flows\"\n with tqdm(total=len(self._img_val_path), desc=desc, ascii=True, ncols=100) as pbar:\n for n, image_path in enumerate(self._img_val_path):\n pbar.update(1)\n label_path = self._lbl_val_path[n]\n image, label = self._load_sample(image_path, label_path, preprocess=False)\n self._labels_val.append(label)\n self._images_val.append(image)\n\n if self.opts['tb_test_imgs'] is True:\n self._images_test = []\n desc = \"Loading test samples\"\n with tqdm(total=len(self._img_tst_path), desc=desc, ascii=True, ncols=100) as pbar:\n for image_path in self._img_tst_path:\n pbar.update(1)\n self._images_test.append(self._load_sample(image_path, preprocess=False))\n\n elif self.mode in ['val', 'val_notrain']:\n\n self._images_val, self._labels_val = [], []\n desc = \"Loading val image pairs & flows\"\n with tqdm(total=len(self._img_val_path), desc=desc, ascii=True, ncols=100) as pbar:\n for n, image_path in enumerate(self._img_val_path):\n pbar.update(1)\n label_path = self._lbl_val_path[n]\n image, label = self._load_sample(image_path, label_path, preprocess=False)\n self._labels_val.append(label)\n self._images_val.append(image)\n\n elif self.mode == 'test':\n self._images_test = []\n desc = \"Loading test samples\"\n with tqdm(total=len(self._img_tst_path), desc=desc, ascii=True, ncols=100) as pbar:\n for image_path in self._img_tst_path:\n pbar.update(1)\n self._images_test.append(self._load_sample(image_path, preprocess=False))", "def load_sample(self):\n\n self.load_images(self.folder + \"/sampleSet.txt\")\n self.load_traces(self.folder + \"/sampleLabel.txt\")", "def load_test_images(images):\n loaded = {}\n for description, _ in images.items():\n loaded[description] = load_from_netcdf(description)\n return loaded", "def load_images(self):\n images_list = [os.path.join(self.root, image['file_name'])\n for image in self.data['images']]\n\n if self.shuffle:\n random.shuffle(images_list)\n images_list = images_list[:self.max_samples] if self.max_samples is not None and self.max_samples <= len(\n images_list) else images_list\n\n return images_list", "def load_scraped_food_images(ROOT):\n Xtr, Ytr = load_food_image_batch(os.path.join(ROOT, 'train'),50000)\n Xte, Yte = load_food_image_batch(os.path.join(ROOT, 'test'),10000)\n return Xtr, Ytr, Xte, Yte", "def load_images(self):\n for image in self.gltf.images:\n self.images.append(image.load(self.path.parent))", "def init_datasets(self, display_samples = False):\n print(\"==> Loading images from \", self.img_dir)\n self.image_data_gen = ImageDataGenerator(\n rescale=1./255,\n #rotation_range=30,\n #shear_range=30,\n #width_shift_range=.15,\n #height_shift_range=.15,\n #zoom_range=0.5,\n validation_split=0.2)\n\n self.train_dataset = 
self.image_data_gen.flow_from_directory(\n batch_size=BATCH_SIZE,\n directory=self.img_dir,\n shuffle=True,\n target_size=self.img_size,\n class_mode='sparse',\n subset='training')\n\n self.val_dataset = self.image_data_gen.flow_from_directory(\n batch_size=BATCH_SIZE,\n directory=self.img_dir,\n shuffle=True,\n target_size=self.img_size,\n class_mode='sparse',\n subset='validation')\n\n if display_samples:\n self.display_sample_images()", "def load_images(filename):\n images = _load(filename)\n #_info_image(image, title=os.path.basename(filename))\n return images", "def load_images(file):\n\timage_list = [] # List for storing all the images\n\ttargets = []\n\t\n\tfor filename in glob.glob(file + '/*.png'):\n\t\t# ==================\n\t\t# Reading the image\n\t\t# ==================\n\t\timage = scipy.misc.imread(filename).astype(np.float32)\n\t\t\n\t\t# ================================\n\t\t# Converting the image to a vector\n\t\t# ================================\n\t\timage = image.flatten() # (784, )\n\t\t\n\t\t# ==============================\n\t\t# Normalizing the image to numpy\n\t\t# ==============================\n\t\timage = image / 255.0\n\t\timage = image - 0.5\n\t\timage = image * 2.0\n\t\t\n\t\t# ===============================\n\t\t# Appending the image to the list\n\t\t# ===============================\n\t\timage_list.append(image)\n\t\t\n\t\t_, value = filename.split('\\\\')\n\t\t# print(value[0])\n\t\ttargets.append(int(value[0]))\n\t\n\timage_list = np.array(image_list)\n\ttargets = np.array(targets)\n\t\n\t# ================================================\n\t# \t\t\tShuffling the data\n\t# ================================================\n\timage_list, targets = shuffle(image_list, targets)\n\t\n\ttrain_images, test_images, train_targets, test_targets = split(image_list, targets)\n\treturn train_images, test_images, train_targets, test_targets", "def load_data():\n dirname = os.path.join('datasets', 'fashion-mnist')\n base = 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/'\n files = [\n 'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz',\n 't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz'\n ]\n\n paths = []\n for fname in files:\n paths.append(get_file(fname, origin=base + fname, cache_subdir=dirname))\n\n with gzip.open(paths[0], 'rb') as lbpath:\n y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8)\n\n with gzip.open(paths[1], 'rb') as imgpath:\n x_train = np.frombuffer(\n imgpath.read(), np.uint8, offset=16).reshape(len(y_train), 28, 28)\n\n with gzip.open(paths[2], 'rb') as lbpath:\n y_test = np.frombuffer(lbpath.read(), np.uint8, offset=8)\n\n with gzip.open(paths[3], 'rb') as imgpath:\n x_test = np.frombuffer(\n imgpath.read(), np.uint8, offset=16).reshape(len(y_test), 28, 28)\n\n return (x_train, y_train), (x_test, y_test)", "def load_images(input_dir=\"/tmp/mapswipe/project-1\", n_images=2000, seed=1):\n class_map = {1: \"1\", 0: \"5\"}\n output_dir = \"/Users/thead/git/dreamview/data/\"\n\n X_ = []\n y_ = []\n for new_klass in class_map:\n images = []\n for klass in class_map[new_klass]:\n for img in glob.glob(input_dir + \"/%s/*/*/*/aerial.jpeg\" % klass):\n if os.stat(img).st_size > 0:\n images.append(img)\n\n images = shuffle(images, random_state=seed+42+new_klass)\n images = images[:n_images]\n X_ += images\n y_ += [new_klass] * len(images)\n\n # XXX deduce array size from an actual image\n X = np.zeros((2*n_images, 256*256), dtype=np.ubyte)\n y = np.zeros(2*n_images, dtype=np.int)\n\n for n, (img_path, klass) in 
enumerate(zip(X_, y_)):\n # the order of these OPs has been chosen on purpose, don't mess\n # without checking what happens\n img = imread(img_path)\n img = equalize_adapthist(img)\n img = rgb2grey(img)\n img = img_as_ubyte(img)\n\n if not n % 10:\n fname = os.path.split(img_path)[:-1]\n fname = os.path.join(*fname, \"aerial-processed.jpeg\")\n imsave(fname, img)\n\n X[n,:] = img.ravel()\n y[n] = klass\n\n return X, y", "def load_isolated_images(Args):\n # load first galaxy images\n name = 'first_gal_band_wldeb_noise.fits'\n filename = os.path.join(out_dir, Args.model, name)\n Y1 = load_images(filename, ['i'], Args)\n # load second galaxy images\n name = 'second_gal_band_wldeb_noise.fits'\n filename = os.path.join(out_dir, Args.model, name)\n Y2 = load_images(filename, ['i'], Args)\n Y = {'Y1': Y1,\n 'Y2': Y2}\n return Y", "def three_sample_images():\n samples = samples_path()\n _truck = np.array(Image.open(os.path.join(samples, \"truck.png\")))\n _deer = np.array(Image.open(os.path.join(samples, \"deer.png\")))\n _frog = np.array(Image.open(os.path.join(samples, \"frog.png\")))\n truck = transforms.ToTensor()(_truck)\n deer = transforms.ToTensor()(_deer)\n frog = transforms.ToTensor()(_frog)\n return torch.stack([truck, deer, frog])", "def load_images():\n print(\"[+] UPDATE - Begin loading images\")\n\n colors = [\"w\", \"b\"]\n piece_types = [\"p\", \"R\", \"N\", \"B\", \"K\", \"Q\"]\n for color in colors:\n for type in piece_types:\n piece = color + type\n IMAGES[piece] = p.transform.scale(p.image.load(\"images/\" + piece + \".png\"), (SQ_SIZE, SQ_SIZE))\n\n print(\"[+] UPDATE - Images loaded\")", "def load(self):\n\n # get files in folder\n files = [f for f in listdir(self.data_path)]\n print(\"loading images from folder: %s\" % self.data_path)\n\n images = []\n image_targets = []\n for f in files:\n filepath = path.join(self.data_path, f)\n images.append(io.imread(filepath, as_grey=True))\n image_targets.append(self.target)\n\n # define new size and resize images\n new_size = (2 ** self.size_exponent, 2 ** self.size_exponent)\n for i in range(0, len(images)):\n # images[i] = transform.resize(images[i], new_size)\n images[i] = misc.imresize(images[i], new_size) / 16\n\n self.images = images\n self.targets = image_targets", "def loadImages(files, targets):\n images = []\n for file in files:\n targets.append(file)\n images.append(snd.imread(file))\n return images, targets", "def load_dataset():\n # Get the start time\n start_time = time.time()\n\n # Load dataset YAML file\n # This contains all of our image labels, as well as locations of the images themself\n print(\"Reading dataset/dataset.yaml... 
\", end=\"\")\n with open(\"dataset/dataset.yaml\", \"r\") as file:\n dataset = yaml.safe_load(file)\n\n # Get paths, labels\n paths = []\n labels = []\n for sample in dataset:\n # Assign a \"1\" label if we're looking at the ground\n # 0 for everything else: trees, buildings, cars, etc\n label_semantic = max(sample[\"labels\"].keys(), key=sample[\"labels\"].get)\n if max(sample[\"labels\"].values()) < 0.80:\n # Samples that are not obviously in any one category: unsafe\n label=0\n elif label_semantic == \"GROUND\":\n # Safe if >80% ground\n label = 1\n else:\n # Unsafe otherwise, this is usually water\n label = 0\n\n paths.append(sample[\"path\"])\n labels.append(label)\n print(\"done!\", flush=True)\n\n print(\"Loading images\", end=\"\")\n # Get images\n images = np.zeros((len(paths), 128, 128, 3), dtype=np.float32)\n progress = 0.0\n for i, path in enumerate(paths):\n images[i] = np.array(PIL.Image.open(path).resize((128, 128))) / 255.0\n if i / len(paths) > progress:\n progress += 1.0 / 20.0\n print(\".\", end=\"\", flush=True)\n print(\" done!\")\n labels = np.array(labels, dtype=np.int)\n\n # Return\n print(f\"Loaded {len(images)} images in {time.time() - start_time} seconds!\")\n return images, labels", "def load_sample_image(image_name):\n images = load_sample_images()\n index = None\n for i, filename in enumerate(images.filenames):\n if filename.endswith(image_name):\n index = i\n break\n if index is None:\n raise AttributeError(\"Cannot find sample image: %s\" % image_name)\n return images.images[index]", "def load_labeled_data():\n\n images = []\n labels = []\n\n for i in range(1, 10):\n path = (\"selflabeled\", str(i), \"*.jpg\")\n filenames = glob.glob(\"/\".join(path))\n images_one_type = [cv2.imread(img) for img in filenames]\n labels_one_type = [i] * len(images_one_type)\n images += images_one_type\n labels += labels_one_type\n\n return images, labels", "def _load_images(paths):\n assert isinstance(paths, list)\n _R_MEAN = 123.68\n _G_MEAN = 116.78\n _B_MEAN = 103.94\n\n # allocate memory\n images = np.zeros([len(paths), FLAGS.target_height, FLAGS.target_width, 3],\n dtype=np.float32)\n\n # load all images\n pbar = ProgressBar(max_value=len(paths))\n for i in range(len(paths)):\n img = sio.imread(paths[i])\n\n # resize images\n img = sresize(img, (FLAGS.target_height, FLAGS.target_width, 3),\n mode='constant', preserve_range=True)\n\n # store images\n images[i] = img.astype(np.float32)\n pbar.update(i)\n\n # mean removal\n images -= [_R_MEAN, _G_MEAN, _B_MEAN]\n return images", "def load_image_data():\n print(\"Loading image data...\")\n label_dict = get_label_vectors()\n categories = [c for c in os.listdir('images/') if c[0] != '.'] # ignore\n labels = [] # instantiate list for image labels\n data = [] # instantiate list for image data\n for i in categories:\n path = 'images/{}/'.format(i) # define path to category folder\n for j in os.listdir(path): # get images from category folder\n labels.append(label_dict[i]) # append label vector\n data.append(cv2.imread(path + j).flatten()) # append flattened image data\n\n labels = np.array(labels) # convert lists to array\n data = np.array(data)\n print(\"Done.\")\n\n return labels, data", "def load_images(path, p=1, feature=None, transform=None):\n\n images = os.listdir(path)\n images = random.sample(images, math.ceil(len(images) * p))\n\n loaded = [\n load_image(\n os.path.join(path, img),\n feature=feature, transform=transform)\n for img in images]\n\n return np.array([x for x in loaded if x is not None])", "def 
load_images(self):\n self.img_paths = sorted(glob(self.img_pattern))\n self.imgs = []\n for idx, this_path in enumerate(self.img_paths):\n try:\n this_img = cv2.imread(this_path)\n if self.downscale > 1:\n this_img = cv2.resize(this_img, (0, 0),\n fx=1/float(self.downscale),\n fy=1/float(self.downscale),\n interpolation=cv2.INTER_LINEAR)\n except Exception as e:\n print(\"error loading img: %s\" % (this_path))\n if this_img is not None:\n self.imgs.append(this_img)\n print(\"loaded img %d size=(%d,%d): %s\" %\n (idx, this_img.shape[0], this_img.shape[1], this_path))\n print(\"loaded %d images\" % (len(self.imgs)))", "def load_images(test_data_dir, image_size = (300, 300)):\n # loop over the input images\n images_data = []\n labels = []\n imagePaths = sorted(list(paths.list_images(test_data_dir)))\n for imagePath in imagePaths:\n # load the image, pre-process it, and store it in the data list\n image = cv2.imread(imagePath)\n image = cv2.resize(image, image_size)\n image = img_to_array(image)\n images_data.append(image)\n\n # extract the class label from the image path and update the\n # labels list\n label = imagePath.split(os.path.sep)[-2]\n labels.append(label)\n return images_data, sorted(labels)", "def load_test_data(image_path):\n raw = []\n image_filename = dict()\n count = 0\n for filename in glob.glob(image_path):\n name = os.path.basename(filename)[:-4]\n try:\n im = Image.open(filename)\n im = im.convert('L')\n im = im.resize((img_rows, img_cols))\n raw.append(np.array(im))\n image_filename[count] = name\n count += 1\n im.close()\n except IOError:\n print('Error loading image ', filename)\n return [raw, image_filename]", "def loadDataset(dataset):\n # List of images.\n images = []\n\n\n\n # Read all filenames from the dataset.\n for filename in dataset:\n # Read the input image.\n image = cv2.imread(filename)\n\n # Add the current image on the list.\n if image is not None: \n images.append(image)\n else:\n print(\"Could not read file: {}\".format(filename))\n sys.exit()\n\n # Return the images list.\n return images", "def load_examples_data(dataset_name):\n dataset_name = dataset_name.strip().lower()\n if dataset_name.lower() not in ['pokemon', 'hanzi', 'animals', 'nsfw', 'simpsons', 'horse2zebra', 'people',\n 'autodrive', 'superresolution', 'anpr', 'beauty','antisproofing','facelandmarks','dogs-vs-cats','chinese']:\n raise ValueError('Not a valid dataset_name.')\n dataset_name = 'examples_' + dataset_name\n dirname = os.path.join(_trident_dir, dataset_name)\n if not os.path.exists(dirname):\n try:\n os.makedirs(dirname)\n except OSError:\n # Except permission denied and potential race conditions\n # in multi-threaded environments.\n pass\n is_internet_ok = is_connected()\n if dataset_name == 'examples_pokemon':\n is_download=download_file_from_google_drive('1U-xc54fX9j9BcidvRa0ow6qjssMlSF2A', dirname, 'pokemon.tar')\n tar_file_path = os.path.join(dirname, 'pokemon.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n extract_path = os.path.join(dirname, 'pokemon')\n dataset = load_folder_images(dataset_name, extract_path, folder_as_label=False)\n print('get pokemon images :{0}'.format(len(dataset)))\n return dataset\n\n\n elif dataset_name == 'examples_hanzi':\n download_file_from_google_drive('13UEzSG0az113gpRPKPyKrIE2HDaA2P4H', dirname, 'hanzi.tar')\n tar_file_path = os.path.join(dirname, 'hanzi.tar')\n extract_path = os.path.join(dirname, 'hanzi')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n dataset = load_folder_images(dataset_name, 
os.path.join(dirname, 'train'), folder_as_label=True,\n object_type=ObjectType.gray)\n\n dataset_test = load_folder_images(dataset_name, os.path.join(dirname, 'test'), folder_as_label=True,\n object_type=ObjectType.gray)\n\n dataset.testdata = dataset_test.traindata\n dataset.class_names['zh-cn'] = dataset.class_names['en-us']\n return dataset\n\n elif dataset_name == 'examples_animals':\n download_file_from_google_drive('19Cjq8OO6qd9k9TMZxlPjDpejDOdiHJoW', dirname, 'animals.tar')\n tar_file_path = os.path.join(dirname, 'animals.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n dataset = load_folder_images(dataset_name, dirname, folder_as_label=True)\n return dataset\n elif dataset_name == 'examples_nsfw':\n tar_file_path = os.path.join(dirname, 'nsfw.tar')\n if os.path.exists(tar_file_path) and get_file_create_time(tar_file_path)<datetime.datetime(2021, 2, 20, 0, 0, 0).timestamp():\n os.remove(tar_file_path)\n if os.path.exists(os.path.join(dirname,'porn_detection_data.pkl')):\n os.remove(os.path.join(dirname,'porn_detection_data.pkl'))\n _delete_h(dirname)\n download_file_from_google_drive('1EXpV2QUrSFJ7zJn8NqtqFl1k6HvXsUzp', dirname, 'nsfw.tar')\n\n extract_path = os.path.join(dirname, 'nsfw')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n folders = ['drawings', 'hentai', 'neutral', 'porn', 'sexy']\n data=unpickle(os.path.join(dirname,'porn_detection_data.pkl'))\n\n trainData = []\n testData = []\n trainLabel = []\n testLabel = []\n for n in range(5):\n folder=folders[n]\n trainData.extend(data[folder]['train'])\n trainLabel.extend([n]*len(data[folder]['train']))\n testData.extend(data[folder]['test'])\n testLabel.extend([n] * len(data[folder]['test']))\n\n trainarray = ImageDataset(trainData,object_type=ObjectType.rgb)\n trainlabel = LabelDataset(trainLabel,object_type=ObjectType.classification_label)\n train_iter = Iterator(data=trainarray, label=trainlabel)\n\n testarray = ImageDataset(testData,object_type=ObjectType.rgb)\n testlabel = LabelDataset(testLabel,object_type=ObjectType.classification_label)\n test_iter = Iterator(data=testarray, label=testlabel)\n print('training images: {0} test images:{1}'.format(len(trainarray), len(testarray)))\n\n dataset = DataProvider(dataset_name, traindata=train_iter, testdata=test_iter)\n dataset.binding_class_names(['drawing', 'hentai', 'neutral', 'porn', 'sexy'], 'en-us')\n dataset.binding_class_names(['绘画', '色情漫画', '中性', '色情', '性感'], 'zh-cn')\n dataset.binding_class_names(['繪畫', '色情漫畫', '中性', '色情', '性感'], 'zh-tw')\n dataset.scenario = 'train'\n return dataset\n elif dataset_name == 'examples_simpsons':\n download_file_from_google_drive('1hGNFbfBv3EZ4nx4Qod6PtSYzO8H4QIxC', dirname, 'simpsons.tar')\n tar_file_path = os.path.join(dirname, 'simpsons.tar')\n extract_path = os.path.join(dirname, 'simpsons')\n extract_archive(tar_file_path, extract_path, archive_format='tar')\n data_provider = load_folder_images(dataset_name, extract_path, folder_as_label=False)\n data_provider.traindata.unpair = RandomNoiseDataset(shape=(100), random_mode='normal')\n print('get simpsons images :{0}'.format(len(data_provider.traindata.data.items)))\n return data_provider\n elif dataset_name == 'examples_horse2zebra':\n download_file_from_google_drive('1pqj-T90Vh4wVNBV09kYZWgVPsZUA2f7U', dirname, 'horse2zebra.tar')\n tar_file_path = os.path.join(dirname, 'horse2zebra.tar')\n extract_path = os.path.join(dirname, 'horse2zebra')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n trainA = 
ImageDataset(list_images(os.path.join(dirname, 'trainA')), object_type=ObjectType.rgb,\n get_image_mode=GetImageMode.processed)\n trainB = ImageDataset(list_images(os.path.join(dirname, 'trainB')), object_type=ObjectType.rgb,\n get_image_mode=GetImageMode.processed)\n testA = ImageDataset(list_images(os.path.join(dirname, 'testA')), object_type=ObjectType.rgb,\n get_image_mode=GetImageMode.processed)\n testB = ImageDataset(list_images(os.path.join(dirname, 'testB')), object_type=ObjectType.rgb,\n get_image_mode=GetImageMode.processed)\n train_iter = Iterator(data=trainA, unpair=trainB)\n test_iter = Iterator(data=testA, unpair=testB)\n dataset = DataProvider(dataset_name, traindata=train_iter, testdata=test_iter)\n print('get horse2zebra images :{0}'.format(len(dataset)))\n return dataset\n elif dataset_name == 'examples_people':\n download_file_from_google_drive('1H7mJJfWpmXpRxurMZQqY4N_UXWLbQ2pT', dirname, 'people.tar')\n tar_file_path = os.path.join(dirname, 'people.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n imgs = glob.glob(os.path.join(dirname, 'imgs', '*.*g'))\n masks = glob.glob(os.path.join(dirname, 'masks', '*.png'))\n imgs=list(sorted(imgs))\n masks = list(sorted(masks))\n # make_dir_if_need(os.path.join(dirname, 'trimap'))\n # for i in range(len(masks)):\n # mask=mask2array(masks[i])\n # trimap=mask2trimap(mask)\n # save_mask(trimap,masks[i].replace('masks','trimap'))\n # print('trimap',len(masks))\n\n imgdata = ImageDataset(images=imgs, object_type=ObjectType.rgb)\n mskdata = MaskDataset(masks=masks, object_type=ObjectType.binary_mask)\n dataset = DataProvider(dataset_name=dataset_name, traindata=Iterator(data=imgdata, label=mskdata))\n print('get people images :{0}'.format(len(dataset)))\n return dataset\n elif dataset_name == 'examples_autodrive':\n download_file_from_google_drive('1JqPPeHqhWLqnI6bD8nuHcVx-Y56oIZMK', dirname, 'autodrive.tar')\n tar_file_path = os.path.join(dirname, 'autodrive.tar')\n extract_path = os.path.join(dirname, 'autodrive')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n imgs = glob.glob(os.path.join(dirname, 'images', '*.*g'))\n masks = glob.glob(os.path.join(dirname, 'masks', '*.png'))\n imgs = list(sorted(imgs))\n masks = list(sorted(masks))\n imgdata = ImageDataset(images=imgs, object_type=ObjectType.rgb,symbol='image')\n mskdata = MaskDataset(masks=masks, object_type=ObjectType.color_mask,symbol='mask')\n\n def parse_code(l):\n if len(l.strip().split(\"\\t\")) == 2:\n a, b = l.replace('\\t\\t', '\\t').strip().split(\"\\t\")\n return tuple(int(i) for i in b.split(' ')), a\n\n label_codes, label_names = zip(\n *[parse_code(l) for l in open(os.path.join(dirname, \"label_colors.txt\")).readlines()])\n for i in range(len(label_codes)):\n mskdata.palette[label_names[i]] = label_codes[i]\n\n dataset = DataProvider(dataset_name=dataset_name, traindata=Iterator(data=imgdata, label=mskdata))\n print('get autodrive images :{0}'.format(len(dataset)))\n return dataset\n elif dataset_name == 'examples_superresolution':\n download_file_from_google_drive('1v1uoymrWI_MLSiGvSGW7tWJYSnzzXpEQ', dirname, 'superresolution.tar')\n tar_file_path = os.path.join(dirname, 'superresolution.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n imgs = glob.glob(os.path.join(dirname, '*.*g'))\n imgs.extend(glob.glob(os.path.join(dirname, '*.bmp')))\n imgs = list(sorted(imgs))\n\n print('get super resolution images :{0}'.format(len(imgs)))\n\n imgdata = ImageDataset(images=imgs * 2, object_type=ObjectType.rgb, 
symbol='lr')\n labeldata = ImageDataset(images=imgs * 2, object_type=ObjectType.rgb, symbol='hr')\n dataset = DataProvider(dataset_name=dataset_name, traindata=Iterator(data=imgdata, label=labeldata))\n return dataset\n elif dataset_name == 'examples_beauty':\n download_file_from_google_drive('1aJhxN9IqsxuayhRTm-gmxk6PiLe5wm9X', dirname, 'beauty.tar')\n tar_file_path = os.path.join(dirname, 'beauty.tar')\n\n extract_archive(tar_file_path, dirname, archive_format='tar')\n # 讀取圖片數據\n images_dict = {}\n with open(os.path.join(dirname, 'images_dict.pkl'), 'rb') as fp:\n images_dict = pickle.load(fp)\n\n f = open(os.path.join(dirname, 'All_Ratings.txt'), encoding='utf-8-sig').readlines()\n imgs = []\n landmarks = []\n ratings = []\n for row in f:\n data = row.strip().split('\\t')\n if 'images\\\\' + data[0] in images_dict:\n img = images_dict['images\\\\' + data[0]][0]\n img = img.transpose([2, 0, 1])[::-1].transpose([1, 2, 0])\n imgs.append(img)\n landmark = images_dict['images\\\\' + data[0]][1].astype(np.float32)\n landmarks.append(landmark)\n rating = (float(data[1])) / 5.00\n ratings.append(rating)\n print('{0} faces loaded...'.format(len(imgs)))\n imgdata = ImageDataset(images=imgs, object_type=ObjectType.rgb, symbol='faces')\n landmarkdata = LandmarkDataset(landmarks=landmarks, object_type=ObjectType.landmarks, symbol='target_landmarks')\n labeldata = LabelDataset(data=ratings,object_type=ObjectType.classification_label, symbol='target_beauty')\n data_provider = DataProvider(dataset_name=dataset_name, traindata=Iterator(data=imgdata, label=Dataset.zip(landmarkdata,labeldata)))\n return data_provider\n\n elif dataset_name == 'examples_facelandmarks':\n download_file_from_google_drive('1GtswQBAHPa_bXaB4tW2uOOQ8Lxfz2L5B', dirname, 'ibug_300W.tar')\n tar_file_path = os.path.join(dirname, 'ibug_300W.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n root_dir=os.path.join(dirname, 'ibug_300W_large_face_landmark_dataset')\n image_paths = {}\n landmarks = {}\n crops = {}\n\n for mode in ['train','test']:\n make_dir_if_need(os.path.join(dirname, 'crops',mode))\n tree = ElementTree.parse(os.path.join(root_dir, 'labels_ibug_300W_{0}.xml'.format(mode)))\n root = tree.getroot()\n image_paths[mode]=[]\n landmarks[mode] = []\n crops[mode] = []\n\n offset=5\n for j in tqdm(range(len(root[2]))):\n try:\n filename=root[2][j]\n landmark = []\n for num in range(68):\n x_coordinate = int(filename[0][num].attrib['x'])\n y_coordinate = int(filename[0][num].attrib['y'])\n landmark.append([x_coordinate, y_coordinate])\n landmark=np.asarray(landmark)\n\n crop = filename[0].attrib\n for k in crop.keys():\n crop[k] = int(crop[k]) if isinstance(crop[k], str) else crop[k]\n for k in crop.keys():\n if k=='top' and int(landmark[:,1].min())<int(crop[k]):\n crop[k] = int( landmark[:,1].min())\n crop[ 'height']+=crop[k]-int(landmark[:,1].min())\n elif k=='left' and int(landmark[:,0].min())<int(crop[k]):\n crop[k] = int( landmark[:,0].min())\n crop['width']+= crop[k] - int(landmark[:, 0].min())\n elif k == 'width' and int(landmark[:, 0].max()-landmark[:, 0].min()) > int(crop[k]):\n crop[k] = int(landmark[:, 0].max()-landmark[:, 0].min())\n elif k == 'height' and int(landmark[:, 1].max()-landmark[:, 1].min()) > int(crop[k]):\n crop[k] = int(landmark[:, 1].max()-landmark[:, 1].min())\n\n crop['left']-=offset\n crop['top'] -= offset\n crop['width'] += 2*offset\n crop['height'] += 2*offset\n\n\n landmark[:,0]-=crop['left']\n landmark[:, 1] -= crop['top']\n\n\n if not os.path.exists(os.path.join(dirname, 
'crops', mode, '{0}.png'.format(j))):\n im=image2array(os.path.join(root_dir, filename.attrib['file']))\n if im.ndim==2:\n im=cv2.cvtColor(im,cv2.COLOR_GRAY2RGB)\n im=im[crop['top']:min(crop['top']+crop['height'],im.shape[0]),crop['left']:min(crop['left']+crop['width'],im.shape[1]),:]\n\n if max(im.shape[:2])/max(min(im.shape[:2]),0)<=5:\n\n array2image(im).save(os.path.join(dirname, 'crops',mode,'{0}.png'.format(j)))\n image_paths[mode].append(os.path.join(dirname, 'crops', mode, '{0}.png'.format(j)))\n crops[mode].append(crop)\n landmarks[mode].append(landmark)\n del im\n else:\n #im = image2array(os.path.join(dirname, 'crops',mode,'{0}.png'.format(j)))\n image_paths[mode].append(os.path.join(dirname, 'crops',mode,'{0}.png'.format(j)))\n crops[mode].append(crop)\n landmarks[mode].append(landmark)\n\n if j%100==0:\n gc.collect()\n except Exception as e:\n pass\n\n print('ibug 300w train dataset: images: {0} landmarks:{1} \\n'.format(len(image_paths['train']),len(landmarks['train'])))\n print('ibug 300w test dataset: images: {0} landmarks:{1} \\n'.format(len(image_paths['test']), len(landmarks['test'])))\n imdata=ImageDataset(images=image_paths['train'],symbol='faces',object_type=ObjectType.rgb)\n landmarkdata = LandmarkDataset(landmarks=landmarks['train'], symbol='landmarks',object_type=ObjectType.landmarks)\n imtestdata = ImageDataset(images=image_paths['test'], symbol='faces',object_type=ObjectType.rgb)\n landmarktestdata = LandmarkDataset(landmarks=landmarks['test'], symbol='landmarks',object_type=ObjectType.landmarks)\n data_provider=DataProvider(traindata=Iterator(data=imdata,label=landmarkdata),testdata=Iterator(data=imtestdata,label=landmarktestdata))\n return data_provider\n\n elif dataset_name == 'examples_antisproofing':\n download_file_from_google_drive('1e7Zjn2MHNCvA5gXdJUECzY8NjK4KVpa7', dirname, 'antisproofing.tar')\n tar_file_path = os.path.join(dirname, 'antisproofing.tar')\n make_dir_if_need(os.path.join(dirname, 'antisproofing'))\n extract_archive(tar_file_path, dirname, archive_format='tar')\n data_provider = load_folder_images(dataset_name,os.path.join(dirname, 'antisproofing'))\n return data_provider\n elif dataset_name == 'examples_anpr':\n download_file_from_google_drive('1uGBd8tXlP0TZAXNgrR6H0jl5MXj7VPbN', dirname, 'anpr.tar')\n tar_file_path = os.path.join(dirname, 'anpr.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n imgs = glob.glob(os.path.join(dirname, '*.*g'))\n imgs = list(sorted(imgs))\n\n # CCPD (Chinese City Parking Dataset, ECCV) and PDRC (license Plate Detection and Recognition Challenge)\n # https://github.com/detectRecog/CCPD\n provinces = [\"皖\", \"沪\", \"津\", \"渝\", \"冀\", \"晋\", \"蒙\", \"辽\", \"吉\", \"黑\", \"苏\", \"浙\", \"京\", \"闽\", \"赣\", \"鲁\", \"豫\", \"鄂\", \"湘\", \"粤\",\n \"桂\", \"琼\", \"川\", \"贵\", \"云\", \"藏\", \"陕\", \"甘\", \"青\", \"宁\", \"新\", \"警\", \"学\", \"O\"]\n alphabets = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',\n 'W', 'X', 'Y', 'Z', 'O']\n ads = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W',\n 'X', 'Y', 'Z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'O']\n\n def lp2char(lp):\n cols = lp.split('_')\n charstring = ''\n for i in range(len(cols)):\n if i == 0:\n charstring += provinces[int(cols[i])]\n elif i == 1:\n charstring += alphabets[int(cols[i])]\n else:\n charstring += ads[int(cols[i])]\n return charstring\n\n width = 720\n height = 1160\n for im_path in imgs:\n lbl = 
im_path.split('/')[-1].rsplit('.', 1)[0].split('-')[-3]\n charstring = lp2char(lbl)\n iname = im_path.rsplit('/', 1)[-1].rsplit('.', 1)[0].split('-')\n [leftUp, rightDown] = [[int(eel) for eel in el.split('&')] for el in iname[2].split('_')]\n box = [leftUp[0], leftUp[1], rightDown[0], rightDown[1]]\n ori_w, ori_h = [float(int(el)) for el in [width, height]]\n new_labels = [(leftUp[0] + rightDown[0]) / (2 * ori_w), (leftUp[1] + rightDown[1]) / (2 * ori_h),\n (rightDown[0] - leftUp[0]) / ori_w, (rightDown[1] - leftUp[1]) / ori_h]\n download_file_from_google_drive('1e7Zjn2MHNCvA5gXdJUECzY8NjK4KVpa7', dirname, 'antisproofing.tar')\n tar_file_path = os.path.join(dirname, 'antisproofing.tar')\n make_dir_if_need(os.path.join(dirname, 'antisproofing'))\n extract_archive(tar_file_path, dirname, archive_format='tar')\n data_provider = load_folder_images(dataset_name, os.path.join(dirname, 'antisproofing'))\n return data_provider\n\n\n\n elif dataset_name == 'examples_dogs-vs-cats':\n download_file_from_google_drive('10czW0On7eIXkPP-MuQ-IRxMWdTizWjNC', dirname, 'dogs-vs-cats.tar')\n tar_file_path = os.path.join(dirname, 'dogs-vs-cats.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n data_provider = load_folder_images(dataset_name, dirname)\n return data_provider\n elif dataset_name == 'examples_chinese':\n to_half=ToHalfWidth()\n to_sc=ChineseConvert(convert_to='simplified')\n download_file_from_google_drive('1yzRzXpLuhSUxnixqCgpbdTk16ajnTEWF', dirname, 'chinese.tar')\n tar_file_path = os.path.join(dirname, 'chinese.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n\n as_train = remove_nonprintable(to_half(codecs.open(os.path.join(dirname, 'as_training.utf8'), encoding='utf-8-sig').read().strip().replace('\\u3000' ,'|'))).splitlines()\n cityu_train =remove_nonprintable(to_half(codecs.open(os.path.join(dirname, 'cityu_training.utf8'), encoding='utf-8-sig').read().strip().replace(' ','|'))).splitlines()\n\n as_test = remove_nonprintable(to_half(codecs.open(os.path.join(dirname, 'as_testing_gold.utf8'), encoding='utf-8-sig').read().strip().replace('\\u3000', '|'))).splitlines()\n cityu_test = remove_nonprintable(to_half(codecs.open(os.path.join(dirname, 'cityu_test_gold.utf8'), encoding='utf-8-sig').read().strip().replace(' ', '|'))).splitlines()\n\n\n data = as_train + cityu_train # 把兩個語料合併\n test_data=as_test + cityu_test # 把兩個語料合併\n\n\n raw_data_train = [row.strip('\\n').strip('\\r') for row in data] # 移除分行字元\n raw_data_test = [row.strip('\\n').strip('\\r') for row in test_data] # 移除分行字元\n\n process_data_train=[]\n process_seg_label_train = []\n process_simplifided_label_train = []\n process_traditional_label_train = []\n\n tmp_data_train = []\n tmp_seg_label_train = []\n tmp_simplifided_label_train = []\n tmp_pronunce_label_train = []\n for k in tqdm(range(len(raw_data_train))):\n row=raw_data_train[k]\n if row.startswith('∥'):\n row=row[1:]\n words=row.replace('||','|').split('|')\n for k in range(len(words)):\n\n word = words[k]\n\n for i in range(len(word)):\n tmp_data_train.append(word[i])\n #tmp_simplifided_label_train.append(to_half(to_sc(word[i])))\n #轉換為BMES\n\n if i==0 and len(word)>1: #B 是一個詞的開始\n tmp_seg_label_train.append('B')\n elif i==len(word)-1 and len(word)>=2 and tmp_seg_label_train[-1] in ['B','M']: #E 是一個詞的結束\n tmp_seg_label_train.append('E')\n elif len(word)==1 and i==0: #S 自己就是一個單詞\n tmp_seg_label_train.append('S')\n elif len(word)>=3 and tmp_seg_label_train[-1] in ['B','M']: #M 是一個詞的中間\n tmp_seg_label_train.append('M')\n\n if 
len(tmp_seg_label_train)>0 and tmp_seg_label_train[-1] in ['E','S']:\n if len(word) > 1 and (is_alphabet(word) or is_punctuation(word)) and k+1<len(words):\n if word in [ '。','﹖']:\n pass\n\n elif random.random() < 0.6 or is_alphabet(word):\n tmp_data_train.append(' ')\n tmp_seg_label_train.append('S')\n\n if (k+1<len(raw_data_train) and not raw_data_train[k+1].startswith( '」')) and words[-1] in [ '。','﹖']:\n #process_traditional_label_train.append(tmp_data_train)\n\n tmp_data_train=to_half(''.join(tmp_data_train))\n tmp_seg_label_train = ''.join(tmp_seg_label_train)\n # if len(tmp_data_train)!=len(tmp_seg_label_train):\n # print('')\n tmp_simplifided_label_train =to_sc(tmp_data_train)\n\n process_data_train.append(tmp_data_train)\n process_seg_label_train.append(tmp_seg_label_train)\n process_simplifided_label_train.append(tmp_simplifided_label_train)\n tmp_data_train = []\n tmp_seg_label_train = []\n tmp_simplifided_label_train = []\n tmp_pronunce_label_train = []\n # else:\n # tmp_data_train.append('\\n')\n # tmp_simplifided_label_train.append('\\n')\n # tmp_seg_label_train.append('\\n')\n corpus=process_data_train\n seg_corpus=process_seg_label_train\n simplifided_corpus =process_simplifided_label_train\n\n process_data_test = []\n process_seg_label_test = []\n process_simplifided_label_test = []\n process_traditional_label_test = []\n print('generate test labels')\n tmp_data_test = []\n tmp_seg_label_test = []\n tmp_simplifided_label_test = []\n tmp_pronunce_label_test = []\n for k in tqdm(range(len(raw_data_test))):\n row=raw_data_test[k]\n if row.startswith('∥'):\n row=row[1:]\n words = row.replace('||', '|').split('|')\n for k in range(len(words)):\n\n word = words[k]\n\n for i in range(len(word)):\n tmp_data_test.append(word[i])\n # tmp_simplifided_label_test.append(to_half(to_sc(word[i])))\n # 轉換為BMES\n\n if i == 0 and len(word) > 1: # B 是一個詞的開始\n tmp_seg_label_test.append('B')\n elif i == len(word) - 1 and len(word) >= 2 and tmp_seg_label_test[-1] in ['B', 'M']: # E 是一個詞的結束\n tmp_seg_label_test.append('E')\n elif len(word) == 1 and i == 0: # S 自己就是一個單詞\n tmp_seg_label_test.append('S')\n elif len(word) >= 3 and tmp_seg_label_test[-1] in ['B', 'M']: # M 是一個詞的中間\n tmp_seg_label_test.append('M')\n\n if len(tmp_seg_label_test) > 0 and tmp_seg_label_test[-1] in ['E', 'S'] and k+1<len(words):\n if len(word) > 1 and (is_alphabet(word) or is_punctuation(word)):\n if word in ['。', '﹖']:\n pass\n elif random.random() < 0.6 or is_alphabet(word):\n tmp_data_test.append(' ')\n tmp_seg_label_test.append('S')\n\n if (k + 1 < len(raw_data_test) and not raw_data_test[k + 1].startswith('」')) and words[-1] in ['。', '﹖']:\n # process_traditional_label_test.append(tmp_data_test)\n\n tmp_data_test = to_half(''.join(tmp_data_test))\n tmp_seg_label_test = ''.join(tmp_seg_label_test)\n # if len(tmp_data_test)!=len(tmp_seg_label_test):\n # print('')\n tmp_simplifided_label_test = to_sc(tmp_data_test)\n\n process_data_test.append(tmp_data_test)\n process_seg_label_test.append(tmp_seg_label_test)\n process_simplifided_label_test.append(tmp_simplifided_label_test)\n tmp_data_test = []\n tmp_seg_label_test = []\n tmp_simplifided_label_test = []\n tmp_pronunce_label_test = []\n # else:\n # tmp_data_test.append('\\n')\n # tmp_simplifided_label_test.append('\\n')\n # tmp_seg_label_test.append('\\n')\n test_corpus = process_data_test\n test_seg_corpus = process_seg_label_test\n test_simplifided_corpus = process_simplifided_label_test\n\n\n 
data=TextSequenceDataset(corpus=corpus,sequence_length=64,sequence_start_at='section_start',object_type=ObjectType.corpus,symbol='input')\n seg_label = TextSequenceDataset(corpus=seg_corpus,sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='seg_label')\n simplifided_label = TextSequenceDataset(corpus=simplifided_corpus,sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='simplified_label')\n traditional_label = TextSequenceDataset(corpus= copy.deepcopy(corpus), sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='traditional_label')\n\n data_test=TextSequenceDataset(corpus=test_corpus,sequence_length=64,sequence_start_at='section_start',object_type=ObjectType.corpus,symbol='input')\n seg_test_label = TextSequenceDataset(corpus=test_seg_corpus,sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='seg_label')\n simplifided_test_label = TextSequenceDataset(corpus=test_simplifided_corpus,sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='simplified_label')\n traditional_test_label = TextSequenceDataset(corpus= copy.deepcopy(test_corpus), sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='traditional_label')\n\n\n chars = list(sorted(set(list( ''.join(corpus) +bpmf_phonetic+'\\n\\r\\t∥'+ ''.join(simplifided_corpus)+''.join(test_data)))))\n chars.insert(0, '[CLS]')\n chars.insert(1, '[SEP]')\n chars.insert(2, '[UNK]')\n chars.insert(3, '[PAD]')\n chars.insert(4, '[MASK]')\n\n data.vocabs =data_test.vocabs=simplifided_label.vocabs=simplifided_test_label.vocabs = chars\n data.text2index=data_test.text2index =simplifided_label.text2index=simplifided_test_label.text2index = dict((c, i) for i, c in enumerate(chars))\n data.index2text =data_test.index2text =simplifided_label.index2text=simplifided_test_label.index2text= dict((i, c) for i, c in enumerate(chars))\n traditional_label = copy.deepcopy(data)\n traditional_test_label = copy.deepcopy(data_test)\n traditional_label.object_type =traditional_test_label.object_type = ObjectType.sequence_label\n traditional_label.symbol =traditional_test_label.symbol = 'traditional_label'\n\n mask_label = copy.deepcopy(data)\n mask_test_label = copy.deepcopy(data_test)\n #mask_label.object_type =mask_test_label.object_type= ObjectType.corpus\n mask_label.symbol = mask_test_label.symbol = 'mask_label'\n\n\n\n nextword=copy.deepcopy(data)\n nextword_test = copy.deepcopy(data_test)\n nextword.object_type=nextword_test.object_type=ObjectType.sequence_label\n nextword.symbol=nextword_test.symbol='nextword_label'\n nextword.sequence_offset=nextword_test.sequence_offset=1\n\n label=ZipDataset(seg_label,nextword,simplifided_label,traditional_label,mask_label)\n label_test = ZipDataset(seg_test_label, nextword_test, simplifided_test_label, traditional_test_label, mask_test_label)\n provider=TextSequenceDataProvider(\n traindata=Iterator(data=data,label=label),\n testdata=Iterator(data=data_test,label=label_test))\n return provider\n #,sample_filter=lambda x:x[0][-1]==3\n else:\n return None", "def loadImages(self):\n for map_name, img in self.maps.items():\n if img is None or map_name not in __class__.input_tr:\n continue\n getCyclesImage(img)", "def initImages(self):\n pass", "def initImages(self):\n pass", "def initImages(self):\n pass", "def load_images(self, tmx):\n for image_data 
in tmx.images:\n if image_data:\n image, _, _ = image_data\n self.load_image(image)", "def load_images(image_name_to_label):\n images = []\n labels = []\n\n image_names = os.listdir(DEFAULT_IMG_PATH_EDITED)\n\n # Remove directories\n image_names.remove(\"COVID-19\")\n image_names.remove(\"Normal\")\n image_names.remove(\"ViralPneumonia\")\n\n # Load images from specific image directories (COVID-19, normal, viral pneumonia)\n def load_directory(directory):\n notifier.send(\" Loading from directory: \" + directory + \"...\")\n directory_path = DEFAULT_IMG_PATH_EDITED + os.sep + directory\n directory_image_names = os.listdir(directory_path)\n for i, image_name in enumerate(directory_image_names):\n base_image_name = get_base_image_name(image_name)\n query_name = directory + \"/\" + base_image_name\n query_name = query_name.lower().replace(\" \", \"\")\n if query_name in image_name_to_label:\n print(f\" {i / len(directory_image_names) * 100}% - [{image_name}]\")\n image_path = directory_path + os.sep + image_name\n image = get_processed_image(image_path)\n images.append(image)\n labels.append(image_name_to_label[query_name])\n load_directory(\"COVID-19\")\n load_directory(\"Normal\")\n load_directory(\"ViralPneumonia\")\n\n # Load images from default directory\n if LOAD_ALL_IMAGES:\n notifier.send(\" Loading from directory: default...\")\n for i, image_name in enumerate(image_names):\n base_image_name = get_base_image_name(image_name)\n if base_image_name in image_name_to_label:\n print(f\" {i / len(image_names) * 100}% - [{image_name}]\")\n image_path = DEFAULT_IMG_PATH_EDITED + os.sep + image_name\n image = get_processed_image(image_path)\n images.append(image)\n labels.append(image_name_to_label[base_image_name])\n\n return images, labels", "def load_test(filenames, short=600, max_size=1000, mean=(0.485, 0.456, 0.406),\n std=(0.229, 0.224, 0.225)):\n if isinstance(filenames, str):\n filenames = [filenames]\n imgs = [cv2.cvtColor(cv2.imread(f), cv2.COLOR_BGR2RGB) for f in filenames]\n return transform_test(imgs, short, max_size, mean, std)", "def load_imgsLabels(self, image_paths):\n \n# label = image_paths[-1]\n \n images = self.load_images(image_paths)\n \n images = self.resize_images(images)\n \n images_list = self.greyscale_images(images)\n\n return images_list", "def load_from_images(self):\n logging.debug(\"load_from_images called\")\n return True", "def _load_metadata(self):\n\n cub_dir = self.root / \"CUB_200_2011\"\n images_list: Dict[int, List] = OrderedDict()\n\n with open(str(cub_dir / \"train_test_split.txt\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\" \")\n for row in csv_reader:\n img_id = int(row[0])\n is_train_instance = int(row[1]) == 1\n if is_train_instance == self.train:\n images_list[img_id] = []\n\n with open(str(cub_dir / \"images.txt\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\" \")\n for row in csv_reader:\n img_id = int(row[0])\n if img_id in images_list:\n images_list[img_id].append(row[1])\n\n with open(str(cub_dir / \"image_class_labels.txt\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\" \")\n for row in csv_reader:\n img_id = int(row[0])\n if img_id in images_list:\n # CUB starts counting classes from 1 ...\n images_list[img_id].append(int(row[1]) - 1)\n\n with open(str(cub_dir / \"bounding_boxes.txt\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\" \")\n for row in csv_reader:\n img_id = int(row[0])\n if img_id in images_list:\n box_cub = [int(float(x)) for x in row[1:]]\n box_avl = 
[box_cub[1], box_cub[0], box_cub[3], box_cub[2]]\n # PathsDataset accepts (top, left, height, width)\n images_list[img_id].append(box_avl)\n\n images_tuples = []\n for _, img_tuple in images_list.items():\n images_tuples.append(tuple(img_tuple))\n self._images = images_tuples # type: ignore\n\n # Integrity check\n for row_check in self._images:\n filepath = self.root / CUB200.images_folder / row_check[0]\n if not filepath.is_file():\n if self.verbose:\n print(\"[CUB200] Error checking integrity of:\", filepath)\n return False\n\n return True", "def load_base_images(base_img):\n if base_img is not None:\n if not os.path.exists(base_img):\n base_img = os.path.join(LIGHTHOUSES_DIR, base_img)\n return (\n Image.open(os.path.join(base_img, 'on.gif')).convert('RGBA'),\n Image.open(os.path.join(base_img, 'off.gif'))\n )\n return None, None", "def load_images(files, open_fn=None):\n if open_fn is None:\n import cv2\n open_fn = cv2.imread\n images = list()\n for _file in files:\n images.append(np.asarray(open_fn(_file)))\n return images", "def load_data(path):\n\n\t# Create a list of all files ending in .jpg\n\tim_list = list_images(path, '.jpg')\n\n\t# Create labels\n\tlabels = [int(im_name.split('/')[-1][0]) for im_name in im_list]\n\tfeatures = []\n\n\t# Create features from the images\n\t# TOD.O: iterate over images paths\n\tfor im_path in im_list:\n\t\t# TOD.O: load image as a gray level image\n\t\tim = np.array(Image.open(im_path).convert('L'))\n\t\t# TOD.O: process the image to remove borders and resize\n\t\tim = process_image(im)\n\t\t# TOD.O: append extracted features to the a list\n\t\tfeatures.append(extract_features(im))\n\n\t# TOD.O: return features, and labels\n\treturn features, labels", "def test_load_jpg():\n parameters = {'path': 'green-dot.jpg'}\n\n images.load(parameters)", "def load_data(self):\n sets = ['train', 'val']\n images = []\n labels = []\n self.labels_dic = {}\n file = open(self.path + 'wnids.txt')\n train_labels = file.read().split()\n if self.train:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n for i in os.listdir(self.path + 'train/' + f + '/images/'):\n images.append(Image.open(self.path + 'train/' + f + '/images/' + i))\n labels.append(f)\n #image label n link to folder names of TinyImageNet\n self.labels_dic[f] = fn\n\n else:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n self.labels_dic[f] = fn\n file_val = open(self.path + 'val/val_annotations.txt')\n val_labels = file_val.read().split('\\n')\n for im in val_labels:\n im_data = im.split(\"\t\")[:2]\n if len(im_data) < 2:\n continue\n if im_data[1] in self.labels_dic:\n images.append(Image.open(self.path + 'val/images/' + im_data[0]))\n labels.append(im_data[1])\n\n self.images = images\n self.labels = labels", "def load_source_png_images(self, num_slice):\n if self.subject is None:\n print Console.WARNING + 'You need to specify a subject first' + Console.ENDC\n return\n data = [] \n for l in self.locations.LABELS:\n slice_file = self.locations.SOURCE_PNG % (l, num_slice)\n \n #print 'Loading Input Image \\t\\t%s'%slice_file \n slice_data = misc.imread(slice_file) \n data.append(slice_data)\n \n return data #images in the same order as labels", "def sample_images(opt, batches_done, monet_dataloader, photo_dataloader):\n G_AB.eval()\n G_BA.eval()\n real_A = next(iter(monet_dataloader))[0].cuda()\n fake_B = G_AB(real_A)\n real_B = next(iter(photo_dataloader))[0].cuda()\n fake_A = G_BA(real_B)\n # Arange images along x-axis\n real_A = make_grid(real_A, nrow=opt.batch_size, 
normalize=True)\n real_B = make_grid(real_B, nrow=opt.batch_size, normalize=True)\n fake_A = make_grid(fake_A, nrow=opt.batch_size, normalize=True)\n fake_B = make_grid(fake_B, nrow=opt.batch_size, normalize=True)\n # Arange images along y-axis\n image_grid = torch.cat((real_A, fake_B, real_B, fake_A), 1)\n save_image(image_grid, \"%s/%s/images/%s.png\" % (opt.checkpoints_dir, opt.name, batches_done), normalize=False)", "def load_images(self, target = \"standard\", path=OMNIGLOT_DATAPATH):\n X = []\n Y = []\n folderName = {}\n if target == \"standard\":\n trainFolders = [\"images_background\"]\n testFolders = [\"images_evaluation\"]\n elif target == \"minimal\":\n trainFolders = [\"images_background_small1\", \"images_background_small2\"]\n testFolders = [\"images_evaluation\"]\n \n if self.train:\n for trainFolder in trainFolders:\n folderPath = os.path.join(path, trainFolder)\n imgAllCount = 0 # this is counted for the whole images in all alphabet\n chaAllCount = 0 # this is counted for the whole characters in all alphabet\n\n for alphabet in sorted(os.listdir(folderPath)):\n alphabetPath = os.path.join(folderPath, alphabet)\n folderName[alphabet] = {'totalChar': 0, 'charIndex': [], 'totalImg': 0, 'imgIndex': []}\n \n imgAlphabetCount = 0 # this is counted for the number of images in this alphabet\n chaAlphabetCount = 0 # this is counted for the number of character in this alphabet\n\n folderName[alphabet]['charIndex'].append(chaAllCount)\n folderName[alphabet]['imgIndex'].append(imgAllCount)\n \n for letter in sorted(os.listdir(alphabetPath)):\n letterPath = os.path.join(alphabetPath, letter)\n \n for letterImage in os.listdir(letterPath):\n imagePath = os.path.join(letterPath, letterImage)\n image = mpimg.imread(imagePath)\n X.append(image)\n Y.append(chaAllCount)\n \n imgAlphabetCount += 1\n imgAllCount += 1\n\n chaAlphabetCount += 1\n chaAllCount += 1\n \n folderName[alphabet]['totalChar'] = chaAlphabetCount\n folderName[alphabet]['totalImg'] = imgAlphabetCount\n folderName[alphabet]['charIndex'].append(chaAllCount-1)\n folderName[alphabet]['imgIndex'].append(imgAllCount-1)\n \n X = np.stack(X) \n X = X.reshape(-1, IMAGES_PER_CHARACTER, X.shape[1], X.shape[2])\n return X, np.stack(Y), folderName", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n # all_files = tf.gfile.Glob(os.path.join(input_dir, '*.png'))\n # test_files = [all_files[idx] for x in np.random.choice(len(all_files), 200, replace=False)]\n # for filepath in test_files:\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n with tf.gfile.Open(filepath) as f:\n image = imread(f, mode='RGB').astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def load_images(self, image_paths):\n \n fill_list = []\n \n for idx in tqdm(range(len(image_paths))):\n path = image_paths[idx]\n yield cv2.imread(path)", "def load_dataset(data_dir, img_size):\n global input_set\n global test_set\n\n imgs = []\n img_files = os.listdir(data_dir)\n for img in img_files:\n # try:\n tmp = scipy.misc.imread(data_dir + \"/\" + img)\n x, y, z = tmp.shape # shape : width * length * chanel\n coords_x = int(x / img_size) # 坐标\n coords_y = int(y / img_size) #\n 
coords = [(q, r) for q in range(coords_x) for r in range(coords_y)] # 列表 x * y\n for coord in coords:\n imgs.append((data_dir + \"/\" + img, coord)) # 为列表添加文件目录\n # except BaseException:\n # print(\"oops\")\n test_size = min(10, int(len(imgs) * 0.2))\n random.shuffle(imgs)\n test_set = imgs[:test_size]\n train_set_X = imgs[test_size:][:200]\n train_set = imgs[test_size:][200:400]\n return", "def load_images(self, image_path):\n X_train = []\n\n # Load all files from the image path using Image.open.\n for i in recursive_list(image_path):\n # Open images as ???\n img = Image.open(i)\n # Convert to NP array.\n img = np.asarray(img)\n # Append them into higher order array.\n if img.shape == (128, 128, 3):\n X_train.append(img)\n\n # return all the images concatenated as a 4D array\n return np.asarray(X_train)", "def loadImages(loadPath):\n img_array = []\n for filename in glob.glob(loadPath):\n img = cv2.imread(filename)\n height, width, layers = img.shape\n size = (width, height)\n img_array.append(img)\n\n return img_array", "def load_groundtruths(folder_path, num_images):\n imgs = []\n for i in range(1, num_images + 1):\n image_name = \"satImage_%.3d\" % i\n image_path = folder_path + image_name + \".png\"\n if os.path.isfile(image_path):\n print('Loading ' + image_path)\n img = mpimg.imread(image_path)\n # See if it is better to use dtype = int\n hot_img = convert_image_to_hot(img)\n imgs.append(hot_img)\n else:\n print('File ' + image_path + ' does not exist')\n #imgs = np.around(imgs) # Uncomment if we want to round values.\n imgs_array = np.asarray(imgs)\n return imgs_array", "def load_background_image_files(self, folder_path):\n if self.is_binary:\n print(':WARNING: background image files are not loaded for binary '\n 'generation mode.')\n else:\n self.augment.add_background_image_noises(folder_path)", "def load_dataset(path_test, width, height):\n tot_images = 0\n for label in listdir(path_test):\n label_full = join(path_test, label)\n for img_name in listdir(label_full):\n tot_images += 1\n\n # allocate the memory\n # THE DTYPE is float, should be the right one\n all_images = np.zeros((tot_images, width, height, 3))\n\n true_labels = []\n num_images = 0\n for label in listdir(path_test):\n label_full = join(path_test, label)\n for img_name in listdir(label_full):\n # for img_name in listdir(label_full)[:10]:\n img_name_full = join(label_full, img_name)\n print(f\"Opening {img_name_full} {width}\")\n\n image = cv2.imread(img_name_full)\n\n image = cv2.resize(image, (width, height))\n\n # scale the pixel values to [0, 1]\n image = image.astype(\"float\") / 255.0\n\n all_images[num_images, :, :, :] = image\n\n num_images += 1\n true_labels.append(label)\n\n print(f\"All_images.shape {all_images.shape}\")\n\n # cv2.imshow('Resized all_images[0]', all_images[0])\n # cv2.waitKey(0)\n\n return all_images, true_labels", "def _load_sample(self, image_path=None, label_path=None, preprocess=True, as_tuple=False, is_training=False):\n # Read in RGB image, if any\n if image_path:\n image1, image2 = imread(image_path[0]), imread(image_path[1])\n assert(len(image1.shape) == 3 and image1.shape[2] == 3 and len(image2.shape) == 3 and image2.shape[2] == 3)\n\n # Read in label, if any\n if label_path:\n label = flow_read(label_path)\n assert (len(label.shape) == 3 and label.shape[2] == 2)\n else:\n label = None\n\n # Return image and/or label\n if label_path:\n if image_path:\n if as_tuple:\n return (image1, image2), label\n else:\n return np.array([image1, image2]), label\n else:\n return label\n 
else:\n if image_path:\n if as_tuple:\n return (image1, image2)\n else:\n return np.array([image1, image2])", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n if(FLAGS.checkpoint_file_name==\"vgg_16.ckpt\")or(FLAGS.checkpoint_file_name==\"vgg_19.ckpt\")or(FLAGS.checkpoint_file_name==\"resnet_v1_50.ckpt\")or(FLAGS.checkpoint_file_name==\"resnet_v1_101.ckpt\")or(FLAGS.checkpoint_file_name==\"resnet_v1_152.ckpt\"):\n with tf.gfile.Open(filepath) as f:\n image = imread(f, mode='RGB').astype(np.float)\n images[idx, :, :, :] = image\n else:\n with tf.gfile.Open(filepath) as f:\n image = imread(f, mode='RGB').astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def load(self,train_ratio=.9,seed=123):\n\t\tdata_dir = cfg.DATA_DIR\n\n\t\tif isinstance(DATASETS.get(self.data_name),dict):\n\t\t\t\n\t\t\tif len(DATASETS.get(self.data_name))==2: # Relevant only for MNIST\n\t\t\t\ttrain_fpath = os.path.join(data_dir,DATASETS.get(self.data_name).get('train'))\n\t\t\t\ttest_fpath = os.path.join(data_dir,DATASETS.get(self.data_name).get('test'))\n\t\t\t\n\t\t\telse: # Only relevant for binarized MNIST\n\t\t\t\ttrain_fpath = os.path.join(data_dir,DATASETS.get(self.data_name).get('train'))\n\t\t\t\ttest_fpath = os.path.join(data_dir,DATASETS.get(self.data_name).get('test'))\n\t\t\t\tvalid_fpath = os.path.join(data_dir,DATASETS.get(self.data_name).get('valid'))\n\t\telse:\n\t\t\tfpath = os.path.join(data_dir,DATASETS.get(self.data_name))\n\n\t\tprint(f\"Trying to load {self.data_name} from directory(ies):\")\n\t\t\n\t\tif self.data_name == \"freyfaces\":\n\t\t\t# Load freyfaces\n\t\t\tprint(f\"...from {os.path.join(data_dir,fpath)}\")\n\t\t\tf = open(fpath,'rb')\n\t\t\tdata = pickle.load(f,encoding='latin1')\n\t\t\tf.close()\n\n\t\t\t# This block is directly from the VRBound repository\n\t\t\tnp.random.seed(seed)\n\t\t\tnp.random.shuffle(data)\n\t\t\tif train_ratio==1 or (0>train_ratio or 1<train_ratio):\n\t\t\t\tprint(f\"Train split ratio {train_ratio} or test value is invalid!\")\n\t\t\t\traise Exception\n\t\t\tnum_train = int(train_ratio* data.shape[0])\n\n\t\t\tdata_train = data[:num_train]\n\t\t\tdata_test = data[num_train:]\n\t\t\t# End of copy\n\n\t\telif self.data_name == \"silhouettes\":\n\t\t\t# Load silhouettes data\n\t\t\tprint(f\"...from {os.path.join(data_dir,fpath)}\")\n\n\t\t\t# These lines are also from VRBound repository\n\t\t\tdata = loadmat(fpath) \n\t\t\tdata = 1-data.get('X')\n\n\t\t\t# This block is directly from the VRBound repository\n\t\t\tnp.random.seed(seed)\n\t\t\tnp.random.shuffle(data)\n\t\t\tif train_ratio==1 or (0>train_ratio or 1<train_ratio):\n\t\t\t\tprint(f\"Train split ratio {train_ratio} or test value is invalid!\")\n\t\t\t\traise Exception\n\t\t\tnum_train = int(train_ratio* data.shape[0])\n\n\n\t\t\tdata_train = data[:num_train]\n\t\t\tdata_test = data[num_train:]\n\t\t\t# End of copy\n\n\t\telif self.data_name == \"mnist\":\n\t\t\tprint(\"MNIST data is already train/test split - training ratio input ignored!\")\n\t\t\tprint(f\"...from {os.path.join(data_dir,DATASETS.get(self.data_name)['train'])}\")\n\n\t\t\tdata_train, 
_ = MNIST(train_fpath).load_training() # We don't care about what the labels are; overwrite\n\t\t\tdata_test, _ = MNIST(test_fpath).load_testing()\n\n\t\telif self.data_name == \"mnist_binary\":\n\t\t\tprint(\"MNIST data is already train/test split - training ratio input ignored!\")\n\t\t\tprint(f\"...from {os.path.join(train_fpath.split('/')[-2])}\")\n\t\t\t# This is directly from the iwae codebase\n\t\t\tdef lines_to_np_array(lines):\n\t\t\t return np.array([[int(i) for i in line.split()] for line in lines])\n\t\t\twith open(train_fpath) as f:\n\t\t\t lines = f.readlines()\n\t\t\ttrain_data = lines_to_np_array(lines).astype('float32')\n\t\t\twith open(test_fpath) as f:\n\t\t\t lines = f.readlines()\n\t\t\tvalidation_data = lines_to_np_array(lines).astype('float32')\n\t\t\twith open(valid_fpath) as f:\n\t\t\t lines = f.readlines()\n\t\t\tdata_test = lines_to_np_array(lines).astype('float32')\n\n\t\t\tdata_train= np.concatenate([train_data, validation_data], axis=0)\n\n\t\telif self.data_name == \"omniglot\":\n\t\t\tprint(f\"...from {os.path.join(data_dir,fpath)}\")\n\t\t\tprint(\"Omniglot data is already train/test split - training ratio input ignored!\")\n\n\t\t\tdata = loadmat(fpath)\n\n\t\t\t# From iwae repository\n\t\t\tdata_train = data['data'].T.astype('float32').reshape((-1, 28, 28)).reshape((-1, 28*28), order='F') \n\t\t\tdata_test = data['testdata'].T.astype('float32').reshape((-1, 28, 28)).reshape((-1, 28*28), order='F')\n\t\t\n\t\treturn data_train, data_test", "def _init_img_dataset(self, dataset_path):\n\n # ==\n # Define the classes used in the various states\n # form: (state class : cifar label class)\n class_dict = {\n 'initial': 'automobile',\n 'choice_1': 'dog',\n 'choice_2': 'cat',\n 'corridor': 'bird',\n }\n\n # ==\n # Download / initialize dataset\n ds = CIFAR10(dataset_path, train=self.training,\n download=True)\n\n # Get the CIFAR class index for each of the state classes\n cifar_class_dict = {\n k: ds.class_to_idx[class_dict[k]] for k in class_dict\n }\n\n # Iterate over the CIFAR dataset and get the idxs to each class\n cifar_indexes = {k: [] for k in class_dict}\n for i in range(len(ds)):\n cur_cifar_class = ds[i][1]\n for k in class_dict:\n if cur_cifar_class == cifar_class_dict[k]:\n cifar_indexes[k].append(i)\n\n # Manually sub-sample choice classes\n for k in ['choice_1', 'choice_2']:\n n_imgs = min(self.num_ds_imgs, len(cifar_indexes[k]))\n rng = np.random.default_rng()\n choice_imgs = rng.choice(cifar_indexes[k], size=n_imgs,\n replace=False)\n cifar_indexes[k] = choice_imgs\n\n # Manually shuffle the corridor class\n rng = np.random.default_rng()\n corri_img_shufIdxs = rng.choice(cifar_indexes['corridor'],\n size=len(cifar_indexes['corridor']),\n replace=False)\n cifar_indexes['corridor'] = corri_img_shufIdxs\n\n # ==\n # Construct the data subset dictionary\n ds_dict = {}\n for k in class_dict:\n ds_dict[k] = Subset(ds, cifar_indexes[k])\n\n return ds_dict", "def load_images(self, files, sub_dir):\n\n for f in files:\n self.images.append(Image(f, sub_dir))", "def load_gray(self, **kwargs):\n print('Loading monochrome images from sequence ' +\n self.sequence + '...')\n\n imL_path = os.path.join(self.sequence_path, 'image_0', '*.png')\n imR_path = os.path.join(self.sequence_path, 'image_1', '*.png')\n\n imL_files = sorted(glob.glob(imL_path))\n imR_files = sorted(glob.glob(imR_path))\n\n # Subselect the chosen range of frames, if any\n if self.frame_range:\n imL_files = [imL_files[i] for i in self.frame_range]\n imR_files = [imR_files[i] for i in 
self.frame_range]\n\n print('Found ' + str(len(imL_files)) + ' image pairs...')\n\n self.gray = utils.load_stereo_pairs(imL_files, imR_files, **kwargs)\n\n print('done.')", "def loadData(self):\n batch_size = 256\n \n #if self.conv_sg == True:\n # batch_size = 1 \n \n download = True\n root = self.root + self.dataset\n if self.dataset == \"MNIST\": \n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\n trainset = torchvision.datasets.MNIST(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.MNIST(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR10\":\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465,), (0.2023, 0.1994, 0.2010,))])\n trainset = torchvision.datasets.CIFAR10(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR10(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR100\":\n transform = transforms.Compose([transforms.ToTensor()])\n trainset = torchvision.datasets.CIFAR100(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR100(root, train=False, download=download, transform=transform)\n \n \n trainloader = torch.utils.data.DataLoader(trainset, batch_size = batch_size,\n shuffle=False, num_workers=0, pin_memory = False)\n \n testloader = torch.utils.data.DataLoader(testset, batch_size= batch_size,\n shuffle=False, num_workers=2, pin_memory = False)\n \n return trainloader, testloader", "def __initDataFromImages(self):\n #Check if the local_db exist\n initial_dirs = os.listdir(os.getcwd())\n is_db_empty = False\n if len(os.listdir(self.base_dir)) == 1: #Empty here means no person data\n [images_dir] = os.listdir(self.base_dir)\n is_db_empty = images_dir == cfg.local[\"IMG_DIR\"]\n if cfg.local[\"DEFAULT_IMGS_DIR\"] in initial_dirs and is_db_empty:\n default_path = os.path.join(os.getcwd(), cfg.local[\"DEFAULT_IMGS_DIR\"])\n self.X, self.y = loadDataFromImagesPath(self.detector, default_path)\n self.le = LabelEncoder()\n #Nothing relate to mapping name to dir here, we don't care about\n #This data because of the user doesn't exist in the database\n self.__savePreProcessedData()", "def load_image(self, image_index):\n\t\t\timage_info = self.coco.loadImgs(self.image_ids[image_index])[0]\n\t\t\tpath = os.path.join(self.data_dir, 'images', self.set_name, image_info['file_name'])\n\t\t\treturn read_image_bgr(path)", "def from_images(cls, specimen_id, image_files):\n views = [LoadingView(v, img) for v, img in image_files]\n return cls(specimen_id, views)", "def explore_data():\n labels = [\"vehicles\", \"non-vehicles\"]\n labelmap = {0: \"vehicles\", 1: \"non-vehicles\"}\n vehicles_glob = os.path.join(data_dir, \"vehicles\", \"**\", \"*.png\")\n nonvehicles_glob = os.path.join(data_dir, \"non-vehicles\", \"**\", \"*.png\")\n class_fnames = [\n glob.glob(vehicles_glob, recursive = True),\n glob.glob(nonvehicles_glob, recursive = True)]\n n_samples = [len(fnames) for fnames in class_fnames]\n shapes = []\n samples = []\n print(table_format([\"label\", \"size\", \"shape\"], header = True))\n for label, fnames in enumerate(class_fnames):\n indices = np.random.choice(len(fnames), 4*10, replace = False)\n for i in indices:\n fname = fnames[i]\n img = cv2.imread(fname)\n samples.append(img)\n shape = img.shape\n shapes.append(shape)\n print(table_format([labels[label], n_samples[label], 
shapes[label]]))\n\n samples = np.stack(samples)\n samples = tile(samples, 2*4, 10)\n cv2.imwrite(os.path.join(out_dir, \"datasamples.png\"), samples)\n\n return class_fnames, labelmap", "def load_nist(block=0):\n dataset_directory = os.path.join(root_directory,'NIST19')\n if(block == 2):\n bitmaps = np.load(dataset_directory+'/train_nist19_bitmaps_lim10000.npz')['bitmaps']\n targets = np.load(dataset_directory+'/train_nist19_targets_lim10000.npz')['targets']\n names = np.load(dataset_directory+'/train_nist19_names_lim10000.npz')['names']\n else:\n if(block == 1):\n bitmaps = np.load(dataset_directory+'/train_nist19_bitmaps_1000.npz')['bitmaps']\n targets = np.load(dataset_directory+'/train_nist19_targets_1000.npz')['targets']\n names = np.load(dataset_directory+'/train_nist19_names_1000.npz')['names']\n else:\n directory_class = os.path.join(dataset_directory,'by_class')\n list_of_class = os.listdir(directory_class)\n list_of_class.sort()\n bitmaps = [];\n targets = [];\n names = [];\n id_class = 0;\n\n for a_class in list_of_class:\n directory_subclass = os.path.join(directory_class,a_class)\n list_of_subclass = [fn for fn in os.listdir(directory_subclass) if (\"train\" in fn)]\n sorted(list_of_subclass)\n for folder_of_images in list_of_subclass:\n directory_of_images = os.path.join(directory_subclass,folder_of_images)\n list_of_images = os.listdir(directory_of_images)\n sorted(list_of_images)\n for filename in list_of_images:\n img = np.where(io.imread(os.path.join(directory_of_images,filename),True) > 0, 0, 1)\n bitmaps.append(Bitmap(img.astype(np.int8)))\n targets.append(id_class);\n id_class += 1\n names.append(binascii.unhexlify(a_class).decode('UTF-8'));\n\n\n return {'bitmaps': bitmaps, 'targets': targets, 'names':names}", "def GenerateImageSamples(self):\n self.generateImageSamples = GenerateImageSamples(self.trainDataDir,\n self.testDataDir,\n self.trainClassDir,\n self.testClassDir,\n self.cfgData)\n\n self.generateImageSamples.LoadDataSave(self.trainDataDir, 'train')\n # self.generateImageSamples.CopyFiles(self.trainClassDir)\n\n self.generateImageSamples.LoadDataSave(self.testDataDir, 'test')\n # self.generateImageSamples.CopyFiles(self.testClassDir)", "def load_test_data():\n X = []\n y = []\n for fname in os.listdir(test_dir):\n label = int(fname.split(\"_\")[0])\n img = plt.imread(os.path.join(test_dir, fname))\n X.append(img)\n y.append(label)\n X = np.stack(X)\n y = np.stack(y)\n return X, y", "def load_svhn_images(folder_path):\n images = []\n for file in os.listdir(folder_path):\n if file.endswith(\".png\"):\n image = Image.open(file)\n image.load()\n # Load image data as 1 dimensional array\n # We're using float32 to save on memory space\n feature = np.array(image, dtype=np.float32)\n images.append(feature)\n\n return images", "def load_images(image_filename):\n\n # Write code here to loop over image data and populate DB.", "def load_images(folder_path, num_images):\n imgs = np.zeros(shape=[num_images, 400, 400, 3])\n for i in range(1, num_images + 1):\n image_name = \"satImage_%.3d\" % i\n image_path = folder_path + image_name + \".png\"\n if os.path.isfile(image_path):\n print('Loading ' + image_path)\n img = mpimg.imread(image_path)\n\n #imgs[i - 1] = np.asarray(img).reshape(400, 400, 3)\n imgs[i - 1] = img.reshape(400, 400, 3)\n else:\n print('File ' + image_path + ' does not exist')\n return imgs", "def load_fashion_mnist():\n # List of image file names\n dataset_directory = os.path.join(root_directory,'Fashion_MNIST')\n filenames = 
os.listdir(dataset_directory)\n filenames.sort()\n\n # List of numpy array; each row is a Image of the dataset\n data = []\n\n # Numpy array of labels associated to each class of image\n target = np.empty([len(filenames), ])\n\n previous_label = ''\n class_num = -1\n index = 0\n\n for index, filename in enumerate(filenames):\n data.append(Bitmap(io.imread(os.path.join(dataset_directory, filename))))\n file_label = filename.split('-')[0]\n\n if(previous_label != file_label):\n previous_label = file_label\n class_num += 1\n target[index] = class_num\n else:\n target[index] = class_num\n\n return {'bitmaps': data, 'targets': target}", "def load_images(input_dir, batch_shape=[2000,299,299,3]):\n \n filenames = []\n idx = 0\n filepaths=tf.gfile.Glob(os.path.join('./', '*.png'))\n print(len(filepaths))\n print(filepaths)\n batch_shape[0]=len(filepaths)\n batch_size = batch_shape[0]\n print(batch_shape)\n print(\"ZZZ\")\n images = np.zeros(batch_shape, dtype=np.float32)\n \n for filepath in filepaths:\n# with tf.gfile.Open(filepath) as f:\n# image = np.array(Image.open(f).convert('RGB')).astype(np.float) / 255.0\n image = np.array(scipy.misc.imresize(scipy.misc.imread(filepath),(299,299)),dtype=np.float32)/255\n \n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image -0.5 #* 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n return filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n return filenames, images", "def sample_images(images, n=1000):\n ix = np.random.choice(np.arange(len(images)), size=n, replace=False)\n sample = labels.loc[ix, [\"img_name\", \"breed\"]]\n assert len(sample) == n\n return sample", "def read_local(path):\n files = os.listdir(path)\n imgs = []\n for f in files:\n if f.endswith(\".tiff\") or f.endswith(\".tif\"):\n img = Image.open(os.path.join(path, f))\n imgs.append(np.array(img))\n return imgs", "def load_letter(folder, min_num_images):\n image_files = os.listdir(folder)\n dataset = np.ndarray(shape=(len(image_files), image_size, image_size),\n dtype=np.float32)\n image_index = 0\n print(folder)\n for image in os.listdir(folder):\n image_file = os.path.join(folder, image)\n try:\n image_data = (ndimage.imread(image_file).astype(float) - \n pixel_depth / 2) / pixel_depth\n if image_data.shape != (image_size, image_size):\n raise Exception('Unexpected image shape: %s' % str(image_data.shape))\n dataset[image_index, :, :] = image_data\n image_index += 1\n except IOError as e:\n print('Could not read:', image_file, ':', e, '- it\\'s ok, skipping.')\n \n num_images = image_index\n dataset = dataset[0:num_images, :, :]\n if num_images < min_num_images:\n raise Exception('Many fewer images than expected: %d < %d' %\n (num_images, min_num_images))\n \n print('Full dataset tensor:', dataset.shape)\n print('Mean:', np.mean(dataset))\n print('Standard deviation:', np.std(dataset))\n return dataset", "def InitDataset(self):\n train_txt = 'ImageSets/Main/train.txt'\n val_txt = 'ImageSets/Main/val.txt'\n annotations = \"Annotations\"\n jpegimages = \"JPEGImages\"\n images_path = train_txt if (self.is_train) else val_txt \n images_path = readTxt(os.path.join(self.path, images_path))\n images_path.pop(-1)\n # rawdata format: [path_2_image, path_2_xml]\n rawData = list()\n for each in images_path:\n xml = os.path.join(self.path, annotations, each + '.xml')\n jpeg = os.path.join(self.path, jpegimages, each + '.jpg')\n rawData.append([jpeg, xml])\n 
return rawData", "def load_image(self, **kwargs):\n ...", "def load_data():\n # Dictionary mapping image names to labels\n image_name_to_label = dict()\n\n # Store labels associated with image names\n notifier.send(\" Reading metadata...\")\n with open(\"data/metadata.csv\") as file: # Original dataset\n # Use images for normal, virus (unknown type), COVID-19, SARS\n metadata_contents = csv.DictReader(file)\n for row in metadata_contents:\n if row[\"Label\"].lower() == \"normal\":\n label = 2\n elif row[\"Label_2_Virus_category\"].lower() == \"covid-19\":\n label = 0\n elif row[\"Label_1_Virus_category\"].lower() == \"virus\":\n label = 1\n else:\n continue\n image_name_to_label[row[\"X_ray_image_name\"]] = label\n with open(\"data/metadata2.csv\") as file: # GitHub dataset\n # Use COVID-19, SARS\n metadata_contents = csv.DictReader(file)\n for row in metadata_contents:\n if row[\"filename\"] in image_name_to_label: # Image already added\n continue\n if \"covid-19\" in row[\"finding\"].lower():\n label = 0\n elif row[\"finding\"].lower() == \"sars\":\n label = 1\n else:\n continue\n image_name_to_label[row[\"filename\"]] = label\n with open(\"data/metadata_COVID-19.csv\") as file: # Additional COVID-19 images\n metadata_contents = csv.DictReader(file)\n for row in metadata_contents:\n name = \"COVID-19/\" + row[\"FILE NAME\"] + \".\" + row[\"FORMAT\"]\n image_name_to_label[name.lower().replace(\" \", \"\")] = 0\n with open(\"data/metadata_ViralPneumonia.csv\") as file: # Additional virus images\n metadata_contents = csv.DictReader(file)\n for row in metadata_contents:\n name = \"ViralPneumonia/\" + row[\"FILE NAME\"].replace(\"-\", \"(\") + \").\" + row[\"FORMAT\"]\n image_name_to_label[name.lower().replace(\" \", \"\")] = 1\n with open(\"data/metadata_Normal.csv\") as file: # Additional normal images\n metadata_contents = csv.DictReader(file)\n for row in metadata_contents:\n name = \"Normal/\" + row[\"FILE NAME\"].replace(\"-\", \"(\") + \").\" + row[\"FORMAT\"]\n image_name_to_label[name.lower().replace(\" \", \"\")] = 2\n\n notifier.send(\" Loading images...\")\n images, labels = load_images(image_name_to_label)\n\n notifier.send(\" Splitting data...\")\n return split_data(images, labels)", "def load_images(folder_path):\n images = []\n # first make image paths list\n # cv2 can take in wildcard args if using glob\n image_paths = glob.glob(folder_path + \"/*\")\n for path in image_paths:\n images.append(cv2.imread(path))\n return (images, image_paths)", "def load_data(self):\n self.tif_file = self._find_tif_file()\n if self.with_labeling is not None:\n self.colabel_file = self._find_colabeled_file()\n self.colabel_stack = self._load_colabeled_img()\n self.dff, self.indices = self._populate_dff_data()\n self.loaded = True", "def load_test_dataset(self):\n test_data_path = \"testdata\"\n root = Path(test_data_path)\n classes = sorted([j.name.split('/')[-1] for j in root.iterdir()])\n print(classes)\n\n transform = transforms.Compose([\n transforms.Resize(300),\n transforms.RandomHorizontalFlip(),\n transforms.CenterCrop(250),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.6071, 0.4828, 0.3934], std=[0.2845, 0.3187, 0.3240])\n ])\n\n dataset = datasets.ImageFolder(test_data_path, transform=transform)\n testloader = DataLoader(dataset, batch_size=4, shuffle=True)\n print(\"Loaded data\")\n return testloader", "def load_cifar_data():\n train_loader = torch.utils.data.DataLoader(\n torchvision.datasets.CIFAR10('cifarfiles/', train=True, download=True,\n 
transform=torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize(\n (0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])),\n batch_size=batch_size_train, shuffle=True, pin_memory=True)\n\n test_loader = torch.utils.data.DataLoader(\n torchvision.datasets.CIFAR10('cifarfiles/', train=False, download=True,\n transform=torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize(\n (0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])),\n batch_size=batch_size_test, shuffle=True, pin_memory=True)\n return train_loader, test_loader", "def ensure_loaded(self, frames):\n if isinstance(frames, list):\n return [self.ensure_np_array(frame) for frame in frames]\n\n elif isinstance(frames, str):\n return Image.open(frames)\n\n elif isinstance(frames, np.ndarray):\n return Image.fromarray(frames)\n \n return frames", "def load_images(pool, entries):\n start = time.perf_counter()\n images = pool.map(ski.io.imread, [x.path for x in entries])\n logger.info(\"Loaded %i images:\", len(images))\n util.pprint_log([x.name for x in entries], logger.info)\n logger.info(util.elapsed(start))\n logger.info(\"\\n\")\n return images", "def populate_image_lists():\r\n with os.scandir(os.path.join(dir_path, \"inputs\", \"type_a\")) as filepaths:\r\n for path in filepaths:\r\n extension = os.path.splitext(path)[1].lower()\r\n if extension == \".png\" or extension == \".jpg\":\r\n images_a.append(path.path)\r\n with os.scandir(os.path.join(dir_path, \"inputs\", \"type_b\")) as filepaths:\r\n for path in filepaths:\r\n extension = os.path.splitext(path)[1].lower()\r\n if extension == \".png\" or extension == \".jpg\":\r\n images_b.append(path.path)", "def load_set(directName, n = np.inf):\n # Loaded a set of images\n\n files = os.listdir(directName)\n n = min(n, len(files))\n #n = len(files)\n print(\"Loading \" + str(n) + \" images\")\n imgs = [mpimg.imread(directName + files[i]) for i in range(n)]\n\n return imgs", "def load_training_images(path_train_images: str, path_train_labels: str, max_projection: bool):\n X = sorted(glob(path_train_images))\n Y = sorted(glob(path_train_labels))\n assert len(X) > 0 and len(Y) > 0, \"Error: No images found in either X or Y.\"\n assert all(Path(x).name == Path(y).name for x, y in zip(X, Y)), \"Error: Filenames in X and Y do not match.\"\n X = list(map(imread, X))\n Y = list(map(imread, Y))\n n_channel = 1 if X[0].ndim == 3 else X[0].shape[-1]\n axis_norm = (0, 1, 2) # normalize channels independently\n # axis_norm = (0,1,2,3) # normalize channels jointly\n if n_channel > 1:\n print(\n \"Normalizing image channels %s.\" % ('jointly' if axis_norm is None or 3 in axis_norm else 'independently'))\n sys.stdout.flush()\n\n X = [normalize(x, 1, 99.8, axis=axis_norm) for x in tqdm(X)]\n Y = [fill_label_holes(y) for y in tqdm(Y)]\n if len(X) == 1:\n print(\n \"Warning: only one training data was provided! 
It will be used for both training and validation purposes!\")\n X = [X[0], X[0]]\n Y = [Y[0], Y[0]]\n rng = np.random.RandomState(42)\n ind = rng.permutation(len(X))\n n_val = max(1, int(round(0.15 * len(ind))))\n ind_train, ind_val = ind[:-n_val], ind[-n_val:]\n X_val, Y_val = [X[i] for i in ind_val], [Y[i] for i in ind_val]\n X_trn, Y_trn = [X[i] for i in ind_train], [Y[i] for i in ind_train]\n print('number of images: %3d' % len(X))\n print('- training: %3d' % len(X_trn))\n print('- validation: %3d' % len(X_val))\n print(f\"{X[0].shape=}\")\n i = 0\n img, lbl = X[i], Y[i]\n assert img.ndim in (3, 4)\n img = img if img.ndim == 3 else img[..., :3]\n if max_projection:\n plot_img_label_max_projection(img, lbl)\n else:\n plot_img_label_center_slice(img, lbl)\n\n return X, Y, X_trn, Y_trn, X_val, Y_val, n_channel", "def get_train(self, preprocess=False):\n return self._dataset(self._directory, 'images_background_small1', preprocess)", "def getimgs():", "def load_test_batch(self, image_sequence_names):\n def _parse_test_img(img_path):\n with tf.device('/cpu:0'):\n img_buffer = tf.read_file(img_path)\n image_decoded = tf.image.decode_jpeg(img_buffer)\n return image_decoded\n\n image_dataset = tf.data.Dataset.from_tensor_slices(image_sequence_names).map(\n _parse_test_img).batch(self.batch_size).prefetch(self.batch_size*4)\n iterator = image_dataset.make_initializable_iterator()\n return iterator", "def load_images(subdir):\n with perform(\n name='dbutils load_images',\n before='Loading images to gallery',\n fail='Error occured while loading images to gallery',\n after='Images succesfully loaded'\n ):\n load_dummy_images(subdir)", "def sample_images(batches_done):\n val_imgs, val_labels = next(iter(val_dataloader))\n val_imgs = Variable(val_imgs.type(Tensor))\n val_labels = Variable(val_labels.type(Tensor))\n img_samples = None\n for i in range(10):\n img, label = val_imgs[i], val_labels[i]\n # Repeat for number of label changes\n imgs = img.repeat(c_dim, 1, 1, 1)\n labels = label.repeat(c_dim, 1)\n # Make changes to labels\n for sample_i, changes in enumerate(label_changes):\n for col, val in changes:\n labels[sample_i, col] = 1 - labels[sample_i, col] if val == -1 else val\n\n # Generate translations\n gen_imgs = generator(imgs, labels)\n # Concatenate images by width\n gen_imgs = torch.cat([x for x in gen_imgs.data], -1)\n img_sample = torch.cat((img.data, gen_imgs), -1)\n # Add as row to generated samples\n img_samples = img_sample if img_samples is None else torch.cat((img_samples, img_sample), -2)\n\n save_image(img_samples.view(1, *img_samples.shape), \"images/%s.png\" % batches_done, normalize=True)", "def load_many_images(paths):\r\n \r\n lpop = __g.pop\r\n \r\n for k in __g.keys()[1:]:\r\n lpop(k)\r\n \r\n if type(paths) == str or type(paths) == tuple and len(paths) == 2 and type(paths[0]) == int:\r\n __g[1] = Surface(paths)\r\n elif type(paths) == list:\r\n for p in range(1, len(paths) + 1):\r\n __g[p] = Surface(paths[p-1])", "def get_train_test_loaders(self, num_workers=2):\n print('Loading the image data...')\n \n train_path_info, test_path_info = self.get_train_test_info()\n\n train_transform = transforms.Compose([transforms.Resize((self.image_width, self.image_height)),\n transforms.RandomAffine(10,translate=(0.1,0.1)),\n transforms.ToTensor()])\n\n test_transform = transforms.Compose([transforms.Resize((self.image_width, self.image_height)),\n transforms.ToTensor()])\n\n trainset = PocovidDataset(train_path_info, transform = train_transform)\n testset = 
PocovidDataset(test_path_info, transform = test_transform)\n \n self.class_map = trainset.get_class_map()\n self.classes = [self.class_map[key] for key in sorted(self.class_map)]\n\n train_loader = torch.utils.data.DataLoader(trainset, num_workers=num_workers, shuffle=True,\n batch_size=self.batch_size, drop_last=True)\n\n test_loader = torch.utils.data.DataLoader(testset, num_workers=num_workers, shuffle=True,\n batch_size=self.batch_size)\n \n print('Image data is loaded with fold {} as the test data'.format(self.fold))\n print('Number of training images:', len(trainset))\n print('Number of testing images:', len(testset))\n print('*'*100)\n print('The classes are:', self.classes)\n print('*'*100)\n \n return train_loader, test_loader", "def load_data(model, set='train', img_rows=128, img_cols=128):\n print('#' * 30)\n print('Loading {} data from file.'.format(set))\n\n # read in the .npy file containing the images\n images_train = np.load('output/processed_data/images_{}.npy'.format(set))\n\n # read in the .npy file containing the target features\n targets_train = np.load('output/processed_data/targets_{}.npy'.format(set))\n\n # scale image pixel values to [0, 1]\n images_train = images_train.astype(np.float32)\n images_train /= 255.\n\n # scale target center coordinates to [-1, 1] (from 0 to 95 initially)\n targets_train = targets_train.astype(np.float32)\n targets_train[:, 0] = (targets_train[:, 0] - (img_rows / 2)) / (img_rows / 2)\n targets_train[:, 1] = (targets_train[:, 1] - (img_rows / 2)) / (img_cols / 2)\n\n # reshape images according to the neural network model intended to be used\n if model == 'cnn':\n print('Indicated model is a CNN, reshaping images with channels first.')\n images_train = images_train.reshape(-1, 1, img_rows, img_cols)\n elif model == 'dnn':\n print('Indicated model is a DNN, flattening out images.')\n images_train = images_train.reshape(images_train.shape[0], img_rows * img_rows)\n\n print('Loading done. 
Pixel values have been scaled to [0, 1] and target center coordinates to [-1, 1].')\n print('#' * 30)\n\n return images_train, targets_train", "def read_image():\n images = []\n for hand in os.listdir('images'):\n img = cv2.imread(os.path.join('images', hand))\n if img is not None:\n images.append(img)\n return images", "def load_paths_and_labels(self,classes):\n\t\tim_paths , im_labels = [], [] \n\n\t\tfor image_type in classes:\n\t\t\tmypath = self.data_path + self.dataset + '/' + image_type\n\t\t\tonlyfiles = [ f for f in listdir(mypath) if isfile(join(mypath,f)) ]\n\t\t\tclass_support = 0\n\t\t\tfor file_name in onlyfiles:\n\t\t\t\t#print file_name\n\t\t\t\tif file_name != '.DS_Store':\n\t\t\t\t\tim_path = mypath = self.data_path + self.dataset + '/' + image_type + '/' + file_name\n\t\t\t\t\tim_paths.append(im_path)\n\t\t\t\t\tim_labels.append(image_type)\n\t\t\t\tclass_support += 1\n\t\t\t\tif self.support_per_class != None and class_support == self.support_per_class:\n\t\t\t\t\tbreak\n\n\t\tcombined = zip(im_paths, im_labels)\n\t\trandom.shuffle(combined)\n\t\t\n\t\tim_paths[:], im_labels[:] = zip(*combined)\n\n\t\treturn im_paths,im_labels", "def load_food_image_batch(filename, num):\n with open(filename, 'rb') as f:\n datadict = pickle.load(f)\n url_parts = datadict['Image URL'].split(\"/\")\n img_fn = url_parts[-1]\n with open(img_fn):\n X = f.read()\n Y = datadict['coarse_labels']\n X = X.reshape(num, 3, 32, 32).transpose(0,2,3,1).astype(\"float\")\n Y = np.array(Y)\n return X, Y", "def load_images(imgpath):\n images = read_paths(imgpath)\n imglist = [[], [], [], []]\n cur_img = 0\n SIFT = cv2.xfeatures2d.SIFT_create()\n for i in images[0]:\n img = cv2.imread(i, 0)\n imglist[0].append(img)\n imglist[1].append(images[1][cur_img])\n cur_img += 1\n keypoints, des = SIFT.detectAndCompute(img, None)\n imglist[2].append(keypoints)\n imglist[3].append(des)\n return imglist" ]
[ "0.6988628", "0.6685474", "0.66373193", "0.66370726", "0.6426989", "0.6391403", "0.63717645", "0.62725484", "0.62609285", "0.62502795", "0.6230983", "0.62293315", "0.6186863", "0.6182871", "0.61326575", "0.6113", "0.6112325", "0.6069663", "0.6068178", "0.6067434", "0.6062822", "0.6061903", "0.6044588", "0.6026369", "0.6022848", "0.5999245", "0.59884995", "0.59814626", "0.5973293", "0.59697825", "0.59697825", "0.59697825", "0.5959872", "0.5950915", "0.5935481", "0.59227157", "0.5920975", "0.5906724", "0.59053296", "0.58973116", "0.5871517", "0.5850656", "0.58483887", "0.58475804", "0.58440393", "0.58382255", "0.5835857", "0.5832421", "0.58308923", "0.5823984", "0.58143264", "0.58112603", "0.5809615", "0.5804421", "0.57914644", "0.57769537", "0.5773841", "0.5766631", "0.5765509", "0.57648987", "0.5762455", "0.57588524", "0.5756241", "0.57514757", "0.57505655", "0.5747348", "0.57452464", "0.5743426", "0.5739922", "0.5739812", "0.5739752", "0.5731025", "0.57112163", "0.5706998", "0.57007366", "0.5698757", "0.5698258", "0.5697854", "0.5695173", "0.56776804", "0.567763", "0.5674257", "0.5668175", "0.56662166", "0.56614375", "0.5653273", "0.5652212", "0.5651433", "0.564378", "0.564077", "0.56400573", "0.5639186", "0.5637114", "0.5635304", "0.5629976", "0.56282055", "0.5627765", "0.5627339", "0.56203467", "0.5615922" ]
0.7011758
0
Load the numpy array of a single sample image
def load_sample_image(image_name):
    images = load_sample_images()
    index = None
    for i, filename in enumerate(images.filenames):
        if filename.endswith(image_name):
            index = i
            break
    if index is None:
        raise AttributeError("Cannot find sample image: %s" % image_name)
    return images.images[index]
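A minimal usage sketch for the document above, assuming it is scikit-learn's sklearn.datasets.load_sample_image (which ships "china.jpg" and "flower.jpg" as its bundled sample images); the shape/dtype shown in the comment are what those bundled images typically produce:

    from sklearn.datasets import load_sample_image

    # Load one bundled sample image as a numpy array of shape (height, width, 3)
    china = load_sample_image("china.jpg")
    print(china.dtype, china.shape)  # e.g. uint8 (427, 640, 3)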
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loadImage(img_path):\n\n img = Image.open(img_path)\n np_img = np.array(img)\n return (np_img)", "def load_img(path: str) -> np.ndarray:\n \n return np.array(Image.open(path))", "def load(path):\n print(\"path\", path)\n print(Path(path).is_file())\n if Path(path).is_file():\n img = image.imread(path)\n print(f\"Loading image of dimensions {img.shape[0]} x \"\n f\"{img.shape[1]}\")\n return np.array(img)\n raise FileNotFoundError", "def read_image(images_root):\n im_array = np.load(images_root)\n return im_array", "def load_image(image_dataset: ImageDataset,\n sample_id: int) -> np.array:\n img = imread(image_dataset.image_paths[sample_id])\n mask = imread(image_dataset.roi_paths[sample_id])\n mask = (1 - (mask > 0).astype(np.uint8))\n img = img * np.repeat(np.expand_dims(mask, axis=-1), 3, axis=-1)\n img = np.transpose(img, (1, 0, 2))\n return img", "def loadImage(self, path: str) -> ndarray:\n try:\n self.img = np.asarray(Image.open(path))\n\n except FileNotFoundError:\n\n print(\"NO such File {}\".format(path))\n return None\n return self.img", "def _load(self) -> np.ndarray:\n with self._fs.open(self._filepath, mode=\"r\") as f:\n image = Image.open(f).convert(\"RGBA\")\n return np.asarray(image)", "def load_single_image(image_path, dim=100):\n if not isinstance(image_path, str):\n img = Image.open(image_path)\n img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)\n img = preprocess_data(img, dim)\n else:\n img = cv2.imread(image_path, cv2.IMREAD_COLOR)\n img = preprocess_data(img, dim)\n\n img = np.array([img])\n\n return img", "def open_image(self, filename):\n return np.array(self.ds['test'].load_image(filename))", "def load_single_image(path: str) -> np.uint8:\n if not os.path.exists(path):\n print(f\"Warning, try to load non-exist image {path}\")\n return None\n if path.endswith(\".npy\"):\n img = np.load(path)\n elif path.endswith(\".png\") or path.endswith(\".jpeg\") or path.endswith(\".jpg\"):\n img = plt.imread(path)\n if img.dtype != \"uint8\":\n img = (255 * img).astype(np.uint8)\n return img", "def read_img(path):\n img = Image.open(path)\n img_arr = np.array(img, dtype='int32')\n img.close()\n return img_arr", "def load_image(img_file: str) -> numpy.ndarray:\n img = cv2.imread(img_file, 0) # Load in Gray Scale\n return img", "def load_from_array():\n\n x = np.load(settings.data(\"x.npy\")).reshape(-1, 1, 224, 224)\n y = np.load(settings.data(\"y.npy\"))\n\n return x, y", "def load_full_im(self, im_name):\n # return np.genfromtxt(im_name, delimiter=self.delim)#[:,1:] # first column gives column number\n try: \n return np.loadtxt(im_name, delimiter=self.delim,\n usecols=range(1,self.pic_width+1))\n except IndexError as e:\n error('Image analysis failed to load image '+im_name+'\\n'+str(e))\n return np.zeros((self.pic_width, self.pic_height))", "def load_view(self, filepath):\n npz = np.load(filepath)\n if \"sample\" in npz:\n image = npz[\"sample\"].astype(np.float32)\n # for c in range(image.shape[-1]):\n # image[:, :, c] = (image[:, :, c] - image[:, :, c].mean()) / image[:, :, c].std()\n # image[:, :, c] = ((image[:, :, c] - image[:, :, c].mean()) / image[:, :, c].std() * 255).astype(np.uint8)\n # image = (image - image.mean()) / image.std()\n return image\n\n return None", "def load_view(self, filepath):\n npz = np.load(filepath)\n if \"sample\" in npz:\n image = npz[\"sample\"].astype(np.float32)\n # for c in range(image.shape[-1]):\n # image[:, :, c] = (image[:, :, c] - image[:, :, c].mean()) / image[:, :, c].std()\n # image[:, :, c] = ((image[:, :, c] - image[:, 
:, c].mean()) / image[:, :, c].std() * 255).astype(np.uint8)\n # image = (image - image.mean()) / image.std()\n return image\n\n return None", "def image_load(path) -> numpy.ndarray:\n # file\n na = numpy.array(Image.open(path))\n # fix shape\n na = numpy.moveaxis(na, [2,0,1], [0,1,2])\n # shape is now (3,h,w), add 1\n na = na.reshape(1,3,na.shape[1],na.shape[2])\n # change type\n na = na.astype(\"float32\") / 255.0\n return na", "def loader(path):\n img = np.load(path)\n img = img[1:4]\n if np.random.choice((True, False)):\n img = img[:, :, ::-1]\n img = np.array(img)\n if np.random.choice((True, False)):\n img = img[:, ::-1, :]\n img = np.array(img)\n\n img = img.transpose((1, 2, 0)) # pytorch is going to rotate it back\n return img", "def read_img(img_path): \n return sitk.GetArrayFromImage(sitk.ReadImage(img_path))", "def load_img_array(fname, grayscale=False, target_size=None, dim_ordering='default'):\n img = load_img(fname,\n grayscale=grayscale,\n target_size=target_size)\n x = img_to_array(img)\n return x", "def load(path):\n img = plt.imread(path)\n dimensions = f\"{img.shape[0]} x {img.shape[1]}\"\n print(f\"Loaded image at {path} of dimensions {dimensions}\")\n return img", "def read_image(image_path):\n im = Image.open(image_path, 'r')\n return np.array(im)", "def read(path: Union[Path, str]) -> np.ndarray:\n return _reader.imread(str(path))", "def read_image(filename):\n img = Image.open(filename)\n im = np.array(img)\n return im", "def load_image_as_array(filename):\n im = Image.open(filename)\n arr = np.asarray(im)\n return arr", "def load(image_path):\n\tpil_image = Image.open(image_path).convert(\"RGB\")\n\t# convert to BGR format\n\timage = np.array(pil_image)[:, :, [2, 1, 0]]\n\treturn image", "def imgRead(filename: str, representation: int) -> np.ndarray:\r\n if representation==LOAD_GRAY_SCALE:\r\n img = cv2.imread(filename,0)\r\n else:\r\n img = cv2.imread(filename)\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n return img.astype('uint8')", "def read_img(img_path):\n return sitk.GetArrayFromImage(sitk.ReadImage(img_path))", "def load_ipl_as_array(path): \n img = PIL.Image.open(path).convert('RGBA')\n img = np.array(img)\n return img", "def load_image_into_numpy_array(path):\r\n \r\n return np.array(Image.open(path))", "def read_image(path):\n img = ndimage.imread(path, mode=\"RGB\") \n return img", "def load(filename):\n return np.load(filename)", "def load_image_into_numpy_array(path):\n return np.array(Image.open(path))", "def load_image_into_numpy_array(path):\n return np.array(Image.open(path))", "def load_image(self):\n if isinstance(self.filename, str):\n self.image = np.asarray(PIL.Image.open(self.filename))\n elif isinstance(self.filename, np.ndarray):\n self.image = np.asarray(self.filename)\n if self.image.ndim < 3:\n self.bw = True\n if self.image.ndim < 2:\n self.image = None\n print(\"file {} is not an appropriate format.\".format(\n self.filename))\n if self.image.ndim == 3:\n if self.image.shape[-1] == 1:\n self.image = np.squeeze(self.image)\n elif self.image.shape[-1] > 3:\n self.image = self.image[..., :-1]\n if (self.image[..., 0] == self.image.mean(-1)).mean() == 1:\n self.image = self.image[..., 0]\n self.bw = True\n return self.image", "def read_img(img_id, train_or_test, size):\n img = image.load_img(join(data_dir, train_or_test, img_id + '.jpg'), target_size=size)\n # img = image.img_to_array(img)\n return img", "def load_image_into_numpy_array(path):\n MAX_SIZE = (1440, 1080)\n # img_data = tf.io.gfile.GFile(path, 'rb').read()\n image = 
Image.open(path)\n # print(\"Image size before: {}\".format(image.size))\n image.thumbnail(MAX_SIZE)\n # print(\"Image size after resized: {}\".format(image.size))\n (im_width, im_height) = image.size\n return np.array(image).astype(np.uint8)", "def to_numpy(id:int, samples:list=None):\n\t\t\tsamples = listify(samples)\n\t\t\timages = Dataset.Image.to_pillow(id, samples=samples)\n\t\t\timages = [np.array(img) for img in images]\n\t\t\timages = np.array(images)\n\t\t\treturn images", "def get_image_array_from_example(example):\n features = example.features.feature\n img = features['image/encoded'].bytes_list.value[0]\n shape = features['image/shape'].int64_list.value[0:3]\n return np.frombuffer(img, np.uint8).reshape(shape)", "def read(self, index):\n assert type(index) is int\n img = self.db.get_node('/images/img{:04d}'.format(index))\n return np.array(img)", "def read_img(img_id, data_dir, train_or_test, size):\n img = image.load_img(os.path.join(data_dir, train_or_test, '%s.jpg' % img_id), target_size=size)\n img = image.img_to_array(img)\n return img", "def getimg(filename):\n return np.asarray(Image.open('imgdb/'+filename))", "def read_mhd_and_raw(path, numpyFlag=True):\n img = sitk.ReadImage(path)\n if not numpyFlag:\n return img\n\n nda = sitk.GetArrayFromImage(img) # (img(x,y,z)->numpyArray(z,y,x))\n return nda", "def load(filename):\n img = image.load_img(filename, target_size=(299, 299))\n np_image = image.img_to_array(img)\n np_image = np.array(np_image).astype('float32')/255\n\n # Make to a rank 4 tensor (1, 299, 299, 3) -> 1 is for the batch size\n np_image = np.expand_dims(np_image, axis=0)\n\n return np_image", "def load_image_into_numpy_array(self, path):\n \n return np.array(Image.open(path))", "def load_image_as_np(filename):\n try:\n img = image.load_img(filename, color_mode='grayscale') # for newer versions, use \"color_mode='grayscale'\"; For older versions, use \"grayscale=True\"\n return np.atleast_3d(img)\n except Exception as error:\n logging.error(traceback.format_exc())", "def load(self, path, shape=(1024, 1024, 35), dtype='uint16'):\n valid_dtypes = ['uint8', 'uint16']\n if dtype not in valid_dtypes:\n raise ValueError('dtype should be either one of %s' % ', '.join(valid_dtypes))\n\n im = io.imread(path)\n im = numpy.rollaxis(im, 0, 3)\n\n if im.shape != shape and shape is not None:\n factors = tuple(map(lambda z: int(z[0] / z[1]), zip(im.shape, shape)))\n if any([f > 1 for f in factors]):\n # im = resize(im, shape, mode='constant')\n im = downscale_local_mean(im, factors=factors).astype(im.dtype)\n # if 'conf' in path.lower():\n else:\n warnings.warn('Target shape is not a multiple below initial shape')\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n if dtype == 'uint8' and im.dtype != numpy.uint8:\n im = img_as_ubyte(im)\n if dtype == 'uint16' and im.dtype != numpy.uint16:\n im = img_as_uint(im)\n\n self.image_raw = im\n self.name = path", "def img_from_array(array):\n return Image.fromarray(array)", "def load_spe(filename):\n def read_at(data, pos, size, ntype):\n raw.seek(pos)\n return np.fromfile(raw, ntype, size)\n raw = open(filename, 'rb')\n xdim = np.int64(read_at(raw, 42, 1, np.int16)[0])\n ydim = np.int64(read_at(raw, 656, 1, np.int16)[0])\n arr = read_at(raw, 4100, xdim*ydim, np.uint16)\n arr = arr.reshape((ydim, xdim))\n print('data shape: {}'.format(np.shape(arr)))\n if np.shape(arr)[0] == 1:\n arr = arr[0]\n print('data shape: {}'.format(np.shape(arr)))\n return arr", "def load_image(file_name):\n if not 
osp.exists(file_name):\n print('{} not exist'.format(file_name))\n return\n image = np.asarray(io.imread(file_name))\n if len(image.shape)==3 and image.shape[2]>3:\n image = image[:, :, :3]\n # print(image.shape) #should be (x, x, 3)\n return image", "def load_image(self, image_id):\n # Load image\n# print(self.image_info[image_id]['path'])\n image = cv2.imread(self.image_info[image_id]['path'],cv2.IMREAD_GRAYSCALE) \n image = image[:,:, np.newaxis] #Add 1 dimension for grayscale images\n return image", "def imread(fname):\r\n return skimage.io.imread(fname)", "def read_image(filename):\n\n from matplotlib.image import pil_to_array\n\n with Image.open(filename) as image:\n return pil_to_array(image)", "def fromarray(self, *args, **kwargs):\n return _image.image_fromarray(self, *args, **kwargs)", "def get_input(path):\n img = imread(path)\n return img", "def load_image_file(file, mode='RGB'):\n im = PIL.Image.open(file)\n if mode:\n im = im.convert(mode)\n return np.array(im)", "def read_image(self, item):\n assert item['image_dtype'] == 'uint16'\n\n filename = os.path.join(self.home(item['basename']))\n s = open(filename, 'rb').read()\n assert hashlib.md5(s).hexdigest() == item['md5']\n img = np.fromstring(s, dtype=item['image_dtype']).byteswap()\n img = img.reshape(item['image_shape'])\n return img", "def read_full_img(dir, pattern, side, target=None, model_num=None):\n full_img = load_np(os.path.join(dir, pattern.replace(\"side\", side )))\n return full_img", "def load_image(path: str):\n if path.endswith('.npy'):\n return np.load(path)\n if path.endswith(('.nii', '.nii.gz', '.hdr', '.img')):\n import nibabel as nib\n return nib.load(path).get_data()\n if path.endswith('.tif'):\n from PIL import Image\n with Image.open(path) as image:\n return np.asarray(image)\n\n raise ValueError(f\"Couldn't read image from path: {path}.\\n\"\n \"Unknown file extension.\")", "def imread(filename):\n return np.asarray(Image.open(filename), dtype=np.uint8)[..., :3]", "def read_array(self, filename):\n extension = filename.split('.')[-1] # Get file extension\n if extension == 'mat':\n array = sci.loadmat(filename)\n elif extension == 'npy':\n array = np.load(filename)\n else:\n print('Error!!! 
Unrecognised file type for read_array()')\n array = None\n return array", "def load(image_path):\n out = None\n\n #####################################\n # START YOUR CODE HERE #\n #####################################\n # Use skimage io.imread\n out = io.imread(image_path)\n ######################################\n # END OF YOUR CODE #\n ######################################\n\n return out", "def load_image_file(file, mode='RGB'):\n\tim = PIL.Image.open(file)\n\tif mode:\n\t\tim = im.convert(mode)\n\treturn np.array(im)", "def read_image(image_path):\n return np.array(load_img(image_path, color_mode='grayscale')) / 255", "def load_one_img(ds):\n for img in ds.take(1):\n img = img[1, ...]\n yuv_image_tensor = tf.expand_dims(img, axis=0)\n\n return yuv_image_tensor", "def load_image(infilename):\n data = mpimg.imread(infilename)\n return data", "def load_image(default=True):\n if default:\n print(\"in heres\")\n return self.img\n else:\n img = Image.fromarray(cv2.cvtColor(self.img, cv2.COLOR_BGR2RGB))\n self.size = img.shape\n return img", "def load_data(path,size, scale = True):\n images = os.listdir(path)\n images.sort()\n\n X = []\n for i, img in enumerate(images):\n photo = plt.imread(os.path.join(path,img))\n if size:\n photo = tf.image.resize(photo, (size, size))\n X.append(photo)\n \n X = np.array(X)\n if scale:\n X = X/X.max() \n return X", "def load_nib_data(filename, sample_size=None):\n\timg = nib.load(filename)\n\tdata = img.get_data()\n\tif sample_size is not None:\n\t\tnp.random.shuffle(data)\n\t\treturn data[0:sample_size, :, :, :]\n\treturn data", "def load_image_into_numpy_array(img_path, mode=\"int32\"):\n try:\n img = Image.open(img_path)\n img.load()\n data = np.asarray(img, dtype=mode)\n return data\n except PIL.UnidentifiedImageError:\n logging.warning(\"Can't load file! 
Deleting this file...\")\n os.remove(img_path)\n return None", "def load(self):\n logger.debug(f\"Reading {self.path.name}\")\n self.label = int(Data.fromLabel(self.path.parent.name))\n self.image = skimg.data.imread(self.path)", "def load_bin(file_path):\n return np.load(file_path)", "def load_image(filename, color=True):\n img = skimage.img_as_float(skimage.io.imread(filename, as_grey=not color)).astype(np.float32)\n if img.ndim == 2:\n img = img[:, :, np.newaxis]\n if color:\n img = np.tile(img, (1, 1, 3))\n elif img.shape[2] == 4:\n img = img[:, :, :3]\n return img", "def read_image(image_path: str) -> np.ndarray:\n assert image_path.exists()\n try:\n with Image.open(image_path) as img:\n image = np.array(img)\n except OSError as e:\n raise OSError(e)\n return image", "def load_image(self, img_name):\n img_data = cv2.imread(img_name, 0)\n return img_data", "def load_image(image_path):\n img_transforms = get_standard_img_transforms()\n image = Image.open(image_path)\n images = img_transforms(image).unsqueeze(0)\n return images", "def image_file_to_array(filename, img_shape, flatten=True):\n image = misc.imread(filename)\n if flatten:\n w, h = img_shape\n image = image.reshape([w * h])\n image = image.astype(np.float32) / 255.0\n image = 1.0 - image\n return image", "def array_from_img(image):\n return np.array(image)", "def load_image_into_numpy_array(path):\n img_data = tf.io.gfile.GFile(path, 'rb').read()\n image = Image.open(BytesIO(img_data))\n (im_width, im_height) = image.size\n return np.array(image.getdata()).reshape(\n (im_height, im_width, 3)).astype(np.uint8)", "def loadArray(filepath):\n \n from osgeo import gdal\n \n ds = gdal.Open(filepath, 0)\n \n return ds.ReadAsArray()", "def read_from_filename(self, filename=''):\r\n self.raw_image = skimage.io.imread(filename)\r\n self.bk_image = np.copy( self.raw_image )", "def load_image(file_path):\r\n if (\"\\\\\" in file_path):\r\n raise ValueError(\r\n \"Please use a file path following the Unix convention\")\r\n mat = None\r\n try:\r\n mat = np.asarray(Image.open(file_path), dtype=np.float32)\r\n except IOError:\r\n print((\"No such file or directory: {}\").format(file_path))\r\n raise\r\n if len(mat.shape) > 2:\r\n axis_m = np.argmin(mat.shape)\r\n mat = np.mean(mat, axis=axis_m)\r\n return mat", "def read_file(file_path, counter, new_file_paths):\n print(\"counter\", counter)\n\n im_array = np.array(Image.open(file_path), dtype=\"uint8\")\n pil_im = Image.fromarray(im_array)\n new_array = np.array(pil_im.resize((256, 256)))\n npy_file_name = 'test_data' + os.sep + os.path.basename(file_path)[:-4] + '.npy'\n new_file_paths.append(npy_file_name)\n np.save(npy_file_name, new_array / 255)\n return new_array / 255", "def sample(self, shape=(1,)):\n pass", "def read_image(image_file_path: str):\n\n pixels = numpy.array(Image.open(image_file_path))\n\n return pixels", "def readImage(self, path, tt=1):\n return cv2.imread( path, tt)", "def makeArray(imagePath):\r\n array = None\r\n imageExists = (imagePath and len(imagePath)>0 and len(glob(imagePath))>0)\r\n if (imageExists):\r\n try:\r\n image = gdal.Open(imagePath)\r\n array = image.ReadAsArray()\r\n except:\r\n print \"Could not open/convert src land use image\"\r\n else:\r\n print \"File: %s successfully read and converted to array.\" % (imagePath)\r\n else:\r\n print \"LandUse.makeArray says that the imagePath: %s is not correct\" % (imagePath)\r\n\r\n DTYPE = numpy.uint8\r\n array.dtype = DTYPE\r\n return array", "def get_array(self, scale=1):\n array = 
cv2.imread(str(self.path), self.read_type)\n\n # resize original image so it can be be scaled without fractions\n x_extra = array.shape[0] % self.scaling\n y_extra = array.shape[1] % self.scaling\n\n x_extra = self.scaling - x_extra if x_extra != 0 else x_extra\n y_extra = self.scaling - y_extra if y_extra != 0 else y_extra\n\n padded_array = cv2.resize(\n array, (int(array.shape[1] + y_extra), int(array.shape[0] + x_extra))\n )\n\n # scale image\n resized_array = cv2.resize(\n padded_array,\n (int(padded_array.shape[1] * scale), int(padded_array.shape[0] * scale)),\n )\n\n # cv2 reads in array as BGR, tensorboard shows as RGB\n if not self.greyscale:\n x = np.copy(resized_array)\n resized_array[:, :, 0] = x[:, :, 2]\n resized_array[:, :, 2] = x[:, :, 0]\n\n # cv2.imshow('image',array)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n\n if self.greyscale:\n resized_array = np.expand_dims(resized_array, 2)\n return resized_array", "def _open_image(self, path):\n return cv.imread(path, 1)\n # .astype(float)", "def load_nrrd(full_path_filename):\n data = sitk.ReadImage( full_path_filename )\n data = sitk.Cast( sitk.RescaleIntensity(data), sitk.sitkUInt8 )\n data = sitk.GetArrayFromImage(data)\n return(data)", "def _read_datafile(self,path):\n \tlabels, images = [], []\n \twith gzip.GzipFile(path) as f:\n \t for line in f:\n \t vals = line.strip().split()\n \t labels.append(float(vals[0]))\n \t images.append([float(val) for val in vals[1:]])\n \tlabels = np.array(labels, dtype=np.int32)\n \tlabels[labels == 10] = 0 # fix weird 0 labels\n \timages = np.array(images, dtype=np.float32).reshape(-1, 16, 16, 1)\n \timages = (images + 1) / 2\n \treturn images, labels", "def batch_to_ndarray(file):\n file_batch = os.listdir(file)\n file_batch.sort(key=lambda x: int(re.findall('\\d+', x)[0]))\n posterior_sample = np.load(file + \"\\\\\" + file_batch[0])\n for batch in file_batch[1:]:\n #print(batch)\n new_batch = np.load(file + \"\\\\\" + batch)\n if new_batch.ndim > 1:\n posterior_sample = np.concatenate((posterior_sample, new_batch),\n axis = posterior_sample.ndim - 1)\n posterior_sample = pd.DataFrame(posterior_sample).dropna(axis=1)\n return posterior_sample", "def read_img(img_path):\n img_list=[]\n print('image loading...')\n for _,_,files in os.walk(img_path):\n for f in files:\n if f.find('.dcm')>=0:\n tmp_img=dicom.dcmread(os.path.join(img_path,f))\n tmp_img=tmp_img.pixel_array#[0::2,0::2]\n img_list.append(tmp_img)\n img_data=np.array(img_list)\n print('done')\n return img_data", "def load_data(fname):\n pathname = \"data/\" + fname\n data = pickle.load(open(pathname, 'rb'), encoding='latin1')\n images = np.array([img[:-1] for img in data])\n ys = [int(img[-1]) for img in data]\n length = len(ys)\n labels = np.zeros((length, 10))\n\n for i in range(length):\n labels[i, ys[i]] = 1\n\n return images, labels", "def loadData(path):\r\n X = []\r\n y = []\r\n dir1 = os.listdir(path)\r\n for d1 in dir1:\r\n dir2 = os.listdir(path+'/'+d1)\r\n for d2 in dir2:\r\n if int(d1) == 0:\r\n image = cv2.imread(path+r'/'+d1+r'/'+d2, 0)\r\n X.append(np.array(image, dtype=np.float32).reshape(-1) / 255.0)\r\n y.append(1)\r\n elif int(d1) == 1:\r\n image = cv2.imread(path+r'/'+d1+r'/'+d2, 0)\r\n X.append(np.array(image, dtype=np.float32).reshape(-1) / 255.0)\r\n y.append(-1)\r\n X = np.array(X, dtype=np.float32)\r\n y = np.array(y, dtype=np.int64)\r\n perm = np.random.permutation(X.shape[0])\r\n X = X[perm]\r\n y = y[perm]\r\n return X, y", "def load_data_from_npy(filename):\n return np.load(filename)", "def 
load_nifty_volume_as_array(filename):\n img = sitk.ReadImage(filename)\n img_arr = sitk.GetArrayFromImage(img)\n return img_arr", "def load_nifty_volume_as_array(filename, with_header = False):\n img = nibabel.load(filename)\n data = img.get_data()\n data = np.transpose(data, [2,1,0])\n if(with_header):\n return data, img.affine, img.header\n else:\n return data", "def load_nifty_volume_as_array(filename, with_header = False):\n img = nibabel.load(filename)\n data = img.get_data()\n data = np.transpose(data, [2,1,0])\n if(with_header):\n return data, img.affine, img.header\n else:\n return data", "def load_sample(self):\n\n self.load_images(self.folder + \"/sampleSet.txt\")\n self.load_traces(self.folder + \"/sampleLabel.txt\")" ]
[ "0.6871681", "0.6734598", "0.66831964", "0.6642155", "0.66406447", "0.6627881", "0.65667224", "0.65525585", "0.65401435", "0.6525812", "0.6514177", "0.6475732", "0.645297", "0.63903517", "0.63789004", "0.63789004", "0.6344578", "0.6340352", "0.63352865", "0.6329638", "0.632339", "0.63218594", "0.63039035", "0.629636", "0.6283426", "0.6276444", "0.627105", "0.62565845", "0.62245303", "0.62243456", "0.621197", "0.62112963", "0.6210601", "0.6210601", "0.6210375", "0.62023", "0.6199781", "0.61569774", "0.612506", "0.61124754", "0.60910946", "0.60714066", "0.605697", "0.60494405", "0.6040379", "0.6039965", "0.603914", "0.6037166", "0.6034968", "0.602583", "0.60193723", "0.59912306", "0.59669393", "0.59666026", "0.59573483", "0.5947986", "0.59458035", "0.5945677", "0.59289", "0.5928494", "0.59253174", "0.59184057", "0.5915055", "0.5912776", "0.59115005", "0.5903746", "0.58990383", "0.5898946", "0.58890104", "0.58851075", "0.5877824", "0.5872346", "0.58702743", "0.5864829", "0.5858787", "0.5857751", "0.5853123", "0.5847287", "0.584653", "0.5840706", "0.5813261", "0.5790015", "0.577615", "0.5774127", "0.57724863", "0.57687074", "0.576692", "0.5766798", "0.5764607", "0.5757049", "0.57568544", "0.575204", "0.5750853", "0.5747973", "0.57393086", "0.5738931", "0.57249796", "0.57194614", "0.57194614", "0.57164013" ]
0.625068
28
Recreate the (compressed) image from the code book & labels
def recreate_image(codebook, labels, w, h):
    d = codebook.shape[1]
    image = np.zeros((w, h, d))
    label_idx = 0
    for i in range(w):
        for j in range(h):
            image[i][j] = codebook[labels[label_idx]]
            label_idx += 1
    return image
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def recreate_image(codebook, labels, w, h):\n d = codebook.shape[1]\n image = np.zeros((w, h, d))\n label_idx = 0\n for i in range(w):\n for j in range(h):\n image[i][j] = codebook[labels[label_idx]]\n label_idx += 1\n return image", "def recreate_image(codebook, labels, w, h):\r\n d = codebook.shape[1]\r\n image = np.zeros((w, h, d))\r\n label_idx = 0\r\n for i in range(w):\r\n for j in range(h):\r\n image[i][j] = codebook[labels[label_idx]]\r\n label_idx += 1\r\n return image", "def recreate_image(codebook, labels, w, h):\n d = codebook.shape[1]\n image = np.zeros((w, h, d))\n label_idx = 0\n for i in range(w):\n for j in range(h):\n image[i][j] = codebook[int(labels[label_idx])]\n label_idx += 1\n return image", "def recreate_image(codebook, labels, w, h, d):\r\n image = np.zeros((w, h, d))\r\n label_idx = 0\r\n for i in range(w):\r\n for j in range(h):\r\n image[i][j] = codebook[labels[label_idx]]\r\n label_idx += 1\r\n return image", "def recreate_image(codebook, labels, w, h):\n d = codebook.shape[1]\n image = np.zeros((w, h, d))\n label_idx = 0 # 第几个原始图像的像素点\n for i in range(w):\n for j in range(h):\n # 获取原始数据像素点对应的类别中心点坐标\n # 再根据中心点坐标获取对应的像素值\n image[i][j] = codebook[labels[label_idx]]\n label_idx += 1\n return image", "def prep_data(labels, image_root):\n labels = split_description(labels)\n labels = convert_plastics(labels)\n\n # Encoding shape and color data\n labels['Shape'] = encode_column(labels[['Shape']])\n labels['Color'] = encode_column(labels[['Color']])\n labels['isPlastic'] = encode_column(labels[['isPlastic']])\n labels = add_filenames(labels, image_root)\n labels = labels.dropna().reset_index()\n\n return labels", "def compress_image(filename,k):", "def preprocess(exam, data_folder, save_path, image_format):\n for v in ['L-CC', 'L-MLO', 'R-CC', 'R-MLO']:\n if len(exam[v]) == 0:\n continue\n else:\n for image in exam[v]:\n image_path = data_folder + '/' + image + '.' 
+ image_format\n # Extract subdirectories\n subdirs = \"/\".join(image.split('/')[:-1])\n save_dirs = os.path.join(save_path, subdirs)\n # Extract image id\n image_id = image.split('/')[-1]\n # Create save directories\n os.makedirs(save_dirs, exist_ok=True)\n png_save_path = os.path.join(save_dirs, image_id + '.png')\n with Image(filename=image_path, format=image_format) as img:\n with img.clone() as i:\n i.auto_level()\n with i.convert('png') as png_image:\n png_image.transform(resize='896x1152!')\n png_image.save(filename=png_save_path)", "def main():\n\n #Parse input arguments\n from argparse import ArgumentParser\n\n parser = ArgumentParser()\n\n parser.add_argument(\"-i\", \"--image\", dest=\"image\",\n help=\"specify the name of the image\", metavar=\"IMAGE\")\n\n args = parser.parse_args()\n\n #Load image\n if args.image is None:\n print(\"Please specify the name of image\")\n print(\"use the -h option to see usage information\")\n sys.exit(2)\n else:\n image_name = args.image.split(\".\")[0]\n input_image = cv2.imread(args.image, 0)\n\n\n bin_img = bi.binary_image()\n hist = bin_img.compute_histogram(input_image)\n\n outputDir = 'output/cellct/'\n outputDir_compress = 'output/Compression/'\n\n #Saving histogram to output directory \n hist_fig = plt.plot(hist)\n plt.savefig(outputDir+\"hist.png\")\n\n threshold = bin_img.find_optimal_threshold(hist)\n print(\"Optimal threshold: \", threshold)\n\n binary_img = bin_img.binarize(input_image)\n output_image_name = outputDir + \"binary_image_\" + datetime.now().strftime(\"%m%d-%H%M%S\") + \".jpg\"\n cv2.imwrite(output_image_name, binary_img)\n\n #blobcoloring\n cell_count_obj = cc.cell_counting()\n\n regions = cell_count_obj.blob_coloring(binary_img)\n stats = cell_count_obj.compute_statistics(regions)\n\n cell_stats_img = cell_count_obj.mark_regions_image(binary_img, stats)\n output_image_name = outputDir + \"cell_stats_\" + datetime.now().strftime(\"%m%d-%H%M%S\") + \".jpg\"\n cv2.imwrite(output_image_name, cell_stats_img)\n\t\n #Compression\n rle_obj = rle.rle()\n rle_code = rle_obj.encode_image(binary_img)\n print(\"-------------- Runlength Code -------------------\")\n print(rle_code)\n\n [height, width] = binary_img.shape\n\n decoded_image = rle_obj.decode_image(rle_code, height, width)\n\n output_image_name = outputDir_compress + \"decoded_image_\" + datetime.now().strftime(\"%m%d-%H%M%S\") + \".jpg\"\n cv2.imwrite(output_image_name, decoded_image)", "def compress_image(filename,k):\n img_color=plt.imread(filename)\n orig=img_color.copy()\n R=img_color[:,:,0]\n G=img_color[:,:,1]\n B=img_color[:,:,2]\n \n m,n=(R.shape[0],R.shape[1])\n u1,s1,vh1=la.svd(R,full_matrices=False)\n u2,s2,vh2=la.svd(G,full_matrices=False)\n u3,s3,vh3=la.svd(B,full_matrices=False)\n img_color[:,:,0]=u1[:,:k].dot(np.diag(s1[:k]).dot(vh1[:k,:]))\n img_color[:,:,1]=u2[:,:k].dot(np.diag(s2[:k]).dot(vh2[:k,:]))\n img_color[:,:,2]=u3[:,:k].dot(np.diag(s3[:k]).dot(vh3[:k,:]))\n plt.subplot(211)\n img_color[img_color>1]=1.\n img_color[img_color<0]=0.\n plt.imshow(img_color)\n plt.subplot(212)\n plt.imshow(orig)\n plt.show()\n print G", "def compress_image(filename, s):\r\n image = imread(filename) / 255\r\n size = image.shape\r\n orig_entries = image.size\r\n #colored\r\n if len(size) == 3:\r\n #plot original\r\n orig = plt.subplot(121)\r\n orig.imshow(image)\r\n orig.axis(\"off\")\r\n #red in image\r\n R = image[:,:,0]\r\n #green in image\r\n G = image[:,:,1]\r\n #blue in image\r\n B = image[:,:,2]\r\n #approximate red, green and blue in range\r\n new_R, 
entries_R = svd_approx(R,s)\r\n new_R = np.clip(new_R,0,1)\r\n new_G, entries_G = svd_approx(G,s)\r\n new_G = np.clip(new_G,0,1)\r\n new_B, entries_B = svd_approx(B,s)\r\n new_B = np.clip(new_B,0,1)\r\n #stack all in one array\r\n new_image = np.dstack((new_R,new_G,new_B))\r\n #plot image\r\n new = plt.subplot(122)\r\n new.imshow(new_image)\r\n new.axis(\"off\")\r\n #title image with saved number of entries\r\n plt.suptitle(str(orig_entries - (entries_R+entries_G+entries_B)) + \" Entries\")\r\n\r\n\r\n #grayscale\r\n else:\r\n #plot original\r\n orig = plt.subplot(121)\r\n orig.imshow(image, cmap=\"gray\")\r\n orig.axis(\"off\")\r\n #approximate the image\r\n new_A, entries = svd_approx(image,s)\r\n #plot it\r\n new = plt.subplot(122)\r\n new.imshow(new_A, cmap=\"gray\")\r\n new.axis(\"off\")\r\n #title image with saved number of entries\r\n plt.suptitle(str(orig_entries - entries) + \" Entries\")\r\n\r\n plt.show()", "def make_label_data(self):\n from xml.etree.ElementTree import Element, SubElement, dump, ElementTree, parse\n\n if not self.graphicsView.hasImage():\n self.showImageSelectionMessageBox()\n return\n\n app_doc_data = AppDocData.instance()\n project = app_doc_data.getCurrentProject()\n\n smalls = []\n bigs = []\n\n symbol_list = app_doc_data.getTargetSymbolList(all=True)\n for symbol in symbol_list:\n if symbol.width and symbol.height:\n if symbol.width > 300 or symbol.height > 300:\n bigs.append(symbol.getName())\n else:\n smalls.append(symbol.getName())\n\n symbols = [item for item in self.graphicsView.scene().items() if issubclass(type(item), SymbolSvgItem)]\n names = [smalls, bigs]\n\n img = app_doc_data.activeDrawing.image_origin\n\n small_size = 500\n big_size = 850\n\n save_path = project.getTrainingSymbolFilePath()\n\n index = 0\n for size in [small_size, big_size]:\n offsets = [0, int(size / 2)]\n\n width, height = img.shape[1], img.shape[0]\n width_count, height_count = width // size + 2, height // size + 2\n b_width, b_height = width_count * size, height_count * size\n b_img = np.zeros((b_height, b_width), np.uint8) + 255\n b_img[:height, :width] = img[:, :]\n\n for offset in offsets:\n for row in range(height_count):\n for col in range(width_count):\n x, y = col * size + offset, row * size + offset\n tile_rect = QRectF(x, y, size, size)\n tile_symbols = []\n for symbol in [symbol for symbol in symbols if symbol.name in names[index]]:\n if tile_rect.contains(symbol.sceneBoundingRect()):\n tile_symbols.append(symbol)\n symbols.remove(symbol)\n\n if tile_symbols:\n training_uid = str(uuid.uuid4())\n training_image_path = os.path.join(save_path, training_uid + '.png')\n training_xml_path = os.path.join(save_path, training_uid + '.xml')\n\n # save image\n #_img = b_img[round(tile_rect.top()):round(tile_rect.bottom()),\n # round(tile_rect.left()):round(tile_rect.right())]\n #cv2.imwrite(training_image_path, _img)\n _img = self.graphicsView.image().copy(round(tile_rect.left()), round(tile_rect.top()), round(tile_rect.width()), round(tile_rect.height()))\n _img.save(training_image_path)\n\n # save label\n xml = Element('annotation')\n SubElement(xml, 'folder').text = 'None'\n SubElement(xml, 'filename').text = os.path.basename(save_path)\n\n pathNode = Element('path')\n pathNode.text = save_path.replace('/', '\\\\')\n xml.append(pathNode)\n\n sourceNode = Element('source')\n databaseNode = Element('database')\n databaseNode.text = 'Unknown'\n sourceNode.append(databaseNode)\n xml.append(sourceNode)\n\n sizeNode = Element('size')\n widthNode = Element('width')\n 
widthNode.text = str(int(tile_rect.width()))\n sizeNode.append(widthNode)\n heightNode = Element('height')\n heightNode.text = str(int(tile_rect.height()))\n sizeNode.append(heightNode)\n depthNode = Element('depth')\n depthNode.text = '3'\n sizeNode.append(depthNode)\n xml.append(sizeNode)\n\n segmentedNode = Element('segmented')\n segmentedNode.text = '0'\n xml.append(segmentedNode)\n\n labelContent = []\n counts = {}\n for item in tile_symbols:\n rect = item.sceneBoundingRect()\n label, xMin, yMin, xMax, yMax = item.name, int(rect.x() - 5 - x), int(rect.y() - 5 - y), int(rect.x() + rect.width() + 5 - x), int(rect.y() + rect.height() + 5 - y)\n xMin = xMin if xMin > 0 else 0\n yMin = yMin if yMin > 0 else 0\n xMax = xMax if xMax < size else size\n yMax = yMax if yMax < size else size\n\n if label == 'None' or label == '':\n continue\n if label not in labelContent:\n labelContent.append(label)\n counts[label] = 1\n else:\n counts[label] = counts[label] + 1\n\n objectNode = Element('object')\n nameNode = Element('name')\n nameNode.text = label\n objectNode.append(nameNode)\n poseNode = Element('pose')\n poseNode.text = 'Unspecified'\n objectNode.append(poseNode)\n truncatedNode = Element('truncated')\n truncatedNode.text = '0'\n objectNode.append(truncatedNode)\n difficultNode = Element('difficult')\n difficultNode.text = '0'\n objectNode.append(difficultNode)\n\n bndboxNode = Element('bndbox')\n xminNode = Element('xmin')\n xminNode.text = str(xMin)\n bndboxNode.append(xminNode)\n yminNode = Element('ymin')\n yminNode.text = str(yMin)\n bndboxNode.append(yminNode)\n xmaxNode = Element('xmax')\n xmaxNode.text = str(xMax)\n bndboxNode.append(xmaxNode)\n ymaxNode = Element('ymax')\n ymaxNode.text = str(yMax)\n bndboxNode.append(ymaxNode)\n objectNode.append(bndboxNode)\n\n xml.append(objectNode)\n\n ElementTree(xml).write(training_xml_path)\n\n index += 1\n\n QMessageBox.about(self, self.tr(\"Notice\"), self.tr('Successfully applied. 
'))", "def processImage(fpaths_src, label_map, fnames_src, img_idx):\n global counter\n \n n_imgs = len(fpaths_src)\n print(\"Processing %s -- %s/%s (%s%%)\"%(fnames_src[img_idx],counter,n_imgs,round(100.*counter/n_imgs)))\n \n path = fpaths_src[img_idx]\n src_image_raw = Image.open(path, 'r')\n \n # size normalization of the image\n if not (args.resize == None):\n src_image_raw = src_image_raw.resize(size=(int(args.resize), int(args.resize)), resample=Image.BILINEAR)\n \n # convert to writable numpy array\n src_image = np.asarray(src_image_raw, dtype=np.uint8)\n src_image.setflags(write=True)\n \n # some dummy label\n label = -99.99\n # the labels\n if not (label_map == {}):\n # let the label start at 1, instead of 0\n label = int(label_map[fnames_src[img_idx]])+1\n else:\n # add a dummy label (between 0 and 1)\n label = np.random.rand()\n \n image_features = []\n \n # add the original\n image_features.append(generateFeatures(src_image,label,args.knn))\n \n if args.augment == 1:\n print \"Augmenting dataset...\"\n # data augmentation techniques\n rotation_angles = [i for i in xrange(36,360,36)] # samples are transformed by these rotation angles\n \n flip_x = True # data augmentation by flipping around x axis\n flip_y = True # data augmentation by flipping around y axis\n flip_xy= True # data augmentation by flipping around x AND y axis\n \n for angle in rotation_angles:\n rot_matrix = cv2.getRotationMatrix2D(\n (src_image.shape[1]/2.,src_image.shape[0]/2.),\n angle,\n 1.0)\n rot_sample_crop = np.array([])\n rot_sample_crop = cv2.warpAffine(src_image,\n rot_matrix,\n (src_image.shape[1],src_image.shape[0]),\n rot_sample_crop,\n cv2.INTER_LINEAR,\n cv2.BORDER_REFLECT_101)\n \n # add the sample to the dataset\n image_features.append(generateFeatures(rot_sample_crop,label,args.knn))\n \n # add 3 flipped copies\n if flip_x:\n rot_sample_crop_x = cv2.flip(rot_sample_crop,0)\n image_features.append(generateFeatures(rot_sample_crop_x,label,args.knn))\n if flip_y:\n rot_sample_crop_y = cv2.flip(rot_sample_crop,1)\n image_features.append(generateFeatures(rot_sample_crop_y,label,args.knn))\n if flip_xy:\n rot_sample_crop_xy = cv2.flip(rot_sample_crop,-1)\n image_features.append(generateFeatures(rot_sample_crop_xy,label,args.knn))\n \n counter+=1\n\n # return a nx128 or nxk matrix for the features of all modifications of this image\n feat_matrix = np.asarray(image_features)\n return feat_matrix", "def decode(qf, st, en_filename, jpg_filename, img_shape):\n row, col, ch = img_shape\n en_bin = read_binstr_frome_file(en_filename)\n zbs = decode_AC_DC(en_bin, img_shape, st)\n sample_size = (row, col)\n if st == (4, 1, 1):\n sample_size = ((row // 2), (col // 2))\n elif st == (4, 2, 2):\n sample_size = (row, (col // 2))\n img = np.zeros((row, col, ch), np.uint8)\n for c, zb in enumerate(zbs):\n blocks = de_zigzag(zb)\n q_t = get_quantization_table_by_factor(qf, channel_select[c % len(zbs)])\n img_blocks = get_dequantization_img_blocks(blocks, q_t)\n\n b_r, b_c = (row, col) if c == 0 else sample_size\n\n tmp = np.ones((b_r, b_c), np.int8) * 128\n for i, (row_offset, col_offset) in enumerate(get_block_iterator(b_r, b_c)):\n tmp[row_offset:row_offset + 8 if row_offset + 8 <= b_r else b_r,\n col_offset:col_offset + 8 if col_offset + 8 <= b_c else b_c] += img_blocks[i]\n\n # inverse subsample\n img_blocks = cv2.resize(tmp, (row, col))\n\n img[:, :, c] = np.round(img_blocks)\n\n if ch == 3:\n img = cv2.cvtColor(img, cv2.COLOR_YCrCb2BGR)\n\n cv2.imwrite(jpg_filename, img)\n\n return img", "def 
semantic_image_generator(raw_data, output_path, width, height):\n raw_data = np.frombuffer(raw_data, dtype=np.uint8)\n raw_data = raw_data.reshape(height, width, -1)[:, :, 2:3]\n output = np.zeros((height, width, 3), dtype=np.uint8)\n color_map = create_carla_label_colormap_cityscapes_style()\n for i in range(height):\n for j in range(width):\n output[i, j, :] = color_map[int(raw_data[i, j])]\n output = Image.fromarray(output)\n output.save(output_path)\n return output", "def prep_image_data(arg_dict):\n cat_df = pd.read_csv(arg_dict['category_file'],\n skiprows=1,\n sep='\\s+')\n bbox_df = pd.read_csv(arg_dict['bbox_file'],\n skiprows=1,\n sep='\\s+')\n img_dir = arg_dict['image_dir']\n\n combo_df = pd.merge(cat_df, bbox_df, how='outer', on='image_name')\n combo_df['image_name'] = combo_df['image_name'].apply(\n lambda x: x[len('img'):-len('.jpg')])\n labels = Labels(combo_df, img_dir, n_images_loaded=-1)\n labels.set_data_target('raw_image', chunksize=3000)\n return labels", "def writeImage(image, filename):\n Sky = [128,128,128]\n Building = [128,0,0]\n Pole = [192,192,128]\n Road_marking = [255,69,0]\n Road = [128,64,128]\n Pavement = [60,40,222]\n Tree = [128,128,0]\n SignSymbol = [192,128,128]\n Fence = [64,64,128]\n Car = [64,0,128]\n Pedestrian = [64,64,0]\n Bicyclist = [0,128,192]\n Unlabelled = [0,0,0]\n r = image.copy()\n g = image.copy()\n b = image.copy()\n label_colours = np.array([Sky, Building, Pole, Road_marking, Road, Pavement, Tree, SignSymbol, Fence, Car, Pedestrian, Bicyclist, Unlabelled])\n for l in range(0,12):\n r[image==l] = label_colours[l,0]\n g[image==l] = label_colours[l,1]\n b[image==l] = label_colours[l,2]\n rgb = np.zeros((image.shape[0], image.shape[1], 3))\n rgb[:,:,0] = r/1.0\n rgb[:,:,1] = g/1.0\n rgb[:,:,2] = b/1.0\n im = Image.fromarray(np.uint8(rgb))\n # im.save('/Users/koheiyamamoto/Desktop/SegNet/out/' + filename)\n im.save('./out/' + filename)", "def decode(p):\n #assert p.endswith('.' 
+ EXTENSION)\n p2 = os.path.basename(p).replace('baseline.png', '.png')\n p2p = os.path.join('/mnt/Volume0/test/clic2020-devkit/result/', p2) #add by me\n pp = os.path.join('/mnt/Volume0/test/clic2020-devkit/targets',p2)\n p2 = os.path.join('/mnt/Volume0/test/clic2020-devkit/inputs/', p2) #add by me\n p1 = pframe_dataset_shared.get_previous_frame_path(p2)\n #p1 = os.path.join('/mnt/Volume0/test/clic2020-devkit/test_data/inputs/', p1)\n #assert os.path.isfile(p1), (p2, p1, p, len(glob.glob('*.png')))\n b = Image.open(p).convert('L')\n f2_reconstructed = decoder(np.array(Image.open(p1)), b)\n Image.fromarray(f2_reconstructed).save(p2p)\n return f2_reconstructed, np.array(Image.open(pp))", "def create_png(image, label):\n sv = \"/home/avojtekova/Desktop/final_results/star_det/generated_images/\" \n \n for i in range(len(image)):\n data = fits.getdata(image[i][0], ext = 0)\n norm = ImageNormalize(data,interval = ZScaleInterval(), stretch = LinearStretch())\n \n print(image[i][0])\n plt.imshow(data, cmap='Greys_r', origin='lower', norm=norm)#[1250:1750, 2000:2500] add this when you want just part of image \n plt.title(label[i])\n plt.axis('off')\n plt.tight_layout()\n plt.legend\n if i<2:\n if not os.path.isdir(sv + image[i][0][-33:-25] + \"/\") :\n os.makedirs(sv + image[i][0][-33:-25] + \"/\")\n plt.savefig(sv + image[i][0][-33:-25] + \"/\" + label[i]+ \"_\" + image[i][0][-33:-25] + \"_big.png\", dpi = 1000)#,bbox_inches='tight', pad_inches = 0) \n else:\n if not os.path.isdir(sv + image[i][0][-40:-32] + \"/\") :\n os.makedirs(sv + image[i][0][-40:-32] + \"/\")\n plt.savefig(sv + image[i][0][-40:-32] + \"/\" + label[i]+image[i][0][-40:-32] + \"_big.png\", dpi = 1000)#,bbox_inches='tight', pad_inches = 0)\n plt.close()", "def create_image_caption_pairs(self):", "def setup_annotations(self):\n sbd_path = get_data_path('sbd')\n target_path = pjoin(self.root, 'SegmentationClass/pre_encoded')\n if not os.path.exists(target_path): os.makedirs(target_path)\n path = pjoin(sbd_path, 'dataset/train.txt')\n sbd_train_list = tuple(open(path, 'r'))\n sbd_train_list = [id_.rstrip() for id_ in sbd_train_list]\n train_aug = self.files['train'] + sbd_train_list\n\n # keep unique elements (stable)\n train_aug = [train_aug[i] for i in \\\n sorted(np.unique(train_aug, return_index=True)[1])]\n self.files['train_aug'] = train_aug\n set_diff = set(self.files['val']) - set(train_aug) # remove overlap\n self.files['train_aug_val'] = list(set_diff)\n\n pre_encoded = glob.glob(pjoin(target_path, '*.png'))\n expected = np.unique(self.files['train_aug'] + self.files['val']).size\n\n if len(pre_encoded) != expected:\n print(\"Pre-encoding segmentation masks...\")\n for ii in tqdm(sbd_train_list):\n lbl_path = pjoin(sbd_path, 'dataset/cls', ii + '.mat')\n data = io.loadmat(lbl_path)\n lbl = data['GTcls'][0]['Segmentation'][0].astype(np.int32)\n lbl = m.toimage(lbl, high=lbl.max(), low=lbl.min())\n m.imsave(pjoin(target_path, ii + '.png'), lbl)\n\n for ii in tqdm(self.files['trainval']):\n fname = ii + '.png'\n lbl_path = pjoin(self.root, 'SegmentationClass', fname)\n lbl = self.encode_segmap(m.imread(lbl_path))\n lbl = m.toimage(lbl, high=lbl.max(), low=lbl.min())\n m.imsave(pjoin(target_path, fname), lbl)\n\n assert expected == 9733, 'unexpected dataset sizes'", "def make_label_map(path, label_list):\r\n \r\n img = []\r\n for name in path:\r\n now = np.zeros((224,224))\r\n im = cv2.resize(cv2.imread(name), (224,224)).tolist()\r\n for y, i in enumerate(im):\r\n for x, j in enumerate(i):\r\n try:\r\n now[y, x] = 
label_list.index(j)\r\n\r\n except ValueError:\r\n now[y, x] = 0\r\n\r\n img.append(now)\r\n return img", "def label_visualize(img_dir):\n img = scipy.misc.imread(img_dir).astype(np.uint8)\n yo = np.nonzero(img == 1)\n visual = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n\n for i in range(0, 34):\n index = np.nonzero(img == i)\n visual[index + (0,)] = labels[i][0]\n visual[index + (1,)] = labels[i][1]\n visual[index + (2,)] = labels[i][2]\n\n scipy.misc.imsave('./' + img_dir.split('/')[-1], visual)", "def load_pascal(data_dir, split='train'):\n # Wrote this function\n # idx = 0\n # if idx >20:\n # idx+=1\n # break\n \"\"\"\n print(\"Begin Load Images ------------------------------------\")\n images = []\n # images_dict -> key: img_file_idx, value: rgb image ndarray (256*256*3)\n images_dict = {}\n # count\n for infile in glob.glob(\"./VOCdevkit/VOC2007/JPEGImages/*.jpg\"):\n # reshape the images to 256*256*3\n file, ext = os.path.splitext(infile)\n file_idx = file[-6:]\n\n try:\n im = Image.open(infile)\n resized_img = im.resize((256, 256), Image.ANTIALIAS)\n resized_arr = np.array(resized_img)\n images_dict[file_idx] = resized_arr.astype(np.float32)\n except IOError:\n print(\"Error\")\n\n save_obj(images_dict,\"images_dict\")\n \"\"\"\n # label_mat: 2d array, each annotation file is one label_col, multiple label_col mean multiple annotation files\n label_mat = []\n weight_mat = []\n image_mat = []\n\n images_dict = load_obj(\"images_dict\")\n print(\"Return Load Images ------------------------------------\")\n\n # for filename in os.listdir(\"./VOCdevkit/VOC2007/ImageSets/Main/\"):\n for filename in enumerate(CLASS_NAMES):\n\n with open(\"./VOCdevkit/VOC2007/ImageSets/Main/\"+filename[1] +\"_\"+split+\".txt\") as fp:\n print(fp)\n image_mat = []\n label_col = []\n weight_col = []\n line = fp.readline()\n cnt = 1\n while line:\n\n label_idx = line.strip()[:-3]\n try:\n # print(\"Line {}: {}\".format(label_idx, type(label_idx)))\n # Be aware!! 
'000005 ' is different from '000005', there is a space in the first string!!!\n # label_idx = '000005 ' label_idx[:-1]='000005'\n image_mat.append(images_dict[label_idx])\n except IOError:\n print(\"Error Line {}: {}\".format(label_idx, type(label_idx)))\n\n label_flag = int(line.strip()[-2:])\n\n if label_flag is 0 or label_flag is -1:\n label_col.append(np.int32(0))\n else:\n label_col.append(np.int32(1))\n\n if label_flag is 1 or label_flag is -1:\n weight_col.append(np.int32(1))\n else:\n weight_col.append(np.int32(0))\n\n line = fp.readline()\n cnt += 1\n np_label_col = np.asarray(label_col)\n label_mat.append(np_label_col)\n # print(np.shape(label_mat))\n np_weight_col = np.asarray(weight_col)\n weight_mat.append(np_weight_col)\n\n # print('image_mat {}: label_mat {}'.format(np.shape(image_mat), np.shape(label_mat)))\n np_image_mat = np.asarray(image_mat)\n np_label_mat = np.asarray(label_mat)\n np_weight_mat = np.asarray(weight_mat)\n # print('np_image_mat {}: np_label_mat {}'.format(np.shape(np_image_mat), np.shape(np_label_mat)))\n np_trans_label_mat = np_label_mat.transpose()\n np_trans_weight_mat = np_weight_mat.transpose()\n # print(np.shape(np_label_mat))\n # print(np.shape(np_weight_mat))\n print('np_trans_label_mat {}: np_trans_weight_mat {}'.format(np.shape(np_trans_label_mat), np.shape(np_trans_weight_mat)))\n print(\"Return Load Weights and Labels ------------------------------------\")\n return np_image_mat, np_trans_label_mat, np_trans_weight_mat", "def pre_processing_function(label, filename: str, augmentor: Augmentor = None):\n image = imread(filename)\n if augmentor is not None:\n image = np.round(augmentor.run(image)).astype(np.uint8)\n\n return image, label", "def encode_decode(self, img, img_metas):\n pass", "def __init__(self, source, label_config_center, name=None, header=None, \n view_min=None, view_max=None, alpha=255, colormap='gray',\n cross_pos=None):\n if isinstance(source, np.ndarray):\n self._data = np.rot90(source)\n if name == None:\n self._name = 'new_image'\n else:\n self._name = str(name)\n if not isinstance(header, nib.nifti1.Nifti1Header):\n raise ValueError(\"Parameter header must be specified!\")\n elif header.get_data_shape() == source.shape:\n self._header = header\n self._img = None\n else:\n raise ValueError(\"Data dimension does not match.\")\n else:\n self._img = nib.load(source)\n self._header = self._img.get_header()\n basename = os.path.basename(source.strip('/'))\n self._name = re.sub(r'(.*)\\.nii(\\.gz)?', r'\\1', basename)\n self.save_mem_load()\n\n # For convenience, define a shift variable\n self._y_shift = self.get_data_shape()[1] - 1\n\n if view_min == None:\n self._view_min = self._data.min()\n else:\n self._view_min = view_min\n\n if view_max == None:\n self._view_max = self._data.max()\n else:\n self._view_max = view_max\n\n self._alpha = alpha\n self._colormap = colormap\n self._rgba_list = range(self.get_data_shape()[2])\n \n # bool status for the item\n self._visible = True\n if len(self.get_data_shape()) == 3:\n self._4d = False\n else:\n self._4d = True\n self._time_point = 0\n\n # temporal variant for OrthView\n self._cross_pos = cross_pos\n\n # define a dictionary \n self.label_config_center = label_config_center\n self.label_config_center.single_roi_view_update.connect(self.update_single_roi)\n \n # undo redo stacks\n self.undo_stack = DoStack()\n self.redo_stack = DoStack()\n\n self.update_rgba()\n if self._cross_pos:\n self.update_orth_rgba()", "def generate_labels(pics):\r\n return []", "def 
_prepare_image_and_label(self, data):\n image = tf.io.decode_image(data['image/encoded'], channels=3)\n label = tf.io.decode_image(data['image/segmentation/class/encoded'],\n channels=1)\n height = data['image/height']\n width = data['image/width']\n image = tf.reshape(image, (height, width, 3))\n label = tf.reshape(label, (1, height, width))\n label = tf.cast(label, tf.float32)\n # Normalizes image with mean and std pixel values.\n image = input_utils.normalize_image(image)\n return image, label", "def make_layered_psd_from_images():\n\n\t\n\tdoc = open_document(FILEPATHS[0], show=False)\n\tdoc_root = doc.rootNode()\n\t\n\tdocs = []\n\tdocs.append(doc)\n\n\tall_layers = get_layers(doc)\n\tfor i in range(1, len(FILEPATHS)):\n\t\tdocx = open_document(FILEPATHS[i], show=False)\n\t\tdocs.append(docx)\n\t\tdocx_layers = get_layers(docx)\n\t\tfor layer in docx_layers:\n\t\t\tall_layers.append(layer.clone())\n\t\t\t# doc.rootNode().addChildNode(layer, parent_node)\n\tdoc_root.setChildNodes(all_layers)\n\n\tprint('Debug: all nodes: %s' % doc.rootNode().childNodes())\n\t# doc.refreshProjection()\n\n\tsave_filepath = filepath = QtWidgets.QFileDialog.getSaveFileName()[0]\n\tr = doc.saveAs(save_filepath)\n\tprint('Debug: saved: %s' % save_filepath)\n\t\n\tfor doc in docs:\n\t\tprint('Debug: closing %s' % doc)\n\t\tdoc.close()\n\n\tprint('Debug: Script done')", "def decode_labels(mask, num_classes=41):\n h, w = mask.shape\n outputs = np.zeros((h, w, 3), dtype=np.uint8)\n\n img = Image.new('RGB',(len(mask[0]), len(mask)))\n pixels = img.load()\n for j_, j in enumerate(mask):\n for k_, k in enumerate(j):\n if k < num_classes:\n pixels[k_, j_] = label_colours[k]\n outputs = np.array(img)\n return outputs", "def main():\n labels, data = load_image_data()\n print(labels.shape, data.shape)", "def prepare_example(image_path, annotations, label_map_dict):\n print(\"encoding %s\" % image_path)\n with tf.gfile.GFile(image_path, 'rb') as fid:\n encoded_png = fid.read()\n encoded_png_io = io.BytesIO(encoded_png)\n image = pil.open(encoded_png_io)\n\n if image.format != 'PNG':\n raise ValueError('Image format error')\n\n key = hashlib.sha256(encoded_png).hexdigest()\n # obtain attributes\n width, height = image.size\n img_filename = image_path.split('/')[-1]\n\n xmin = []\n ymin = []\n xmax = []\n ymax = []\n classes = []\n classes_text = []\n truncated = []\n occlud = []\n\n xmin.append(int(annotations[2]) / width)\n ymin.append(int(annotations[3]) / height)\n xmax.append(int(annotations[4]) / width)\n ymax.append(int(annotations[5]) / height)\n class_name = annotations[1]\n classes_text.append(class_name)\n classes.append(label_map_dict[class_name])\n classes_text = [class_text.encode('utf-8') for class_text in classes_text]\n trun, occ = annotations[6].split(',')\n truncated.append(int(trun))\n occlud.append(int(occ))\n\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': dataset_util.int64_feature(height),\n 'image/width': dataset_util.int64_feature(width),\n 'image/filename': dataset_util.bytes_feature(img_filename.encode('utf8')),\n 'image/source_id': dataset_util.bytes_feature(img_filename.encode('utf8')),\n 'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),\n 'image/encoded': dataset_util.bytes_feature(encoded_png),\n 'image/format': dataset_util.bytes_feature('png'.encode('utf8')),\n 'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),\n 'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),\n 'image/object/bbox/ymin': 
dataset_util.float_list_feature(ymin),\n 'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),\n 'image/object/class/text': dataset_util.bytes_list_feature(classes_text),\n 'image/object/class/label': dataset_util.int64_list_feature(classes),\n 'image/object/truncated': dataset_util.int64_list_feature(truncated),\n 'image/object/view': dataset_util.int64_list_feature(occlud),\n }))\n return example", "def decode_image(file_location=\"images/encoded_sample.png\"):\n encoded_image = Image.open(file_location)\n\n x_size = encoded_image.size[0]\n y_size = encoded_image.size[1]\n\n decoded_image = Image.new(\"RGB\", encoded_image.size)\n pixels = decoded_image.load()\n\n for x in range(x_size):\n for y in range(y_size):\n if lsb_of_red_pixel(encoded_image, x, y):\n pixels[x, y] = (255,255,255)\n else:\n pixels[x, y] = (0, 0, 0)\n\n #pixels[x, y] = [(0,0,0) if lsb_of_pixel(red_channel, x, y) else (1,1,1)]\n\n decoded_image.save(\"images/decoded_image.png\")\n decoded_image.show()", "def decode_image(file_location=\"images/encoded_sample.png\"):\n encoded_image = Image.open(file_location)\n red_channel = encoded_image.split()[0]\n\n\n x_size = encoded_image.size[0]\n y_size = encoded_image.size[1]\n\n\n decoded_image = Image.new(\"RGB\", encoded_image.size)\n pixels = decoded_image.load()\n for x in range(x_size):\n for y in range(y_size):\n red_pixel = red_channel.getpixel((x,y))\n binary = bin(red_pixel)\n\n lsb = int(binary[-1])\n if(lsb == 0):\n pixels[x,y] = (0,0,0)\n elif(lsb == 1):\n pixels[x,y] = (255,255,255)\n\n pass\n decoded_image.save(\"images/decoded_image.png\")", "def __init__(self, data_dir_original, data_dir_photoshopped, transform = data_transforms['train']):\n '''\n Notes:\n Would need to just save all the images and label them appropriately.\n Need to figure out how to just extract 1 photoshop transformation fo reach image for each \n \n '''\n cwd = os.path.dirname(os.path.realpath(__file__))\n # Process Photoshopped Images\n data_dir_photoshopped = os.path.join(cwd, '..', data_dir_photoshopped)\n #print(data_dir_photoshopped)\n p_filenames = os.listdir(data_dir_photoshopped)\n p_filenames = [os.path.join(data_dir_photoshopped, f) for f in p_filenames if f.endswith('.jpg')]\n p_labels = [1 for i in range(len(p_filenames))]\n \n # Process Original Images\n data_dir_original = os.path.join(cwd,'..', data_dir_original)\n o_filenames = os.listdir(data_dir_original)\n o_filenames = [os.path.join(data_dir_original, f) for f in o_filenames if f.endswith('.jpg')]\n o_labels = [0 for i in range(len(o_filenames))]\n \n # Add both types together\n self.filenames = p_filenames + o_filenames\n self.labels = p_labels + o_labels\n \n self.filenames, self.labels = shuffle(self.filenames, self.labels, random_state=42)\n self.transform = transform", "def preprocess_labels(label, number_slices):\n labels = [[] for i in range(np.array(label).shape[0])]\n\n for j in range(np.array(label).shape[0]):\n if type(label) is not np.ndarray:\n for i in range(number_slices):\n labels[j].append(np.array(Image.open(label[0][i]), dtype=np.uint8))\n\n label = np.array(labels[0])\n label = label.transpose((1, 2, 0))\n max_mask = np.max(label) * 0.5\n label = np.greater(label, max_mask)\n label = np.expand_dims(label, axis=0)\n\n return label", "def reconstruct_image(patch_list, patch_nb=2):\n line_list = []\n for i in range(0, patch_nb ** 2 - 1, patch_nb):\n line_list.append(cv2.hconcat(patch_list[i : i + patch_nb]))\n final_img = cv2.vconcat(line_list)\n return final_img", "def 
binary_reconstruct(self, data_path, width, height, dtype=\"uint16\",\n output_dir=\"\", focal_spot_image_name=\"focal_spot\", \n sinogram_image_name=\"sinogram\"):\n\n focal_spot, sinogram = reconstruct_from_cr_data(data_path, width, height, dtype=dtype)\n\n focal_spot = img_as_ubyte(equalize_adapthist(focal_spot))\n focal_spot_path = os.path.join(output_dir, \"%s.png\" % focal_spot_image_name)\n sinogram = img_as_ubyte(equalize_adapthist(sinogram))\n sinogram_path = os.path.join(output_dir, \"%s.png\" % sinogram_image_name)\n\n io.imsave(focal_spot_path, focal_spot)\n io.imsave(sinogram_path, sinogram)", "def imageprepare():\r\n file_name = 'temp_image.png'\r\n im = Image.open(file_name).convert('L')\r\n im = im.resize((20, 20))\r\n p = Image.new('L', (28,28), (255))\r\n p.paste(im,(4,4,24,24))\r\n p.save(\"last_image.png\")\r\n\r\n tv = list(p.getdata()) # get pixel values\r\n # normalize pixels to 0 and 1. 0 is pure white, 1 is pure black.\r\n tva = [(255 - x) * 1.0 / 255.0 for x in tv]\r\n tva = np.reshape(tva, (28, 28))\r\n\r\n return tva", "def extract_labels(filename, num_images):\n\n# this function definition has been taken from internet \n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64) #Interpret a buffer as a 1-dimensional array\n return labels", "def decode_labels(mask, num_images=1, num_classes=21, task='seg'):\n n, h, w, c = mask.shape\n assert (n >= num_images), 'Batch size %d should be greater or equal than number of images to save %d.' % (n, num_images)\n outputs = np.zeros((num_images, h, w, 3), dtype=np.uint8)\n for i in range(num_images):\n if task == 'normal':\n outputs[i] = mask[i]\n elif task == 'seg':\n img = Image.new('RGB', (w, h), (255, 255, 255)) # unlabeled part is white (255, 255, 255)\n pixels = img.load()\n for j_, j in enumerate(mask[i, :, :, 0]):\n for k_, k in enumerate(j):\n if k < num_classes:\n pixels[k_, j_] = label_colours[k]\n outputs[i] = np.array(img)\n else:\n raise Exception('task name is not recognized!')\n\n return outputs", "def imageprepare(filename):\n img = Image.open(filename).convert('L')\n rect = img.getbbox()\n im = img.crop(rect)\n im.save(filename + '_pressprocessed.png')\n\n width = float(im.size[0])\n height = float(im.size[1])\n newImage = Image.new('L', (28, 28), (0)) #creates white canvas of 28x28 pixels\n if width > height: #check which dimension is bigger\n #Width is bigger. Width becomes 20 pixels.\n nheight = int(round((20.0/width*height),0)) #resize height according to ratio width\n if (nheight == 0): #rare case but minimum is 1 pixel\n nheight = 1\n # resize and sharpen\n img = im.resize((20,nheight), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\n wtop = int(round(((28 - nheight)/2),0)) #caculate horizontal pozition\n newImage.paste(img, (4, wtop)) #paste resized image on white canvas\n newImage.save(filename + '_final.png')\n else:\n #Height is bigger. Heigth becomes 20 pixels. 
\n nwidth = int(round((20.0/height*width),0)) #resize width according to ratio height\n if (nwidth == 0): #rare case but minimum is 1 pixel\n nwidth = 1\n # resize and sharpen\n img = im.resize((nwidth,20), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\n wleft = int(round(((28 - nwidth)/2),0)) #caculate vertical pozition\n newImage.paste(img, (wleft, 4)) #paste resized image on white canvas\n newImage.save(filename + '_final.png')\n tv = list(newImage.getdata()) #get pixel values\n #normalize pixels to 0 and 1. 0 is pure white, 1 is pure black.\n tva = [ (x)*1.0/255.0 for x in tv] \n return tva", "def plotCode(code):\n # rescale features\n mincode = np.amin(code)\n maxcode = np.amax(code)\n print('Min: ', mincode, 'Max: ', maxcode)\n code = (code - mincode) / (maxcode - mincode)\n # create output image\n sh = np.shape(code)\n W = sh[0]\n H = sh[1]\n out = np.zeros((3*(W+2)-2, 5*(H+2)-2))\n # copy each feature in out\n for w in range(0,3):\n for h in range(0,5):\n c = w*5 + h\n out[w*(W+2):w*(W+2)+W, h*(H+2):h*(H+2)+H] = code[:,:,c]\n return out", "def __init__(self, path_image, path_imagefile, path_bndboxfile, transform):\r\n # -------------------- DATA ARGUMENT\r\n self.shape = 446\r\n self.hue = 0.1\r\n self.saturation = 1.5\r\n self.exposure = 1.5\r\n self.imagelist = []\r\n self.labellist = []\r\n self.transform = transform\r\n label_dir = os.listdir(path_bndboxfile)\r\n image_dir = os.listdir(path_imagefile)\r\n\r\n # read imagepath\r\n for file in image_dir:\r\n file_name = os.path.join(path_imagefile, file)\r\n with open(file_name) as f:\r\n lines = f.readlines()\r\n for line in lines:\r\n image_name = line.split()[0] + '.JPEG'\r\n image = os.path.join(path_image, image_name)\r\n self.imagelist.append(image)\r\n\r\n # read imagelabel, i.e, (name, xmin, xmax, ymin, ymax)\r\n for file in label_dir:\r\n if file.split('.')[1] == 'xml':\r\n file_name = os.path.join(path_bndboxfile, file)\r\n with open(file_name) as f:\r\n xml_tree = parse(f).documentElement\r\n objects = xml_tree.getElementsByTagName('object')\r\n for object in objects:\r\n label = []\r\n name = object.getElementsByTagName('name')[0]\r\n label.append(name.childNodes[0].data)\r\n bndbox = object.getElementsByTagName('bndbox')[0]\r\n for node in bndbox.childNodes:\r\n if node.nodeType == node.ELEMENT_NODE:\r\n label.append(node.childNodes[0].data)\r\n self.labellist.append(label)\r\n else:\r\n print('Expect files in xml format. 
but get {}'.format(file.split('.')[1]))", "def _build_final_image(self, image):\n raise NotImplementedError", "def collate_fn_pad_image_only(batch):\n output = {\n 'id': [],\n 'label': {\n 'intent': [],\n 'semiotic': [],\n 'contextual': [],\n },\n 'image': [],\n }\n\n for sample in batch:\n output['id'].append(sample['id'])\n output['label']['intent'].append(sample['label']['intent'])\n output['label']['semiotic'].append(sample['label']['semiotic'])\n output['label']['contextual'].append(sample['label']['contextual'])\n output['image'].append(sample['image'])\n\n output['label']['intent'] = torch.LongTensor(output['label']['intent'])\n output['label']['semiotic'] = torch.LongTensor(output['label']['semiotic'])\n output['label']['contextual'] = torch.LongTensor(output['label']['contextual'])\n output['image'] = torch.stack(output['image'], dim=0)\n return output", "def decompress(args):\n # Three integers for tensor shapes + nine encoded strings.\n np_dtypes = [np.integer] * 3 + [np.bytes_] * 9\n with open(args.input_file, \"rb\") as f:\n packed = tfc.PackedTensors(f.read())\n arrays = packed.unpack_from_np_dtypes(np_dtypes)\n\n # Build model and restore optimized parameters.\n model = CompressionModel(args)\n checkpoint = tf.train.Checkpoint(model=model)\n restore_path = tf.train.latest_checkpoint(args.checkpoint_dir)\n checkpoint.restore(restore_path)\n curr_decoded = model.decompress(arrays)\n row=int(args.input_file.split('/')[-1].split('.')[0])\n\n # Write reconstructed images out as PNG files.\n for col in range(np.shape(curr_decoded)[1]):\n img = curr_decoded[0,col,:,:,:]/255\n save_img(args.output_file,0,img,row,col+1)", "def label2img2(label):\n buff = F.argmax(label, axis = 1)\n buff = F.vstack((buff, buff, buff))\n\n buff.data[0][buff.data[0] == 0] = 255\n buff.data[1][buff.data[1] == 0] = 0\n buff.data[2][buff.data[2] == 0] = 0\n\n buff.data[0][buff.data[0] == 1] = 0\n buff.data[1][buff.data[1] == 1] = 255\n buff.data[2][buff.data[2] == 1] = 0\n\n buff.data[0][buff.data[0] == 2] = 0\n buff.data[1][buff.data[1] == 2] = 0\n buff.data[2][buff.data[2] == 2] = 255\n\n return buff.data.astype(np.uint8)", "def collate_fn_pad_image_text(batch):\n output = {\n 'id': [],\n 'label': {\n 'intent': [],\n 'semiotic': [],\n 'contextual': [],\n },\n 'caption': [],\n 'image': [],\n }\n\n for sample in batch:\n output['id'].append(sample['id'])\n output['label']['intent'].append(sample['label']['intent'])\n output['label']['semiotic'].append(sample['label']['semiotic'])\n output['label']['contextual'].append(sample['label']['contextual'])\n output['caption'].append(sample['caption'])\n output['image'].append(sample['image'])\n\n output['label']['intent'] = torch.LongTensor(output['label']['intent'])\n output['label']['semiotic'] = torch.LongTensor(output['label']['semiotic'])\n output['label']['contextual'] = torch.LongTensor(output['label']['contextual'])\n output['caption'] = torch.nn.utils.rnn.pad_sequence(output['caption']).t() # (batch_size, sequence_length)\n output['image'] = torch.stack(output['image'], dim=0)\n return output", "def decode_labels(mask, num_images=1, num_classes=20):\n\tn, h, w = mask.shape\n\tassert(n >= num_images), 'Batch size %d should be greater or equal than number of images to save %d.' 
% (n, num_images)\n\toutputs = np.zeros((num_images, h, w, 3), dtype=np.uint8)\n\tfor i in range(num_images):\n\t img = Image.new('RGB', (len(mask[i, 0]), len(mask[i])))\n\t pixels = img.load()\n\t for j_, j in enumerate(mask[i, :, :]):\n\t\t for k_, k in enumerate(j):\n\t\t\t if k < num_classes:\n\t\t\t\t pixels[k_,j_] = label_colours[k]\n\t outputs[i] = np.array(img)\n\treturn outputs", "def preprare_Xy(directory, img_dim, names):\n # Create empty array for images, with dimensions to which all images will be resized and 3 color channels\n X = np.empty((0, img_dim, img_dim, 3))\n # Create empty list for corresponding labels\n y = []\n \n # For each label name (artist)\n for name in names:\n # Get the paths of all images \n img_paths = glob.glob(os.path.join(directory, name, \"*.jpg\"))\n \n # For each image for the given artist, load the image and append image array and label\n for img_path in tqdm(img_paths):\n img = load_img(img_path, target_size=(img_dim,img_dim))\n img_array = np.array([img_to_array(img)])\n X = np.vstack([X, img_array])\n y.append(name)\n\n # Normalize images using min max regularisation\n X_scaled = (X - X.min())/(X.max() - X.min())\n \n # Binarize labels\n lb = LabelBinarizer()\n y_binary = lb.fit_transform(y)\n \n return X_scaled, y_binary", "def preprocessing(image_data, max_height, max_width):\n img = image_data[\"image\"]\n img = resize_image(img, max_height, max_width)\n gt_boxes = image_data[\"objects\"][\"bbox\"]\n gt_labels = image_data[\"objects\"][\"label\"]\n return img, gt_boxes, gt_labels", "def preprocess(self):\n meta_file_path = os.path.join(database_directory, 'data.txt')\n meta = pd.read_csv(meta_file_path, delimiter=' ', header=None)\n meta = meta[meta[0] != '45567.jpg'] # Corrupt image.\n meta.to_pickle(os.path.join(database_directory, 'meta.pkl'))\n for file_name in meta.iloc[:, 0].values:\n if file_name.endswith('.jpg'):\n file_path = os.path.join(database_directory, file_name)\n image = imageio.imread(file_path).astype(np.uint8)\n image = transform.resize(image, (self.preprocessed_image_size, self.preprocessed_image_size),\n preserve_range=True)\n image = image.transpose((2, 0, 1))\n np.save(file_path.replace('.jpg', '.npy'), image)", "def make_sub_data_train(data, config):\n sub_input_sequence = []\n sub_label_sequence = []\n\n\tfor scale in range(2,5):\t \n\n\t for i in range(len(data)):\n\n\t\t#input_, label_, = preprocess(data[i], config.scale) # do bicbuic only one scale\n\t\tinput_, label_, = preprocess(data[i], scale) # do bicbuic turn around all scale\n\t\n\t\tif len(input_.shape) == 3: # is color\n\t\t h, w, c = input_.shape\n\t\telse:\n\t\t h, w = input_.shape # is grayscale\n\t\n\t\t#checkimage(input_)\t\t\n\n\t\tnx, ny = 0, 0\n\t\tfor x in range(0, h - config.image_size + 1, config.stride):\n\t\t nx += 1; ny = 0\n\t\t for y in range(0, w - config.image_size + 1, config.stride):\n\t\t\tny += 1\n\n\t\t\tsub_input = input_[x: x + config.image_size, y: y + config.image_size] # 41 * 41\n\t\t\tsub_label = label_[x: x + config.label_size, y: y + config.label_size] # 41 * 41\n\n\n\t\t\t# Reshape the subinput and sublabel\n\t\t\tsub_input = sub_input.reshape([config.image_size, config.image_size, config.c_dim])\n\t\t\tsub_label = sub_label.reshape([config.label_size, config.label_size, config.c_dim])\n\n\t\t\t# Normialize\n\t\t\tsub_input = sub_input / 255.0\n\t\t\tsub_label = sub_label / 255.0\n\t\t\t\n\t\t\t#cv2.imshow(\"im1\",sub_input)\n\t\t\t#cv2.imshow(\"im2\",sub_label)\n\t\t\t#cv2.imshow(\"residual\",sub_input - 
sub_label)\n\t\t\t#cv2.waitKey(0)\n\n\t\t\t# Rotate 90,180,270\n\t\t\tfor angle in range(0,360,90):\t\n\t\t\t\tsub_input = rotate(sub_input,angle)\t\n\t\t\t\tsub_label = rotate(sub_label,angle)\t\n\t\t\n\t\t\t\t# Add to sequence\n\t\t\t\tsub_input_sequence.append(sub_input)\n\t\t\t\tsub_label_sequence.append(sub_label)\n\n\t\t\t\tcv2.imshow(\"im1\",sub_input)\n\t\t\t\tcv2.imshow(\"im2\",sub_label)\n\t\t\t\tcv2.imshow(\"residual\",sub_input - sub_label)\n\t\t\t\tcv2.waitKey(1)\n\t\t\t\t\n\n \n # NOTE: The nx, ny can be ignore in train\n return sub_input_sequence, sub_label_sequence, nx, ny", "def prepare_train_patches(images_path, labels_path, indices, patch_size, overlap, overlap_amount, aug_config):\n\n # Load images and labels\n images = extract_images(images_path, indices)\n labels = extract_images(labels_path, indices)\n\n # Get patches\n if overlap:\n image_patches = [patch for im in images for patch in patchify_overlap(im, patch_size, overlap_amount)]\n label_patches = [patch for label in labels for patch in patchify_overlap(label, patch_size, overlap_amount)]\n else:\n image_patches = [patch for im in images for patch in patchify(im, patch_size)]\n label_patches = [patch for label in labels for patch in patchify(label, patch_size)]\n \n if not aug_config:\n return image_patches, label_patches\n\n patches = zip(image_patches, label_patches)\n\n # Rotation needs to be applied on whole image\n if aug_config.do_rotation:\n images_rot = rotate_images(images, aug_config.rotation_angles)\n labels_rot = rotate_images(labels, aug_config.rotation_angles)\n\n for im, label in zip(images_rot, labels_rot):\n p = patchify_no_corner(im, label, patch_size, overlap, overlap_amount)\n image_patches.extend(p[0])\n label_patches.extend(p[1])\n\n # Flip each patch horizontally\n images_flipped = []\n labels_flipped = []\n if aug_config.do_flip:\n flip_hor = iaa.Fliplr(0.5).to_deterministic()\n flip_ver = iaa.Flipud(0.5).to_deterministic()\n images_flipped.extend(flip_hor.augment_images(image_patches))\n images_flipped.extend(flip_ver.augment_images(image_patches))\n labels_flipped.extend(flip_hor.augment_images(label_patches))\n labels_flipped.extend(flip_ver.augment_images(label_patches))\n\n image_patches.extend([im.copy() for im in images_flipped])\n label_patches.extend([im.copy() for im in labels_flipped])\n\n # For all the patches (even new ones), augment channels\n if aug_config.augment_channels:\n image_patches = augment_channels(image_patches, aug_config)\n\n return image_patches, label_patches", "def transform_images(img1,img2):", "def build_filler_images(self):", "def reconstructImage(self,arr):\n\t\tarr = arr * 256\n\t\tarr = np.array(np.round(arr),dtype=np.uint8)\n\t\t#arr = np.array(arr,dtype=np.uint8)\n\n\t\t# We need to transpose the array because we flatten X by columns\n\t\t#arr = arr.T\n\t\t#a = arr.reshape((self.width, self.height,3))\n\t\t\n\t\tif self.mode == 'L':\n\t\t\ta = arr.reshape((self.width, self.height))\n\t\telse:\n\t\t\ta = arr.reshape((self.width, self.height,3))\n\n\t\t#a = arr.reshape((3,self.width, self.height))\t\t\n\t\t#a = arr.transpose(0, 3, 1, 2)\n\n\t\tim = Image.fromarray(a,mode=self.mode)\n\n\t\treturn im", "def preprocessing(fish_class):\n\n\t# encode fish_class into integer\n\tfish_label = fish_labels[fish_class]\n\t\n\t# return a list of image directories for each image\n\timg_handles = glob.glob(data_dir + fish_class + '/' + '*.jpg')\n\t\n\t# build an empty list to store each img as np.ndarray\n\timgs = []\n\t\n\t# build an empty list to store the encoded 
label for each image\n\tlabels = []\n\t\n\t# iterate through all images in the fish_class folder\n\tfor img_handle in img_handles:\n\t\n\t\t# read img as np.ndarray\n\t\timg = cv2.imread(img_handle)\n\t\t\n\t\t# resize it \n\t\tcv2.resize( img, (new_img_width, new_img_height)\n\t\timg = cv2.resize(img, new_img_size, interpolation=cv2.INTER_CUBIC)\n\t\tstore the img in format of np.ndarray into the imgs \n\t\timgs.append(img)\n\t\t\n\t\t# store a label in labels\n\t\tlabels.append(fish_label)\n\t\n\treturn imgs, labels\n\n# time the preprocessing\nt0 = time.time()\n\n# build an empty list to store preprocessed data\npreprocessed_data = []\n\n# build an empty list to store labels\nencoded_labels = []\n\nfor num, fish in enumerate(classes):\n\tprint num\n\tprint 'Preprocessing imgs of fish %s' %(fish)\n\tprint '----------------------------------------------------------------------'\n\tpreprocessed_imgs, labels = preprocessing(fish)\n\tpreprocessed_data.append(preprocessed_imgs)\n\tencoded_labels.append(labels)", "def prepare_bubble(path): \n img = load_ipl_as_array(path)\n img = gray_bg(img)\n return img", "def encode_image(text_to_encode, template_image=\"images/template_image.jpg\"):\n raw_image = Image.open(template_image)\n hidden_message = write_text(text_to_encode,raw_image.size)\n\n x_size = raw_image.size[0]\n y_size = raw_image.size[1]\n\n red_channel = raw_image.split()[0]\n green_channel = raw_image.split()[1]\n blue_channel = raw_image.split()[2]\n # get all channels from raw_image\n encoded_image = Image.new(\"RGB\", raw_image.size)\n\n for x in range(x_size):\n for y in range(y_size):\n hidden_pixel = hidden_message.getpixel((x, y))\n\n encoded_red_pixel = red_channel.getpixel((x, y))\n if (hidden_pixel == (255, 255, 255)):\n red_channel_pixel = red_channel.getpixel((x, y))\n red_binary = bin(red_channel_pixel)\n red_binary = red_binary[:-1] + \"1\"\n # change the last binary value\n encoded_red_pixel = int(red_binary,2)\n # covert binary back to int\n\n else: # if pixel doesnt = white, that means theres no value, set last binary = 0\n red_channel_pixel = red_channel.getpixel((x, y))\n red_binary = bin(red_channel_pixel)\n red_binary = red_binary[:-1] + \"0\"\n encoded_red_pixel = int(red_binary,2)\n\n encoded_rgb = (encoded_red_pixel,\n green_channel.getpixel((x, y)),\n blue_channel.getpixel((x, y)))\n\n encoded_image.putpixel((x, y), encoded_rgb)\n encoded_image.save(\"images/hidden_message_image.png\")", "def transform(self, previousimage):", "def unpack_data(imagefile, labelfile):\n\t# Open the images with gzip in read binary mode\n\timages = open(imagefile, 'rb')\n\tlabels = open(labelfile, 'rb')\n\t# Read the binary data\n\t# We have to get big endian unsigned int. 
So we need '>I'\n\t# Get metadata for images\n\timages.read(4) # skip the magic_number\n\tnumber_of_images = images.read(4)\n\tnumber_of_images = unpack('>I', number_of_images)[0]\n\trows = images.read(4)\n\trows = unpack('>I', rows)[0]\n\tcols = images.read(4)\n\tcols = unpack('>I', cols)[0]\n\n\t# Get metadata for labels\n\tlabels.read(4) # skip the magic_number\n\tN = labels.read(4)\n\tN = unpack('>I', N)[0]\n\n\tif number_of_images != N:\n\t\traise Exception('number of labels did not match the number of images')\n\t# Get the data\n\tx = zeros((N, rows, cols), dtype=float32) # Initialize numpy array\n\ty = zeros((N, 1), dtype=uint8) # Initialize numpy array\n\tfor i in range(N):\n\t\tif i % 1000 == 0:\n\t\t\tprint(\"i: %i\" % i)\n\t\tfor row in range(rows):\n\t\t\tfor col in range(cols):\n\t\t\t\ttmp_pixel = images.read(1) # Just a single byte\n\t\t\t\ttmp_pixel = unpack('>B', tmp_pixel)[0]\n\t\t\t\tx[i][row][col] = tmp_pixel\n\t\ttmp_label = labels.read(1)\n\t\ty[i] = unpack('>B', tmp_label)[0]\n\treturn x, y", "def training_data_generation(DATA_DIR, img_height_size, img_width_size, label_list):\r\n \r\n img_ms_files = glob.glob(DATA_DIR + '\\\\Train_MS' + '\\\\Train_*.tif')\r\n img_pan_files = glob.glob(DATA_DIR + '\\\\Train_Pan' + '\\\\Train_*.tif')\r\n polygon_files = glob.glob(DATA_DIR + '\\\\Train_Polygons' + '\\\\Train_*.geojson')\r\n \r\n img_ms_array_list = []\r\n img_pan_array_list = []\r\n mask_array_list = []\r\n \r\n for file in range(len(img_ms_files)):\r\n with rasterio.open(img_ms_files[file]) as f:\r\n metadata = f.profile\r\n img_ms = np.transpose(f.read(tuple(np.arange(metadata['count']) + 1)), [1, 2, 0])\r\n \r\n with rasterio.open(img_pan_files[file]) as g:\r\n metadata_pan = g.profile\r\n img_pan = np.expand_dims(g.read(1), axis = 2)\r\n \r\n ms_to_pan_ratio = metadata['transform'][0] / metadata_pan['transform'][0]\r\n \r\n if (img_height_size % ms_to_pan_ratio) != 0 or (img_width_size % ms_to_pan_ratio) != 0:\r\n raise ValueError('Please make sure that both img_height_size and img_width_size can be divided by {}'.format(int(ms_to_pan_ratio)))\r\n \r\n mask = training_mask_generation(img_pan_files[file], polygon_files[file], labels = label_list)\r\n \r\n img_ms_array, img_pan_array, mask_array = image_clip_to_segment_and_convert(img_ms, img_pan, mask, ms_to_pan_ratio, \r\n img_height_size, img_width_size)\r\n \r\n img_ms_array_list.append(img_ms_array)\r\n img_pan_array_list.append(img_pan_array)\r\n mask_array_list.append(mask_array)\r\n \r\n img_ms_full_array = np.concatenate(img_ms_array_list, axis = 0)\r\n img_pan_full_array = np.concatenate(img_pan_array_list, axis = 0)\r\n mask_full_array = to_categorical(np.concatenate(mask_array_list, axis = 0), num_classes = len(label_list))\r\n \r\n return img_ms_full_array, img_pan_full_array, mask_full_array", "def read_gz(images,labels):\n\t# Open the images with gzip in read binary mode\n\t# images = gzip.open('../MNIST-data/train-images-idx3-ubyte.gz', 'rb')\n\t# labels = gzip.open('../MNIST-data/train-labels-idx1-ubyte.gz', 'rb')\n\n\t# Read the binary data\n\n\t# We have to get big endian unsigned int. 
So we need '>I'\n\n\t# Get metadata for images\n\timages.read(4) # skip the magic_number\n\tnumber_of_images = images.read(4)\n\tnumber_of_images = unpack('>I', number_of_images)[0]\n\trows = images.read(4)\n\trows = unpack('>I', rows)[0]#28\n\tcols = images.read(4)\n\tcols = unpack('>I', cols)[0]#28\n\n\t# Get metadata for labels\n\tlabels.read(4) # skip the magic_number\n\tN = labels.read(4)\n\tN = unpack('>I', N)[0] #60000\n\t# print(number_of_images);\n\n\tif number_of_images != N:\n\t raise Exception('number of labels did not match the number of images')\n\n\t# Get the data\n\tx = zeros((N, rows, cols), dtype=float32) # Initialize numpy array #60000X28X28\n\ty = zeros((N, 1), dtype=uint8) # Initialize numpy array\n\tfor i in range(N):\n\t if i % 1000 == 0:\n\t print(\"i: %i\" % i)\n\t for row in range(rows):\n\t for col in range(cols):\n\t tmp_pixel = images.read(1) # Just a single byte\n\t tmp_pixel = unpack('>B', tmp_pixel)[0]\n\t x[i][row][col] = tmp_pixel\n\t tmp_label = labels.read(1)\n\t y[i] = unpack('>B', tmp_label)[0]\n\t # print(y.shape)#60000X1\n\treturn (x, y)", "def __getitem__(self, index):\n img_name = self.files[self.split][index]\n msk_name = img_name.replace(\".bmp\", \".png\")\n\n image_path = os.path.join(self.root, self.split, img_name)\n label_path = os.path.join(self.root, self.split, msk_name)\n\n assert os.path.exists(os.path.join(label_path)), \\\n \"> Corresponding Mask: {} do not exist!!!\".format(msk_name)\n\n image = misc.imread(image_path)\n image = np.array(image, dtype=np.uint8)\n\n # image = Image.fromarray(image, mode='RGB')\n\n # bright_enhancer = ImageEnhance.Brightness(image)\n # image = bright_enhancer.enhance(1.25)\n #\n # con_enhancer = ImageEnhance.Contrast(image)\n # image = con_enhancer.enhance(1.75)\n\n # sharp_enhancer = ImageEnhance.Sharpness(image)\n # image = sharp_enhancer.enhance(2.25)\n\n # image = image.filter(ImageFilter.EMBOSS)\n\n # image = np.array(image, dtype=np.uint8)\n image = image[:, :, ::-1] # From RGB to BGR\n\n # Histogram Equalization\n # image[:, :, 0] = cv2.equalizeHist(image[:, :, 0])\n # image[:, :, 1] = cv2.equalizeHist(image[:, :, 1])\n # image[:, :, 2] = cv2.equalizeHist(image[:, :, 2])\n\n label = misc.imread(label_path, mode=\"L\")\n label[label > 0] = 1\n label = np.array(label, dtype=np.uint8)\n\n # data augmentation used in training\n if self.aug_ext is not None:\n image = self.aug_ext(image)\n if self.augmentations is not None:\n image, label = self.augmentations(image, label)\n\n if self.is_transform:\n image = self.transform(image)\n\n image = image.transpose(2, 0, 1) # From HWC to CHW (For PyTorch we use N*C*H*W tensor)\n return torch.from_numpy(image).float(), torch.from_numpy(label).long()", "def relabel_gifti(atlas, background=BACKGROUND, offset=None):\n\n out = tuple()\n minval = 0\n for hemi in atlas:\n # get necessary info from file\n img = load_gifti(hemi)\n data = img.agg_data()\n labels = img.labeltable.labels\n lt = {v: k for k, v in img.labeltable.get_labels_as_dict().items()}\n\n # get rid of labels we want to drop\n if background is not None:\n for val in background:\n idx = lt.get(val, 0)\n if idx == 0:\n continue\n data[data == idx] = 0\n labels = [f for f in labels if f.key != idx]\n\n # reset labels so they're consecutive and update label keys\n data = _relabel(data, minval=minval, bgval=0)\n ids = np.unique(data)\n for n, i in enumerate(ids):\n labels[n].key = i\n minval = len(ids) - 1 if offset is None else int(offset) - 1\n\n # make new gifti image with updated information\n darr = 
nib.gifti.GiftiDataArray(data, intent='NIFTI_INTENT_LABEL',\n datatype='NIFTI_TYPE_INT32')\n labeltable = nib.gifti.GiftiLabelTable()\n labeltable.labels = labels\n img = nib.GiftiImage(darrays=[darr], labeltable=labeltable)\n out += (img,)\n\n return out", "def data_augmenter(image, label, shift, rotate, scale, intensity, flip):\n image2 = np.zeros(image.shape, dtype=np.float32)\n label2 = np.zeros(label.shape, dtype=np.int32)\n for i in range(image.shape[0]):\n # For each image slice, generate random affine transformation parameters\n # using the Gaussian distribution\n shift_val = [np.clip(np.random.normal(), -3, 3) * shift,\n np.clip(np.random.normal(), -3, 3) * shift]\n rotate_val = np.clip(np.random.normal(), -3, 3) * rotate\n scale_val = 1 + np.clip(np.random.normal(), -3, 3) * scale\n intensity_val = 1 + np.clip(np.random.normal(), -3, 3) * intensity\n\n # Apply the affine transformation (rotation + scale + shift) to the image\n row, col = image.shape[1:3]\n M = cv2.getRotationMatrix2D((row / 2, col / 2), rotate_val, 1.0 / scale_val)\n M[:, 2] += shift_val\n for c in range(image.shape[3]):\n image2[i, :, :, c] = ndimage.interpolation.affine_transform(image[i, :, :, c],\n M[:, :2], M[:, 2], order=1)\n\n # Apply the affine transformation (rotation + scale + shift) to the label map\n label2[i, :, :] = ndimage.interpolation.affine_transform(label[i, :, :],\n M[:, :2], M[:, 2], order=0)\n\n # Apply intensity variation\n image2[i] *= intensity_val\n\n # Apply random horizontal or vertical flipping\n if flip:\n if np.random.uniform() >= 0.5:\n image2[i] = image2[i, ::-1, :, :]\n label2[i] = label2[i, ::-1, :]\n else:\n image2[i] = image2[i, :, ::-1, :]\n label2[i] = label2[i, :, ::-1]\n return image2, label2", "def extract_signature(source_image):\n # read the input image\n img = source_image\n img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)[1] # ensure binary\n\n # connected component analysis by scikit-learn framework\n blobs = img > img.mean()\n blobs_labels = measure.label(blobs, background=1)\n # image_label_overlay = label2rgb(blobs_labels, image=img)\n\n fig, ax = plt.subplots(figsize=(10, 6))\n\n '''\n # plot the connected components (for debugging)\n ax.imshow(image_label_overlay)\n ax.set_axis_off()\n plt.tight_layout()\n plt.show()\n '''\n\n the_biggest_component = 0\n total_area = 0\n counter = 0\n average = 0.0\n for region in regionprops(blobs_labels):\n if (region.area > 10):\n total_area = total_area + region.area\n counter = counter + 1\n # print region.area # (for debugging)\n # take regions with large enough areas\n if (region.area >= 250):\n if (region.area > the_biggest_component):\n the_biggest_component = region.area\n\n average = (total_area/counter)\n print(\"the_biggest_component: \" + str(the_biggest_component))\n print(\"average: \" + str(average))\n\n # experimental-based ratio calculation, modify it for your cases\n # a4_constant is used as a threshold value to remove connected pixels\n # are smaller than a4_constant for A4 size scanned documents\n a4_constant = (((average/84.0)*250.0)+100)*1.5\n print(\"a4_constant: \" + str(a4_constant))\n\n # remove the connected pixels are smaller than a4_constant\n b = morphology.remove_small_objects(blobs_labels, a4_constant)\n # save the the pre-version which is the image is labelled with colors\n # as considering connected components\n plt.imsave('pre_version.png', b)\n\n # read the pre-version\n img = cv2.imread('pre_version.png', 0)\n # ensure binary\n img = cv2.threshold(img, 0, 255,\n cv2.THRESH_BINARY_INV | 
cv2.THRESH_OTSU)[1]\n # save the the result\n # cv2.imwrite(\"output.png\", img)\n return img", "def compressImage(K):\n\tA = plt.imread('bird_small.png')\n\tm = A.shape[0] * A.shape[1]\n\tX = np.zeros((m, A.shape[2]))\n\n\t# build matrix X\n\tfor i in range(A.shape[0]):\n\t\tfor j in range(A.shape[1]):\n\t\t\tX[i + j * A.shape[0], :] = A[i, j, :]\n\n\tcentroids, idx = runKmeans(X, K)\n\n\tfor i in range(X.shape[0]):\n\t\tX[i, :] = centroids[int(idx[i]), :]\n\n\tcompressedImage = np.zeros(A.shape)\n\tfor i in range(A.shape[0]):\n\t\tfor j in range(A.shape[1]):\n\t\t\tcompressedImage[i, j, :] = X[i + j * A.shape[0], :]\n\n\tdisplayImage(compressedImage)", "def preprocess(path, path2 , scale):\n image = imread(path)\n label_ = imread(path2)\n\n #label_ = modcrop(label, scale)\n\n # Must be normalized\n input_ = image / 255.\n label_ = label_ / 255.\n\n #input_ = scipy.ndimage.interpolation.zoom(label_, (1./scale), prefilter=False)\n #input_ = scipy.ndimage.interpolation.zoom(input_, (scale/1.), prefilter=False)\n\n return input_, label_", "def testBinaryImage():\n ALIEN = \"0\"*8 + \"11011011\"*2 + \"0\"*8 + \"00001000\" + \\\n \"01000010\" + \"01111110\" + \"0\"*8\n # this function is imported from cs5png.py\n NUM_ROWS = 8\n NUM_COLS = 8\n binaryIm( ALIEN, NUM_COLS, NUM_ROWS )\n # that should create a file, binary.png, in this\n # directory with the 8x8 image...", "def mapping_image_to_label (self, labels_df, polygons, fpath_tiff): \n \n unread_tiff = rasterio.open(fpath_tiff)\n\n #Projecting the coordinates to that CRS \n proj = Proj(init='epsg:32618')\n data = []\n labels = []\n failed = []\n \n src = rasterio.open(fpath_tiff, 'r')\n outfolder = '/train/batch'\n \n print (\"Hold on tight! Mapping each image to its respective label...\")\n \n \n for num, row in labels_df.iterrows():\n try:\n \n \n roof_material_num = 0\n polygon0 = polygons [num]\n polygon0['coordinates'] = self.transforming_coordinates(polygon0['coordinates'], proj)\n masked_image, out_transform = rasterio.mask.mask(src,[polygon0], filled = True, crop=True, nodata = 0)\n img_image = reshape_as_image (masked_image)\n \n #Defining the name of the image file as \"buildingID+roofMaterial+png\" and its path \n img_path = os.path.join (outfolder, str (row['id'])+'-'+ str (row['roof_material'])+'.png')\n \n #swapping the color channels from RGB2BGR\n img_image = cv2.cvtColor (img_image, cv2.COLOR_RGB2BGR) #img_image is a numpy array\n \n #resizing the image dimensions to 128x128 to match ImageNet dimensions\n img_image = cv2.resize(img_image, (128, 128))\n \n #writing the image in the file\n #cv2.imwrite (img_path, img_image)\n # update the data and labels lists, respectively\n data.append(img_image) #data is a list\n labels.append(row['roof_material'])\n \n except Exception as e:\n print (e)\n failed.append (num)\n \n \n #print number of images we failed to crop and write \n print (\"Bad News First: Failed to write\", len(failed), \"Images.\")\n print (\"Good News: Successfully mapped\", len (data), \"Images.\")\n data = np.array(data)\n labels = np.array(labels)\n #batch = data.sample(frac=0.5, replace=False, random_state=1)\n #print(\"Size and shape of validY: {}\\n\".format(batch.shape))\n return data, labels", "def load_label(self, idx):\n im = open('{}/GTTXT/{}.txt'.format(root_dir, idx))\n\t#print(type(im.readlines()[0].rstrip(\"\\n\")))\n rgb_label = [i.rstrip(\"\\n\").split(\" \") for i in im.readlines()]\n\tlabel=[]\t\n\tfor i in rgb_label:\n\t\tlabel+=[int(j) for j in 
i]\n\tlabel=np.array(label).reshape(720,960)\n\tlabel[label==-1]=12\n\t#print(np.unique(label))\n #label = label[np.newaxis, ...]\n return label", "def __read_img_file(filename, label):\n image = cv2.cvtColor(cv2.imread(filename), cv2.COLOR_BGR2RGB)\n height, width, _ = image.shape\n image = cv2.resize(image, (img_size, img_size))\n # A label is consist of [y1, x1, y2, x2, class_idx]\n label = np.reshape(label, (-1, 5))\n rel_bboxes = label[..., 0:4] / np.array([height, width, height, width], np.float32)\n label = np.concatenate([rel_bboxes, np.expand_dims(label[..., -1], 1)], axis=-1)\n return image, label", "def __multilabel_processing(self):\n # read the raw dataset\n self.data['image_name'] = self.data['image_name'].map(lambda x: '{}.{}'.format(x, img_format))\n self.data['tags'] = self.data['tags'].map(lambda x: x.split())\n\n # create a df with the same number of rows as the dataset filled with the name of the unique values in tags\n label_names = self.data['tags'].explode().unique().tolist()\n label_df = pd.DataFrame([label_names] * self.data.shape[0], columns=label_names)\n\n # binarize the labels according to if they exist for each image or not\n self.data = pd.concat([self.data, label_df], axis=1)\n self.data[['image_name'] + label_names] = self.data.apply(lambda x: pd.Series([x[0]] + [1 if label in x[1] else 0 for label in x[2:]]), axis=1)", "def prepare_output(self):\n self.output = np.copy(self.image)\n self._highlight_qr_codes()\n self._write_overlay_info()", "def labels_to_cityscapes_palette(image):\n classes=ZHANG_classes \n result =np.zeros((img.shape[0], img.shape[1], 3),dtype=np.uint8)\n for key, value in classes.items():\n result[np.where(img == key)] = value\n return result", "def AA2Image(readpath, savepath, header, font_data):\n if not os.path.isdir(savepath):\n os.makedirs(savepath)\n print('convert txt to png. 
save path: ', savepath)\n\n files = glob.glob(readpath+'*.txt')\n\n for file in files:\n ascii_art = AsciiArt(file)\n ascii_art_image = ascii_art.image(font_data)\n filename = header + os.path.basename(file)[:-4] + '.png'\n ascii_art_image = Image.fromarray(ascii_art_image)\n ascii_art_image = ascii_art_image.convert('L')\n ascii_art_image.save(savepath + filename)\n print('saved ', filename)", "def createDataset(sources,output,labels,sparse):\n global has_joblib\n out_path = str(output)\n # delete the output file\n if os.path.exists(os.path.abspath(out_path)):\n os.remove(os.path.abspath(out_path))\n \n # first, list the source files\n fpaths_src, fnames_src = utils.listFiles(directory=os.path.abspath(sources), ext='png')\n \n label_map={}\n \n # read the label file\n if not (labels == None):\n label_map = utils.readLabelMap(labels)\n # check that the numbers match\n print(\"Number of images in label map : %s\"%str(len(label_map.keys())-1))\n print(\"Number of images in source dir: %s\"%str(len(fpaths_src)))\n assert len(label_map.keys())-1 == len(fpaths_src)\n \n # generate KNN classifier\n if not (args.codebook == 'None' or args.codebook == None):\n args.knn = getKNNClassifier() \n else:\n args.knn = None\n \n # precompute number of images\n n_imgs = len(fpaths_src)\n \n # preallocate array\n # if augmentation, calculate (9*4+1)*n samples\n all_features_list = []\n \n # parallel implementation (default, if joblib available)\n if has_joblib:\n image_features = Parallel(n_jobs=args.njobs,verbose=5) (delayed(processImage)(fpaths_src, label_map, fnames_src, img_idx) for img_idx in range(n_imgs))\n # collect all images into a single matrix\n image_features = np.concatenate(image_features, axis=0)\n all_features_list.append(image_features)\n else:\n for img_idx in xrange(n_imgs):\n image_features = processImage(fpaths_src, label_map, fnames_src, img_idx)\n all_features_list.append(image_features)\n \n # make a 2D matrix from the list of features (stack all images vertically)\n feat_matrix = np.concatenate(all_features_list, axis=0).astype(np.float32) \n \n # do scaling of each feature dimension \n #if False:\n if not (args.scale == 0):\n print \"Scaling data...\"\n \n # preserve the labels\n label_vec = feat_matrix[:,0]\n feat_matrix = np.delete(feat_matrix,0,1)\n \n featurestats = np.zeros((2,feat_matrix.shape[1]))\n \n # use soft-normalization (zero-mean, unit var whitening)\n if (args.scale == 1):\n # if we specified featurestats from a training set, use them\n if not (args.featurestats == None):\n # load the statistics\n featurestats = loadFeatureStats()\n # featurestats contains 2 rows, first row = mean, second row = std\n # and n feature dimensions\n assert feat_matrix.shape[1]==featurestats.shape[1]\n else:\n pass\n \n \n # use hard-normalization \n elif (args.scale == 2):\n # if we specified featurestats from a training set, use them\n if not (args.featurestats == None):\n # load the statistics\n featurestats = loadFeatureStats()\n # the featurestats contains 2 rows, first row = min, second row = max \n # and n feature dimensions\n assert feat_matrix.shape[1]==featurestats.shape[1]\n else:\n pass\n \n \n # normalize each feature dimension\n for feat_idx in xrange(feat_matrix.shape[1]):\n feat_vec = feat_matrix[:,feat_idx]\n \n # soft-normalization (zero-mean, approx. 
unit variance)\n if (args.scale == 1): \n # if feature statistics are specified\n if not (args.featurestats == None):\n feat_mean = featurestats[0,feat_idx]\n feat_std = featurestats[1,feat_idx]\n else:\n # compute them from the data\n feat_mean = feat_vec.mean()\n feat_std = (feat_vec.std() + 1e-10)\n # store them \n featurestats[0,feat_idx] = feat_mean\n featurestats[1,feat_idx] = feat_std\n \n # shift to zero mean and (unit) variance\n feat_vec_scaled = (feat_vec - feat_mean) / (1.*feat_std)\n \n \n # hard-normalization (min/max = borders estimated from the (training) dataset)\n elif (args.scale == 2):\n if not (args.featurestats == None):\n feat_min = featurestats[0,feat_idx]\n feat_max = featurestats[1,feat_idx]\n else:\n # compute them freshly\n feat_min = np.min(feat_vec)\n feat_max = np.max(feat_vec)\n # store them \n featurestats[0,feat_idx] = feat_min\n featurestats[1,feat_idx] = feat_max\n \n # standardize/normalize between 0 and 1\n feat_vec_std = (feat_vec - feat_min) / (feat_max - feat_min + 1e-10) \n \n # linearly scale between -1 and 1 \n feat_vec_scaled = (1.0*feat_vec_std * (1 - -1)) - 1\n \n \n # set column back to matrix\n feat_matrix[:,feat_idx] = feat_vec_scaled\n \n # finally prepend the label_vec again\n feat_matrix = np.concatenate((np.reshape(label_vec,(feat_matrix.shape[0],1)),feat_matrix), axis=1)\n \n print \"Done.\"\n else:\n print \"Data may not be properly scaled, use the 'svm-scale' implementation of libsvm.\"\n \n if not (args.savefeaturestats == None):\n saveFeatureStats(featurestats) \n\n #Parallel(n_jobs=args.njobs, verbose=5)(delayed(function)(params) for i in range(10))\n # open the output file\n output_file = open(os.path.abspath(out_path), 'wb')\n\n # run through the feature matrix \n print \"Writing %s rows and %s cols to file...\"%(feat_matrix.shape)\n # parallel implementation (default, if joblib available)\n if has_joblib:\n lines = Parallel(n_jobs=args.njobs, verbose=5)(delayed(writeLine)(i, feat_matrix) for i in range(feat_matrix.shape[0]))\n output_file.writelines(lines) \n else:\n for i in xrange(feat_matrix.shape[0]):\n line = writeLine(i, feat_matrix)\n output_file.writelines(line)\n \n output_file.close()\n \n return 0", "def process(self, image):", "def colour_code_segmentation(image, label_values):\r\n\r\n # w = image.shape[0]\r\n # h = image.shape[1]\r\n # x = np.zeros([w,h,3])\r\n # colour_codes = label_values\r\n # for i in range(0, w):\r\n # for j in range(0, h):\r\n # x[i, j, :] = colour_codes[int(image[i, j])]\r\n label_values = [label_values[key] for key in label_values]\r\n colour_codes = np.array(label_values)\r\n x = colour_codes[image.astype(int)]\r\n\r\n return x", "def writencb(master):\r\n \r\n fname = path.join(path.dirname(master.hdrfile), master.stackfname.get())\r\n hdrfile = master.hdrfile\r\n rawstack = master.data.rawstack\r\n energies = master.data.energies\r\n \r\n with open(hdrfile, 'r') as hdr:\r\n ln = hdr.readline()\r\n \r\n while ln[:12] != '{ CentreXPos':\r\n ln = hdr.readline()\r\n params = ln.split(';')\r\n \r\n xdim = float(params[2][9:])\r\n ydim = float(params[3][9:])\r\n xnpx = int(params[6][10:])\r\n ynpx = int(params[7][10:])\r\n \r\n ringcurrent = []\r\n imgids = []\r\n \r\n hdr.readline()\r\n while ln:\r\n ln = hdr.readline()\r\n if ln[:5] == 'Image':\r\n params = ln.split('; ')\r\n imgids.append(params[0][5:8])\r\n ringcurrent.append(float(params[0][-6:]))\r\n \r\n imgnames = [hdrfile[:-4] + '_a' + n + '.xim' for n in imgids]\r\n print(len(imgnames), len(ringcurrent))\r\n # normalize images to 
ring current 500.0\r\n for i in range(len(rawstack)):\r\n rawstack[i] *= 500.0\r\n rawstack[i] /= ringcurrent[i]\r\n \r\n alldata = np.array([])\r\n for im in rawstack:\r\n alldata = np.append(alldata, np.ndarray.flatten(im[::-1]))\r\n \r\n # check max counts. if possible, scale up by power of 10,\r\n # but keep the max counts below 32768 so that output can\r\n # be stored as 16-bit integers. if max counts are above\r\n # 32768, scale down by power of 10.\r\n \r\n m = np.max(alldata)\r\n scaleexp = np.log10(32767.0/m)\r\n if scaleexp > 0:\r\n scaleexp = int(scaleexp)\r\n else:\r\n scaleexp = int(scaleexp - 1)\r\n scale = (10.0)**scaleexp\r\n \r\n alldata *= scale\r\n \r\n # write .ncb -- just a list of 16-bit integers\r\n (alldata.astype('int16')).tofile(fname)\r\n \r\n # write .dat file associated with .ncb. The pattern is:\r\n #\r\n # [# of pixels in x] [# of pixels in y] [count scale factor]\r\n # 0.00000 [x_width in microns]\r\n # 0.00000 [y_width in microns]\r\n # [# of images]\r\n # [energy, image 1 ]\r\n # [energy, image 2 ]\r\n # [ ... ]\r\n # [energy, last image]\r\n #[filename, image 1] [energy, image 1 ] 2.00 (<--- not sure what the function of this is!)\r\n #[filename, image 2] [energy, image 2 ] 2.00\r\n #[ ... ] 2.00\r\n #[filename, last image] [energy, last image] 2.00\r\n #\r\n \r\n datfile = open(fname[:-3] + 'dat', 'w')\r\n\r\n datfile.write(' '*(12 - len(str(xnpx))) + str(xnpx))\r\n datfile.write(' '*(12 - len(str(ynpx))) + str(ynpx))\r\n datfile.write(' '*(13 - len(str(scale))) + str(scale))\r\n datfile.write('\\n')\r\n \r\n datfile.write(' 0.000000')\r\n datfile.write(' '*(13 - len(str(xdim))) + str(xdim) + '\\n')\r\n datfile.write(' 0.000000')\r\n datfile.write(' '*(13 - len(str(ydim))) + str(ydim) + '\\n')\r\n \r\n datfile.write(' '*(12 - len(str(len(rawstack)))) + str(len(rawstack)) + '\\n')\r\n \r\n for i in range(len(rawstack)):\r\n datfile.write(' '*(13 - len(str(energies[i]))) + str(energies[i]) + '\\n')\r\n\r\n for i in range(len(rawstack)):\r\n datfile.write(imgnames[i][-21:] + ' ' + str(energies[i]))\r\n if str(energies[i])[-2] == '.':\r\n datfile.write('0')\r\n datfile.write(' 2.00\\n')\r\n \r\n datfile.close()\r\n \r\n master.filedisp.set('Wrote stack file to ' + fname)", "def process_images(path, dataset):\n \n print(f\"Processing images {os.path.join(path, dataset)}\", flush=True)\n label_file = os.path.join(path, dataset + '-labels-idx1-ubyte')\n with open(label_file, 'rb') as file:\n _, num = struct.unpack(\">II\", file.read(8))\n labels = numpy.fromfile(file, dtype=numpy.int8) #int8\n new_labels = numpy.zeros((num, 10))\n new_labels[numpy.arange(num), labels] = 1\n\n img_file = os.path.join(path, dataset + '-images-idx3-ubyte')\n with open(img_file, 'rb') as file:\n _, num, rows, cols = struct.unpack(\">IIII\", file.read(16))\n imgs = numpy.fromfile(file, dtype=numpy.uint8).reshape(num, rows, cols) #uint8\n imgs = imgs.astype(numpy.float32) / 255.0\n\n os.remove(label_file); os.remove(img_file)\n print(f\"Saving files under {os.path.join(path, dataset)} path\", flush=True)\n numpy.savez_compressed(os.path.join(path, dataset), imgs=imgs, labels=labels)", "def __init__(self, image_filename, labels_filename, size):\n self._image = None\n self._labels = None\n self._image_metadata = None\n self._labels_metadata = None\n self._size = None\n self._offset = None\n self._labeled_indices = None # initialized in __iter__()\n self._count = None # initialized in __iter__()\n self._max_iter = None # initialized in __iter__()\n # assert valid files\n assert 
os.path.exists(image_filename), (\"image file not found\")\n assert os.path.exists(labels_filename), (\"labels file not found\")\n # assert equal metadata \n image_metadata = raster.Metadata(image_filename)\n labels_metadata = raster.Metadata(labels_filename) \n assert image_metadata == labels_metadata, (\n \"Metadata are not equivalent. \" + \n \"Try `gdalinfo` on the files. \" + \n \"Look at the docstring for `raster.Metadata.__eq__()`.\")\n assert labels_metadata.ndv is not None, (\n \"labels metadata ndv is None\")\n self._image_metadata = image_metadata\n self._labels_metadata = labels_metadata\n # asserts on image and labels np.ndarrays\n image = raster.load_image(image_filename)\n labels = raster.load_image(labels_filename)\n assert isinstance(image, np.ndarray), (\n \"image must be a numpy.ndarray\")\n assert len(image.shape) == 3, (\n \"image must be an numpy.ndarray with shape (H,W,D)\")\n assert isinstance(labels, np.ndarray), (\n \"labels must be a numpy.ndarray\")\n assert len(labels.shape) == 3, (\n \"lables must be an numpy.ndarray with shape (H,W,D)\")\n # test if shape of both is equal on H,W axes\n assert image.shape[0] == labels.shape[0], (\n \"Image and label height is different\")\n assert image.shape[1] == labels.shape[1], (\n \"Image and label height is different\") \n self._image = image \n self._labels = labels\n # assert on size\n assert isinstance(size, int), (\"size must be an integer\")\n assert size % 2 == 1, (\"size must be an odd integer\")\n assert size > 1, (\"size must be an integer >1\")\n self._size = size\n self._offset = self.size // 2", "def encode(image):\n from encoder import launch\n launch(image)", "def __init__(self, image_root, label_root, img_x, img_y):\n self.images_path = image_root\n self.labels_path = label_root\n self.data_len = 0\n self.images = []\n self.labels = open(self.labels_path, \"r\").readlines()\n self.transform = transforms.Compose([\n transforms.Resize((img_x, img_y)), \n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\n for file in self.labels:\n self.data_len += 1\n tem = file.split(\" \")[0]\n temp = tem.split(\"-\")\n self.images.append(self.images_path + temp[0] + '/' + temp[0] + \"-\" + temp[1] + \"/\" + tem + \".png\")", "def ascii_to_tiff(infile, outfile, refIm):", "def prepare_map(self):\n for y_coord, row in enumerate(self.contents):\n for x_coord, tile in enumerate(row):\n bit_map = self.get_tile_bitmap(tile)\n self.image[y_coord * TILE_SIZE:(y_coord+1) * TILE_SIZE,\n x_coord * TILE_SIZE:(x_coord+1) * TILE_SIZE] = bit_map", "def compress_image(filename = \"hubble.jpg\", s = 2):\n \n image = imread(filename)/255\n \n toShow = plt.subplot(121)\n \n #Color\n if len(np.shape(image)) == 3:\n #Set up original RGB values\n R = np.array(image[:,:,0])\n G = np.array(image[:,:,1])\n B = np.array(image[:,:,2])\n \n R, errR = svd_approx(R,s)\n G, errG = svd_approx(G,s)\n B, errB = svd_approx(B,s)\n imageF = np.dstack((R,G,B))\n err = errR + errG + errB\n toShow.imshow(imageF)\n toShow.set_title(\"New \" + str(err) + \", so \" + str((image.size-err)) + \" \\\"saved\\\"\")\n toShow.axis(\"off\")\n \n toShow = plt.subplot(122)\n toShow.set_title(\"Original \" + str(image.size))\n toShow = plt.imshow(image)\n #Gray\n else:\n imageF, err = svd_approx(image, s)\n# print(np.shape(imageF))\n toShow.imshow(imageF, cmap = \"gray\")\n toShow.set_title(\"New \" + str(err) + \", so \" + str((image.size-err)) + \" \\\"saved\\\"\")\n toShow.axis(\"off\")\n toShow = plt.subplot(122)\n 
toShow.set_title(\"Original \" + str(image.size))\n toShow = plt.imshow(image, cmap = \"gray\")\n \n plt.suptitle(\"MY PLOTS: \" + str((image.size-err)) + \" \\\"saved\\\" :p\")\n \n return\n raise NotImplementedError(\"Problem 5 Incomplete\")", "def assemble_image(self, raw_image):\n \n # set up the raw image and the assembled template\n if not raw_image.shape == (32,185,388):\n raise ValueError('`raw_image` must have shape (32,185,388), got '\n '%s' % str(raw_image.shape))\n \n # for some reason, bool types don't work. Make them ints\n if raw_image.dtype == np.bool:\n raw_image = raw_image.astype(np.int32)\n \n bounds = 2000 # JAS: total image range is 2000, ensures beam center is at (1000,1000)\n assembled_image = np.zeros((bounds, bounds), dtype=raw_image.dtype)\n \n bg = self.to_basisgrid()\n\n # iterate over quads\n pixel_size = 109.920\n for quad_index in range(4):\n for two_by_one in range(8):\n\n asic_idx = quad_index * 16 + two_by_one * 2 # add one for 2nd asic\n \n # assemble the 2x1 -- insert a 3 px gap\n gap = np.zeros( (185,3), dtype=raw_image.dtype )\n two_by_one_img = np.hstack( (raw_image[quad_index*8+two_by_one,:,:194], gap, \n raw_image[quad_index*8+two_by_one,:,194:]) )\n \n # flip x data to conform w/CXI convention\n #two_by_one_img = two_by_one_img[::-1,:]\n \n # note that which dim is x changes w/two_by_one and quad_index\n # here the rotation is off between dtc/cspad by 180 in some quads\n # JAS: updated rotation to asic_rot - 180 instead of -asic_rot \n # to get proper rotation of asics in assembled image\n p, s, f, shape = bg.get_grid(asic_idx)\n theta = arctan3(f[1], f[0]) * (360. / (np.pi * 2.0))\n\n two_by_one_img = interp.rotate(two_by_one_img,\n theta - 180,\n output=two_by_one_img.dtype,\n reshape=True)\n \n # find the center of the 2x1 in space\n corners0 = bg.get_grid_corners(asic_idx)\n corners1 = bg.get_grid_corners(asic_idx + 1)\n \n # un-swap x-axis and re-swap below -- necessary b/c now we\n # have data in two_by_one_img that needs swap\n corners0[:,0] = -corners0[:,0]\n corners1[:,0] = -corners1[:,0]\n \n center = ( np.concatenate([corners0[:,0], corners1[:,0]]).mean(),\n np.concatenate([corners0[:,1], corners1[:,1]]).mean() )\n\n # find the bottom left corner (note x is cols, so swap inds)\n c = (center[0] / pixel_size - two_by_one_img.shape[1] / 2.,\n center[1] / pixel_size - two_by_one_img.shape[0] / 2.,)\n \n # the assembled image center will be at 1000, 1000 by convention\n cs = int(round(c[0])) + 1000\n rs = int(round(c[1])) + 1000\n\n if (rs < 0) or (rs+two_by_one_img.shape[0] > bounds):\n raise ValueError('rs: out of bounds in rows. CSPAD geometry '\n 'extends beyond 2000 x 2000 grid it is '\n 'assembled on. It is likely that your CSPAD '\n 'geometry is wacky in some respect -- use '\n '`sketch` method to check.')\n if (cs < 0) or (cs+two_by_one_img.shape[1] > bounds):\n raise ValueError('cs: out of bounds in cols. CSPAD geometry '\n 'extends beyond 2000 x 2000 grid it is '\n 'assembled on. 
It is likely that your CSPAD '\n 'geometry is wacky in some respect -- use '\n '`sketch` method to check.')\n \n assembled_image[rs:rs+two_by_one_img.shape[0],\n cs:cs+two_by_one_img.shape[1]] += two_by_one_img\n \n # swap x-axis to conform to CXI convention\n #assembled_image = assembled_image[:,::-1]\n \n return assembled_image", "def convert_im(im, tree, lbls, block_imgs):\n h, w, _ = im.shape\n step = 16\n for r in range(0, h, step):\n for c in range(0, w, step):\n rnext = min(r+step, h)\n cnext = min(c+step, w)\n patch = im[r:rnext, c:cnext]\n color = np.average(patch, axis=(0, 1))\n\n # Get closest block\n _, ind = tree.query([color], k=1)\n lbl = lbls[ind[0][0]]\n block = block_imgs[lbl]\n\n # Copy values\n rmax = rnext-r\n cmax = cnext-c\n im[r:rnext, c:cnext] = block[:rmax, :cmax]", "def preprocess(path, scale=3):\n image = imread(path, is_grayscale=True)\n label_ = modcrop(image, scale)\n\n # Must be normalized\n \n label_ = label_ / 255.\n \n\n\n input_ = scipy.ndimage.interpolation.zoom(label_, (1. / scale), prefilter=False)\n input_ = scipy.ndimage.interpolation.zoom(input_, (scale / 1.), prefilter=False)\n\n return input_, label_", "def encode_bboxes(ann, bboxes, img_name):\n\n ann_root = ann.getroot()\n\n folder = ET.Element(\"folder\")\n folder.text = ann_root.find('folder').text\n filename = ET.Element(\"filename\")\n filename.text = img_name\n path = ET.Element(\"path\")\n path.text = ann_root.find('folder').text + '/' + img_name\n source = ET.Element(\"source\")\n database = ET.Element(\"database\")\n database.text = ann_root.find(\"source\").find('database').text\n source.append(database)\n size = ET.Element(\"size\")\n width = ET.Element(\"width\")\n width.text = ann_root.find(\"size\").find('width').text\n height = ET.Element(\"height\")\n height.text = ann_root.find(\"size\").find('height').text\n depth = ET.Element(\"depth\")\n depth.text = ann_root.find(\"size\").find('depth').text\n size.append(width)\n size.append(height)\n size.append(depth)\n segmented = ET.Element(\"segmented\")\n segmented.text = ann_root.find('segmented').text\n\n new_root = ET.Element(\"annotation\")\n new_root.append(folder)\n new_root.append(filename)\n new_root.append(path)\n new_root.append(source)\n new_root.append(size)\n new_root.append(segmented)\n\n for b in bboxes:\n xmin = ET.Element(\"xmin\")\n xmin.text = str(int(b[0]))\n ymin = ET.Element(\"ymin\")\n ymin.text = str(int(b[1]))\n xmax = ET.Element(\"xmax\")\n xmax.text = str(int(b[2]))\n ymax = ET.Element(\"ymax\")\n ymax.text = str(int(b[3]))\n name = ET.Element(\"name\")\n name.text = self.classes[int(b[4])]\n bndbox = ET.Element(\"bndbox\")\n bndbox.append(xmin)\n bndbox.append(ymin)\n bndbox.append(xmax)\n bndbox.append(ymax)\n pose = ET.Element(\"pose\")\n truncated = ET.Element(\"truncated\")\n difficult = ET.Element(\"difficult\")\n pose.text = \"Unspecified\"\n truncated.text = \"0\"\n difficult.text = \"0\"\n obj = ET.Element(\"object\")\n obj.append(name)\n obj.append(pose)\n obj.append(truncated)\n obj.append(difficult)\n obj.append(bndbox)\n\n new_root.append(obj)\n\n new_tree = ET.ElementTree(new_root)\n\n return new_tree", "def build_label_transform():\n\n return NALabelEncoder()", "def json2mask(txt, mattr, filepath):\n img = np.zeros((2048, 2448, 3),\n dtype=np.uint8)\n info = json.loads(txt)['codes']\n for code in info:\n barcode_area = (slice(code['y0'], code['y1']),\n slice(code['x0'], code['x1']), slice(0, 3))\n leny = barcode_area[0].stop - barcode_area[0].start\n lenx = barcode_area[1].stop - 
barcode_area[1].start\n img[barcode_area] = 1\n if leny * lenx > (2048 * 2448) / 16: # if barcodearea larger than a\n # 16th of the original image\n return None\n return img", "def imageprepare(image_data):\n im = Image.open(io.BytesIO(image_data))\n im = remove_transparency(im)\n im = im.resize((28,28))\n width = float(im.size[0])\n height = float(im.size[1])\n new_image = Image.new('L', (28, 28), 255) # creates white canvas of 28x28 pixels\n\n if width > height: # check which dimension is bigger\n # Width is bigger. Width becomes 20 pixels.\n nheight = int(round((20.0 / width * height), 0)) # resize height according to ratio width\n if nheight == 0: # rare case but minimum is 1 pixel\n nheight = 1\n # resize and sharpen\n img = im.resize((20, nheight), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\n wtop = int(round(((28 - nheight) / 2), 0)) # calculate horizontal position\n new_image.paste(img, (4, wtop)) # paste resized image on white canvas\n else:\n # Height is bigger. Heigth becomes 20 pixels.\n nwidth = int(round((20.0 / height * width), 0)) # resize width according to ratio height\n if nwidth == 0: # rare case but minimum is 1 pixel\n nwidth = 1\n # resize and sharpen\n img = im.resize((nwidth, 20), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\n wleft = int(round(((28 - nwidth) / 2), 0)) # caculate vertical pozition\n new_image.paste(img, (wleft, 4)) # paste resized image on white canvas\n\n # new_image = ImageOps.invert(new_image)\n\n tv = list(new_image.getdata()) # get pixel values\n\n # normalize pixels to 0 and 1. 0 is pure white, 1 is pure black.\n tva = [(255 - x) * 1.0 / 255.0 for x in tv]\n return tva, new_image", "def read_stanford_labels():\n # First get the hardi data\n fetch_stanford_hardi()\n hard_img, gtab = read_stanford_hardi()\n\n # Fetch and load\n files, folder = fetch_stanford_labels()\n labels_file = pjoin(folder, \"aparc-reduced.nii.gz\")\n labels_img = nib.load(labels_file)\n return hard_img, gtab, labels_img", "def _handle_image_descriptors(self):\n while self.file_content[self.data_idx] == 0x2c:\n img_left = self.file_content[self.data_idx + 1] + \\\n (self.file_content[self.data_idx + 2] << 8)\n img_top = self.file_content[self.data_idx + 3] + \\\n (self.file_content[self.data_idx + 4] << 8)\n img_width = self.file_content[self.data_idx+5] + \\\n (self.file_content[self.data_idx + 6] << 8)\n #img_height = self.file_content[self.data_idx+7] + \\\n # (self.file_content[self.data_idx + 8] << 8)\n flags = self.file_content[self.data_idx + 9]\n local_col_table_flag = (flags & 0b10000000) != 0\n #interlace_flag = (flags & 0b01000000) != 0\n self.data_idx = self.data_idx + 10\n if local_col_table_flag:\n # read local color table\n print('read local color table. 
Not implemented yet')\n\n self.lzw_min_code_sz = self.file_content[self.data_idx]\n self.data_idx = self.data_idx + 1\n\n pix_xix = img_left\n pix_yix = img_top\n subblock_data = []\n while self.file_content[self.data_idx] != 0:\n subblock_sz = self.file_content[self.data_idx]\n self.data_idx = self.data_idx + 1\n subblock_data += self.file_content[self.data_idx:self.data_idx + subblock_sz]\n self.data_idx = self.data_idx + subblock_sz\n self.data_idx = self.data_idx + 1\n dec_data = self.decode_subblock(subblock_data)\n for dat in dec_data:\n self.output_image[pix_xix][pix_yix][0] = self.color_table[dat][0]\n self.output_image[pix_xix][pix_yix][1] = self.color_table[dat][1]\n self.output_image[pix_xix][pix_yix][2] = self.color_table[dat][2]\n pix_xix = pix_xix + 1\n if pix_xix == img_left + img_width:\n pix_xix = img_left\n pix_yix = pix_yix + 1" ]
[ "0.79495394", "0.7899799", "0.7884121", "0.7781928", "0.7687692", "0.60020334", "0.5997393", "0.5904447", "0.58416754", "0.5806961", "0.5723574", "0.5705346", "0.56988686", "0.56837463", "0.5680749", "0.5661601", "0.5639231", "0.55816334", "0.55626464", "0.555133", "0.5549232", "0.5540666", "0.5540165", "0.55375177", "0.5529249", "0.55179787", "0.5476792", "0.5471852", "0.54625255", "0.5459958", "0.5443208", "0.5435608", "0.5426546", "0.5415036", "0.5413503", "0.5410668", "0.54001164", "0.5386646", "0.53830963", "0.5382354", "0.53718597", "0.53619826", "0.5359853", "0.5359148", "0.53580767", "0.5353465", "0.53441185", "0.5343863", "0.5342226", "0.53318197", "0.5325037", "0.5320001", "0.53108966", "0.53023094", "0.52967185", "0.52945167", "0.5280433", "0.5267888", "0.52595335", "0.52592963", "0.52579004", "0.52544403", "0.5249228", "0.52439433", "0.52429944", "0.523679", "0.5223073", "0.5222025", "0.52141786", "0.5212515", "0.5211928", "0.52052915", "0.5205099", "0.52047604", "0.5196432", "0.5195197", "0.5192907", "0.51911217", "0.5179115", "0.5178108", "0.5177836", "0.51777935", "0.5172873", "0.5170565", "0.5164806", "0.5163263", "0.51584125", "0.51570296", "0.5156198", "0.5144639", "0.5143265", "0.51422375", "0.5142162", "0.51349455", "0.5133669", "0.5130943", "0.51190376", "0.5117461", "0.51170295", "0.51145685" ]
0.79031765
1
Custom easyconfig parameters for CrayPEToolchain
def extra_options():
    extra_vars = {
        'PrgEnv': [None, 'PrgEnv module to load, e.g., cray to load PrgEnv-cray, or None for automatic determination', CUSTOM],
        'PrgEnv_load': [True, 'Load the PrgEnv module (if True) or just set the corresponding environment variable (if False)', CUSTOM],
        'PrgEnv_family': [None, 'Declare to be a member of the PrgEnv family (if \'PrgEnv\'), of the cpeToolchain family (if \'cpeToolchain\') or manually unload all known PrgEnv and cpe* modules (if None, needed when LMOD is not used)', CUSTOM],
        'CPE_compiler': [None, 'Versionless compiler module to load, or None for automatic determination', CUSTOM],
        'CPE_version': [None, 'Version of the CPE, if different from the version of the module', CUSTOM],
        'CPE_load': ['first', 'First load the cpe module (if \'first\'), after the PrgEnv module (if \'after\'), load it at the end (if \'last\'), or do not load the cpe module (if None)', CUSTOM],
        'cray_targets': [[], 'Targetting modules to load', CUSTOM],
        # 'optional_example_param': [None, "Example optional custom parameter", CUSTOM],
    }
    return Bundle.extra_options(extra_vars)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def config( **kwargs ):", "def config():", "def config():", "def configuration():", "def get_config(self):\n config = super(Sc2Policy, self).get_config()\n config['eps'] = self.eps\n config['testing'] = self.testing\n return config", "def config(ctx):\n return", "def get_config(self):\n config = {\n 'multichannel': self._multichannel,\n 'complex_part': self._complex_part\n }\n base_config = super().get_config()\n return {**base_config, **config}", "def config(self, **kw):\n self.cfg_fixture.config(**kw)", "def configure(self, options, conf):", "def build_config(self, config):\n config.setdefaults('Makesmith Settings', {'COMport': 'COM5', 'xPitch': 20, 'openFile': \" \"})", "def customize_experiment_config(self, config):\n # TODO: use ConfigList from Coach launcher, and share customization code.\n hyperparams_dict = json.loads(os.environ.get(\"SM_HPS\", \"{}\"))\n\n # Set output dir to intermediate\n # TODO: move this to before customer-specified so they can override\n hyperparams_dict[\"rl.training.local_dir\"] = \"/opt/ml/output/intermediate\"\n\n self.hyperparameters = ConfigurationList() # TODO: move to shared\n for name, value in hyperparams_dict.items():\n # self.map_hyperparameter(name, val) #TODO\n if name.startswith(\"rl.\"):\n # self.apply_hyperparameter(name, value) #TODO\n self.hyperparameters.store(name, value)\n # else:\n # raise ValueError(\"Unknown hyperparameter %s\" % name)\n\n self.hyperparameters.apply_subset(config, \"rl.\")\n return config", "def configure_for_pokered(config=config):\n attrs = {\n \"version\": \"red\",\n\n \"map_dir\": os.path.join(config.path, 'maps/'),\n \"gfx_dir\": os.path.join(config.path, 'gfx/tilesets/'),\n \"to_gfx_name\": red_gfx_name,\n \"block_dir\": os.path.join(config.path, 'gfx/blocksets/'), # not used\n \"block_ext\": '.bst', # not used\n\n \"palettes_on\": False,\n\n \"constants_filename\": os.path.join(config.path, 'constants.asm'),\n\n \"time_of_day\": 1,\n }\n return attrs", "def configs(self):\n raise NotImplementedError()", "def default_kernel_config(defn):\n return [('beam', {}),\n ('hypers',\n {\n 'alpha_a': 4.0,\n 'alpha_b': 2.0,\n 'gamma_a': 3.0, \n 'gamma_b': 6.0\n }\n )]", "def _generate_config(self, type, org, node):\n args = {}\n if type == \"peer\":\n args.update({\"peer_id\": \"{}.{}\".format(node, org)})\n args.update({\"peer_address\": \"{}.{}:{}\".format(node, org, 7051)})\n args.update(\n {\"peer_gossip_externalEndpoint\": \"{}.{}:{}\".format(node, org, 7051)})\n args.update(\n {\"peer_chaincodeAddress\": \"{}.{}:{}\".format(node, org, 7052)})\n args.update({\"peer_tls_enabled\": True})\n args.update({\"peer_localMspId\": \"{}MSP\".format(org.capitalize())})\n\n a = NodeConfig(org)\n a.peer(node, **args)\n else:\n args.update({\"General_ListenPort\": 7050})\n args.update(\n {\"General_LocalMSPID\": \"{}OrdererMSP\".format(org.capitalize())})\n args.update({\"General_TLS_Enabled\": True})\n args.update({\"General_BootstrapFile\": \"genesis.block\"})\n\n a = NodeConfig(org)\n a.orderer(node, **args)", "def config(self):\n pass", "def config(self):\n pass", "def config(self) -> Dict[str, Any]:", "def magic_config(self,parameter_s=''):\n \n page('Current configuration structure:\\n'+\n pformat(self.rc.dict()))", "def config(self):\n raise NotImplementedError", "def cook_config(ext_config_filename):\n mc = base_model_config()\n with open(ext_config_filename, \"r\") as fp:\n ext_mc = edict(json.load(fp, encoding=\"utf8\"))\n for s in ext_mc.keys():\n mc[s] = ext_mc[s]\n # mc.ANCHOR_BOX = set_anchors(mc)\n # 
print(np.max(np.square(np.array(set_anchors_testing(mc)) - np.array(set_anchors(mc)))))\n # mc.ANCHORS = len(mc.ANCHOR_BOX)\n # H, W, C = _get_output_shape(mc)\n # mc.MODEL_OUTPUT_SHAPE = [H, W, mc.ANCHOR_PER_GRID]\n return mc", "def configure(self, section):", "def configure(self):", "def configure(self):", "def configure(self):", "def configure(self):", "def get_config(self):\n config = super(EpsGreedyQPolicy, self).get_config()\n config['eps'] = self.eps\n return config", "def build_configs():", "def _get_MindtPy_ECP_config():\n CONFIG = ConfigBlock('MindtPy-GOA')\n\n _add_common_configs(CONFIG)\n _add_ecp_configs(CONFIG)\n _add_oa_cuts_configs(CONFIG)\n _add_subsolver_configs(CONFIG)\n _add_tolerance_configs(CONFIG)\n _add_bound_configs(CONFIG)\n return CONFIG", "def generate_config():\n\n return {\n \"email_subject\": DEFAULT_EMAIL_SUBJECT,\n \"from_email\": DEFAULT_FROM_EMAIL,\n \"to_email\": DEFAULT_TO_EMAIL,\n \"url\": DEFAULT_URL,\n \"start_value\": DEFAULT_START_VALUE,\n \"look_ahead\": DEFAULT_LOOK_AHEAD,\n \"slide_window\": DEFAULT_SLIDE_WINDOW,\n }", "def config(self, parameter:str=\"config('ssid') for example\", mac: bytes|None = None, essid: str|None =None, ssid: str|None=None, channel: int|None =None, security: int|None=None, key: str|None=None, password: str|None=None, txpower: int|float|None =None) -> None:\n ...", "def config(self) -> NamedTuple:", "def build_config(self, config):\n \n config.setdefaults(\n 'Network', {'IP': '192.168.1.16', 'port': 8000}\n )\n config.setdefaults(\n 'Camera', {'ISO': 100, 'Shutter': 5000, 'Aperture': 4, 'Zoom': 45}\n )\n config.setdefaults(\n 'Admin', {'Logging Path': gs.AUVSI_BASE_FOLDER}\n )\n config.setdefaults(\n 'CV', {'image_rescaling': 0.25}\n )\n \n #\n # Disable multi touch emulation with the mouse.\n #\n from kivy.config import Config\n Config.set('input', 'mouse', 'mouse,disable_multitouch')", "def setup_config():\n global config\n config = modConfig.Config(cmdline.config)", "def configure_specie(self, specie):\r\n pass", "def component_configuration():\n return ProtocolConfig(\"a_protocol\", \"an_author\", \"0.1.0\")", "def create_config(self, context, mgmtport):\n pass", "def config(ctx):\n if not ctx.invoked_subcommand:\n cfg = ctx.obj['cfg']\n for section in cfg.sections():\n print(\"[\", section, \"]\")\n for option in cfg[section]:\n print(option, \" = \", cfg[section][option])", "def get_external_opts_configs(cls):\n return [\n ExternalOptConfig(\n name=\"auth_uri\",\n module_str=\"keystoneclient.middleware.auth_token\",\n group=\"keystone_authtoken\"),\n ExternalOptConfig(\n name=\"admin_user\",\n module_str=\"keystoneclient.middleware.auth_token\",\n group=\"keystone_authtoken\"),\n ExternalOptConfig(\n name=\"admin_password\",\n module_str=\"keystoneclient.middleware.auth_token\",\n group=\"keystone_authtoken\"),\n ExternalOptConfig(\n name=\"admin_tenant_name\",\n module_str=\"keystoneclient.middleware.auth_token\",\n group=\"keystone_authtoken\"),\n ]", "def setupOptions():\n define(\"conf\", default=\"\",help=\"path to configuration file\")\n define(\"DB_CACHE\", default=\"False\", help=\"Flag\")\n define(\"CELL_NAME\", default=\"beolink.org\", help=\"Default Cell\")\n\n afs.orm.DbMapper.setupOptions() \n return", "def cg_config():\n return {}", "def manage_config() -> dict:\n required_args = {\"embedding_size\", \"hidden_size\", \"num_layers\", \"corpus_dir\"}\n arg_groups = {\n \"general\": {\"recoding_type\"},\n \"model\": {\"embedding_size\", \"hidden_size\", \"num_layers\", \"dropout\"},\n \"train\": 
{\"weight_decay\", \"learning_rate\", \"batch_size\", \"num_epochs\", \"clip\", \"print_every\", \"eval_every\",\n \"model_save_path\", \"device\", \"model_name\"},\n \"logging\": {\"log_dir\"},\n \"corpus\": {\"corpus_dir\", \"max_seq_len\"},\n \"recoding\": {\"step_type\", \"num_samples\", \"mc_dropout\", \"prior_scale\", \"hidden_size\", \"weight_decay\",\n \"data_noise\", \"share_anchor\", \"use_cross_entropy\"},\n \"step\": {\"predictor_layers\", \"window_size\", \"step_size\", \"hidden_size\"}\n }\n argparser = init_argparser()\n config_object = ConfigSetup(argparser, required_args, arg_groups)\n config_dict = config_object.config_dict\n\n return config_dict", "def init_config(self):\n pass", "def pibooth_configure(cfg):", "def __init__(self):\n self.__parameters: ConfigParams = ConfigParams()", "def __configure(self):\n\n # CUDA\n if self.__cuda:\n if isinstance(self.__cuda, string_types):\n # Use specified path\n self.__configure_opts.append(\n '--with-cuda={}'.format(self.__cuda))\n elif self.__toolchain.CUDA_HOME:\n self.__configure_opts.append(\n '--with-cuda={}'.format(self.__toolchain.CUDA_HOME))\n else:\n # Default location\n self.__configure_opts.append('--with-cuda=/usr/local/cuda')\n else:\n self.__configure_opts.append('--without-cuda')\n\n # GDRCOPY\n if self.__gdrcopy:\n if isinstance(self.__gdrcopy, string_types):\n # Use specified path\n self.__configure_opts.append(\n '--with-gdrcopy={}'.format(self.__gdrcopy))\n else:\n # Boolean, let UCX try to figure out where to find it\n self.__configure_opts.append('--with-gdrcopy')\n elif self.__gdrcopy == False:\n self.__configure_opts.append('--without-gdrcopy')\n\n # KNEM\n if self.__knem:\n if isinstance(self.__knem, string_types):\n # Use specified path\n self.__configure_opts.append(\n '--with-knem={}'.format(self.__knem))\n else:\n # Boolean, let UCX try to figure out where to find it\n self.__configure_opts.append('--with-knem')\n elif self.__knem == False:\n self.__configure_opts.append('--without-knem')\n\n # OFED\n if self.__ofed:\n if isinstance(self.__ofed, string_types):\n # Use specified path\n self.__configure_opts.extend(\n ['--with-verbs={}'.format(self.__ofed),\n '--with-rdmacm={}'.format(self.__ofed)])\n else:\n # Boolean, let UCX try to figure out where to find it\n self.__configure_opts.extend(['--with-verbs', '--with-rdmacm'])\n elif self.__ofed == False:\n self.__configure_opts.extend(['--without-verbs',\n '--without-rdmacm'])\n\n # XPMEM\n if self.__xpmem:\n if isinstance(self.__xpmem, string_types):\n # Use specified path\n self.__configure_opts.append(\n '--with-xpmem={}'.format(self.__xpmem))\n else:\n # Boolean, let UCX try to figure out where to find it\n self.__configure_opts.append('--with-xpmem')\n elif self.__xpmem == False:\n self.__configure_opts.append('--without-xpmem')\n\n # Workaround for format warning considered an error on Power\n if hpccm.config.g_cpu_arch == cpu_arch.PPC64LE:\n if not self.__toolchain.CFLAGS:\n self.__toolchain.CFLAGS = '-Wno-error=format'", "def config(self) -> \"AutomationConfig\":", "def config(self) -> \"AutomationConfig\":", "def configure(self, args):\n pass", "def extra_options():\n extra_vars = {\n 'auto_detect_cpu_features': [True, \"Auto-detect available CPU features, and configure accordingly\", CUSTOM],\n 'with_shared': [True, \"Enable building of shared ELPA libraries\", CUSTOM],\n 'with_single': [True, \"Enable building of single precision ELPA functions\", CUSTOM],\n 'with_generic_kernel': [True, \"Enable building of ELPA generic kernels\", CUSTOM],\n 
}\n\n for flag in ELPA_CPU_FEATURE_FLAGS:\n if flag == 'sse4_2':\n conf_opt = ['sse', 'sse-assembly']\n elif flag == 'avx512f':\n conf_opt = ['avx512']\n else:\n conf_opt = [flag]\n\n for opt in conf_opt:\n help_msg = \"Configure with --enable-%s (if None, auto-detect support for %s)\" % (opt, flag.upper())\n extra_vars['use_%s' % flag] = [None, help_msg, CUSTOM]\n\n return ConfigureMake.extra_options(extra_vars)", "def initConfiguration():\n UTIL.SYS.s_configuration.setDefaults([\n [\"SYS_COLOR_LOG\", \"1\"],\n [\"HOST\", \"127.0.0.1\"],\n [\"NCTRS_TM_SERVER_PORT\", \"2502\"],\n [\"NCTRS_TM_DU_VERSION\", \"V0\"],\n [\"SPACECRAFT_ID\", \"758\"]])", "def create_config(self) -> None:\n pass", "def create_config(self) -> None:\n pass", "def _augment_pipeline_cfg(self):", "def add_config(self):\n\n config = {\n 'invert_byte': InvertByte,\n 'invert_word': InvertWord,\n 'invert_double_word': InvertDoubleWord,\n 'and_byte': AndByte,\n 'and_word': AndWord,\n 'and_double_word': AndDoubleWord,\n 'or_byte': OrByte,\n 'or_word': OrWord,\n 'or_double_word': OrDoubleWord,\n 'exclusive_or_byte': ExclusiveOrByte,\n 'exclusive_or_word': ExclusiveOrWord,\n 'exclusive_or_double_word': ExclusiveOrDoubleWord\n }\n\n return config", "def load_standard_parameters(self):\n paradic = {'x':'0',\n 'y':'0',\n 'n_oct':'8',\n 'n_spo':'3',\n 'sigma_min':'0.8',\n 'delta_min':'0.5',\n 'sigma_in':'0.5',\n 'C_DoG':'0.015',\n 'C_edge':'10',\n 'n_bins':'36',\n 'lambda_ori':'1.5',\n 't':'0.8',\n 'n_hist':'4',\n 'n_ori':'8',\n 'lambda_descr':'6',\n 'flag_match':'1',\n 'C_match':'0.6'}\n self.cfg['param']['paradic'] = paradic\n self.cfg.save()", "def get_config(self):\n config = {'epsilon':self.eps}\n base_config = super(PowTransform, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))", "def init_config(self):\n super().init_config()\n for param in self.parameters():\n if param.name == 'source':\n continue\n self.add_config_item(param.name,\n saver=lambda p=param: getattr(p, \"value\"),\n loader=lambda x, p=param: setattr(p, \"value\", x),\n default=param.default)", "def configTVassocTool(name,\n tvSGkey,\n tpcSGkey,\n vcSGkey, ):\n\n from JetRecTools.JetRecToolsConf import TrackVertexAssociationTool \n\n toolProperties = dict(\n TrackParticleContainer = tpcSGkey,\n TrackVertexAssociation = tvSGkey,\n VertexContainer = vcSGkey,\n TrackVertexAssoTool = jtm.jetLooseTVAtool,\n )\n\n # Build the tool :\n return TrackVertexAssociationTool(name, **toolProperties)", "def get_default_config_help(self):\n config = super(SignalfxHandler, self).get_default_config_help()\n\n config.update({\n 'url': 'Where to send metrics',\n 'batch': 'How many to store before sending',\n 'auth_token': 'Org API token to use when sending metrics',\n })\n\n return config", "def configure(self):\r\n pass", "def config_init(self):\n\n game_opts = [\n\n # Execution Options\n ('debug',False), # Toggle Debug Messaging\n ('log_path',False), # Turn on logging (w/path)\n ('log_lvl',logging.DEBUG), # Set log level\n\n # World Generation Options\n ('flex_limit',3) # Sets the maximum variance\n\n ]\n\n # Attempts to pull each value from the configuration\n # if not in config, the default value defined above\n # is set instead\n for opt in game_opts:\n try:\n setattr(self,opt[0],self.conf.conf_dict[opt[0]])\n except:\n setattr(self,opt[0],opt[1])\n continue", "def RPC_DigitizationToolCommonCfg(flags, name=\"RpcDigitizationTool\", **kwargs):\n from MuonConfig.MuonCondAlgConfig import RpcCondDbAlgCfg # MT-safe conditions access\n acc = 
RpcCondDbAlgCfg(flags)\n if flags.Digitization.DoXingByXingPileUp:\n kwargs.setdefault(\"FirstXing\", RPC_FirstXing())\n kwargs.setdefault(\"LastXing\", RPC_LastXing())\n kwargs.setdefault(\"OutputObjectName\", \"RPC_DIGITS\")\n if flags.Digitization.PileUpPremixing:\n kwargs.setdefault(\"OutputSDOName\", flags.Overlay.BkgPrefix + \"RPC_SDO\")\n else:\n kwargs.setdefault(\"OutputSDOName\", \"RPC_SDO\")\n # config\n kwargs.setdefault(\"DeadTime\", 100)\n kwargs.setdefault(\"PatchForRpcTime\", True)\n # kwargs.setdefault(\"PatchForRpcTimeShift\", 9.6875)\n kwargs.setdefault(\"PatchForRpcTimeShift\", 12.5)\n kwargs.setdefault(\"turnON_efficiency\", True)\n kwargs.setdefault(\"turnON_clustersize\", True)\n kwargs.setdefault(\"testbeam_clustersize\", 0)\n kwargs.setdefault(\"ClusterSize1_2uncorr\", 0)\n kwargs.setdefault(\"CutProjectedTracks\", 100)\n kwargs.setdefault(\"RPCInfoFromDb\", True)\n kwargs.setdefault(\"Efficiency_fromCOOL\", True)\n kwargs.setdefault(\"EfficiencyPatchForBMShighEta\", False)\n kwargs.setdefault(\"ClusterSize_fromCOOL\", True)\n kwargs.setdefault(\"DumpFromDbFirst\", False)\n kwargs.setdefault(\"PanelId_OFF_fromlist\", False)\n kwargs.setdefault(\"PanelId_OK_fromlist\", False)\n kwargs.setdefault(\"IgnoreRunDependentConfig\", False)\n kwargs.setdefault(\"PrintCalibrationVector\",False )\n kwargs.setdefault(\"PhiAndEtaEff_A\",[0.938, 0.938, 0.938, 0.938, 0.938, 0.938, 0.938, 0.938, 0.938])\n kwargs.setdefault(\"OnlyPhiEff_A\" ,[0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022])\n kwargs.setdefault(\"OnlyEtaEff_A\" ,[0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022])\n kwargs.setdefault(\"PhiAndEtaEff_C\",[0.938, 0.938, 0.938, 0.938, 0.938, 0.938, 0.938, 0.938, 0.938])\n kwargs.setdefault(\"OnlyPhiEff_C\" ,[0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022])\n kwargs.setdefault(\"OnlyEtaEff_C\" ,[0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022])\n kwargs.setdefault(\"FracClusterSize1_A\", [0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664])\n kwargs.setdefault(\"FracClusterSize2_A\", [0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986])\n kwargs.setdefault(\"FracClusterSizeTail_A\",[0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035 ])\n kwargs.setdefault(\"MeanClusterSizeTail_A\",[0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598])\n kwargs.setdefault(\"FracClusterSize1_C\", [0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664])\n kwargs.setdefault(\"FracClusterSize2_C\", [0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986])\n kwargs.setdefault(\"FracClusterSizeTail_C\",[0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035 ])\n 
kwargs.setdefault(\"MeanClusterSizeTail_C\",[0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598])\n RpcDigitizationTool = CompFactory.RpcDigitizationTool\n acc.setPrivateTools(RpcDigitizationTool(name, **kwargs))\n return acc", "def default_configs(cls):\n config = super().default_configs()\n config.update(\n {\n \"entry_type\": \"ft.onto.base_ontology.Document\",\n \"model_name\": \"ktrapeznikov/biobert_v1.1_pubmed_squad_v2\",\n \"question\": \"Where do I live\",\n \"max_answer_len\": 15,\n \"cuda_devices\": -1,\n \"handle_impossible_answer\": False,\n }\n )\n return config", "def config_section_data():\n config_data = u\"\"\"[fn_sep]\nsep_base_path=/sepm/api/v1\nsep_auth_path=/sepm/api/v1/identity/authenticate\nsep_host=<SEPM server dns name or ip address>\nsep_port=8446\nsep_username=<username>\nsep_password=<password>\nsep_domain=<SEP domain name>\n# Optional settings for access to SEPM via a proxy.\n#http_proxy=http://proxy:80\n#https_proxy=http://proxy:80\n# Limit result sent to Resilient, add full result as an attachment.\nsep_results_limit=200\n# Period of time (seconds) to wait for all endpoints to return a scan result.\nsep_scan_timeout=1800\n\"\"\"\n return config_data", "def tool_config(request):\n if request.is_secure():\n host = 'https://' + request.get_host()\n else:\n host = 'http://' + request.get_host()\n\n url = host + reverse('edx2canvas:lti_launch')\n\n lti_tool_config = ToolConfig(\n title='Add edX Content',\n launch_url=url,\n secure_launch_url=url,\n )\n account_nav_params = {\n 'enabled': 'true',\n 'text': 'Add edX Content',\n 'visibility': 'admins',\n }\n lti_tool_config.set_ext_param('canvas.instructure.com', 'privacy_level', 'public')\n lti_tool_config.set_ext_param('canvas.instructure.com', 'course_navigation', account_nav_params)\n lti_tool_config.description = 'Import content from edX to Canvas'\n\n return http.HttpResponse(\n lti_tool_config.to_xml(), content_type='text/xml', status=200\n )", "def config(self) -> InstrumentConfig:\n ...", "def config (self):\n import wikicode\n class Config (wikicode.extension):\n def run (self):\n self.send_page (\"Generic DC Setup\")\n wikicode.run_extension (Config)", "def _setup_pipeline_cfg(self):", "def get_dynamic_setup_params():\n\n return {\n # Retrieve the long description from the README\n \"long_description\": read_file(\"README.md\")\n }", "def __set_special_config_values(cfg: __Config, config: dict) -> \"__Config\":\n cfg.file_name_plane_masks = lambda i: str(i) + config['file_name_plane_mask_suf']\n cfg.file_name_planercnn_image = lambda i: str(i) + config['file_name_planercnn_image_suf']\n cfg.dir_results = f\"{cfg.edge_detection_type}\" # will be the output folder, create in data dir\n cfg.image_size = tuple(int(x) for x in config['image_size'].split(\" \"))\n return cfg", "def loadParameters (self, filePath):\r\n # productive #onButton\r\n profprint()\r\n widget = slicer.modules.NeedleFinderWidget\r\n config = ConfigParser.RawConfigParser()\r\n config.read(filePath)\r\n\r\n autoCorrectTip = config.getboolean('BooleanSection', 'autoCorrectTip')\r\n invertedContrast = config.getboolean('BooleanSection', 'invertedContrast')\r\n gradient = config.getboolean('BooleanSection', 'gradient')\r\n filterControlPoints = config.getboolean('BooleanSection', 'filterControlPoints')\r\n drawFiducialPoints = config.getboolean('BooleanSection', 'drawFiducialPoints')\r\n autoStopTip = 
config.getboolean('BooleanSection', 'autoStopTip')\r\n extendNeedle = config.getboolean('BooleanSection', 'extendNeedle')\r\n maxLength = config.getboolean('BooleanSection', 'maxLength')\r\n gaussianAttenuationButton = config.getboolean('BooleanSection', 'gaussianAttenuationButton')\r\n\r\n realNeedleLength = config.getint('IntegerSection', 'realNeedleLength')\r\n sigmaValue = config.getint('IntegerSection', 'sigmaValue')\r\n gradientPonderation = config.getint('IntegerSection', 'gradientPonderation')\r\n exponent = config.getint('IntegerSection', 'exponent')\r\n try:\r\n radiusMax = config.getint('IntegerSection', 'distanceMax') # try deprecated parameter name (old parameter files)\r\n except:\r\n radiusMax = config.getint('IntegerSection', 'radiusMax')\r\n nbRotatingIterations = config.getint('IntegerSection', 'nbRotatingIterations')\r\n numberOfPointsPerNeedle = config.getint('IntegerSection', 'numberOfPointsPerNeedle')\r\n lenghtNeedleParameter = config.getint('IntegerSection', 'lenghtNeedleParameter')\r\n radiusNeedleParameter = config.getint('IntegerSection', 'radiusNeedleParameter')\r\n algoVersParameter = config.getint('IntegerSection', 'algoVersParameter')\r\n\r\n widget.autoCorrectTip.checked = autoCorrectTip\r\n widget.invertedContrast.checked = invertedContrast\r\n widget.gradient.checked = gradient\r\n widget.filterControlPoints.checked = filterControlPoints\r\n widget.drawFiducialPoints.checked = drawFiducialPoints\r\n widget.autoStopTip.checked = autoStopTip\r\n widget.extendNeedle.checked = extendNeedle\r\n widget.maxLength.checked = maxLength\r\n widget.gaussianAttenuationButton.checked = gaussianAttenuationButton\r\n\r\n widget.realNeedleLength.value = realNeedleLength\r\n widget.sigmaValue.value = sigmaValue\r\n widget.gradientPonderation.value = gradientPonderation\r\n widget.exponent.value = exponent\r\n widget.radiusMax.value = radiusMax\r\n widget.nbRotatingIterations.value = nbRotatingIterations\r\n widget.numberOfPointsPerNeedle.value = numberOfPointsPerNeedle\r\n widget.lenghtNeedleParameter.value = lenghtNeedleParameter\r\n widget.radiusNeedleParameter.value = radiusNeedleParameter\r\n widget.algoVersParameter.value = algoVersParameter\r\n print \"#############\"\r\n print \"algoVers: \", algoVersParameter\r\n print \"Parameters successfully loaded!\"", "def set_config(config_name, host, port, core=''):\n global CONFIGS\n CONFIGS[config_name] = {'host': host, 'port': port, 'core': core}", "def at_config(seq, option, value):\n at(\"CONFIG\", seq, [str(option), str(value)])", "def default_configs(cls):\n config = super().default_configs()\n config.update({\"model\": \"openie\"})\n return config", "def configure(self) -> None:", "def config(\n data_folder=settings.data_folder,\n logs_folder=settings.logs_folder,\n imgs_folder=settings.imgs_folder,\n cache_folder=settings.cache_folder,\n cache_responses=settings.cache_responses,\n log_file=settings.log_file,\n log_console=settings.log_console,\n log_level=settings.log_level,\n log_name=settings.log_name,\n log_filename=settings.log_filename,\n useful_idf_objects=settings.useful_idf_objects,\n default_weight_factor=\"area\",\n ep_version=settings.ep_version,\n debug=settings.debug,\n):\n # set each global variable to the passed-in parameter value\n settings.cache_responses = cache_responses\n settings.cache_folder = Path(cache_folder).expand().makedirs_p()\n settings.data_folder = Path(data_folder).expand().makedirs_p()\n settings.imgs_folder = Path(imgs_folder).expand().makedirs_p()\n settings.logs_folder = 
Path(logs_folder).expand().makedirs_p()\n settings.log_console = log_console\n settings.log_file = log_file\n settings.log_level = log_level\n settings.log_name = log_name\n settings.log_filename = log_filename\n settings.useful_idf_objects = useful_idf_objects\n settings.zone_weight.set_weigth_attr(default_weight_factor)\n settings.ep_version = ep_version\n settings.debug = debug\n\n # if logging is turned on, log that we are configured\n if settings.log_file or settings.log_console:\n get_logger(name=\"archetypal\")\n log(\"Configured archetypal\")", "def printConf(self):\n print \"\"\n for pname, pvalue in self.neededParams.items():\n print pname, pvalue\n for pname, pvalue in self.optionalParams.items():\n print pname, pvalue", "def config(self):\n return None", "def get_dynamic_setup_params():\n return {\n # Retrieve the long description from the README\n # 'long_description': read_file('README.rst'),\n 'install_requires': substitute_crypto_to_req(\n read_requirements('requirements.txt'),\n ),\n # 'extras_require': read_extras(),\n }", "def configure(_workdir):\n\n global workdir\n workdir = _workdir\n\n from os.path import join\n from ConfigParser import ConfigParser\n config = ConfigParser(dict(here=workdir))\n config.read(join(workdir, 'rnaseqlyze.ini'))\n\n for name, value in config.items(\"rnaseqlyze\"):\n globals()[name] = value\n\n import Bio.Entrez\n Bio.Entrez.email = admin_email", "def add_config(config_name, params, config_file=None, make_default=True):\n\n if config_file is None:\n config_file = pkgrs.resource_filename('latools', 'latools.cfg')\n cf = configparser.ConfigParser()\n cf.read(config_file)\n\n # if config doesn't already exist, create it.\n if config_name not in cf.sections():\n cf.add_section(config_name)\n # iterate through parameter dict and set values\n for k, v in params.items():\n cf.set(config_name, k, v)\n # make the parameter set default, if requested\n if make_default:\n cf.set('DEFAULT', 'default_config', config_name)\n\n cf.write(open(config_file, 'w'))\n\n return", "def requested_config_vals():\n return {'transfer_stats_per_file':'opt'}", "def config(self) -> pulumi.Input['ConfigArgs']:\n return pulumi.get(self, \"config\")", "def config():\n return {\n \"COMPONENT_NAME\": \"testing-deleter\",\n \"DEST_SITE\": \"NERSC\",\n \"DISK_BASE_PATH\": \"/path/to/rucio/rse/root\",\n \"HEARTBEAT_PATCH_RETRIES\": \"3\",\n \"HEARTBEAT_PATCH_TIMEOUT_SECONDS\": \"30\",\n \"HEARTBEAT_SLEEP_DURATION_SECONDS\": \"60\",\n \"INPUT_STATUS\": \"detached\",\n \"LTA_REST_TOKEN\": \"fake-lta-rest-token\",\n \"LTA_REST_URL\": \"http://RmMNHdPhHpH2ZxfaFAC9d2jiIbf5pZiHDqy43rFLQiM.com/\",\n \"OUTPUT_STATUS\": \"source-deleted\",\n \"RUN_ONCE_AND_DIE\": \"False\",\n \"SOURCE_SITE\": \"WIPAC\",\n \"WORK_RETRIES\": \"3\",\n \"WORK_SLEEP_DURATION_SECONDS\": \"60\",\n \"WORK_TIMEOUT_SECONDS\": \"30\",\n }", "def configure_for_pokecrystal(config=config):\n attrs = {\n \"version\": \"crystal\",\n\n \"map_dir\": os.path.join(config.path, 'maps/'),\n \"gfx_dir\": os.path.join(config.path, 'gfx/tilesets/'),\n \"to_gfx_name\": lambda x : '%.2d' % x,\n \"block_dir\": os.path.join(config.path, 'tilesets/'),\n \"block_ext\": '_metatiles.bin',\n\n \"palettes_on\": True,\n \"palmap_dir\": os.path.join(config.path, 'tilesets/'),\n \"palette_dir\": os.path.join(config.path, 'tilesets/'),\n\n \"asm_dir\": os.path.join(config.path, 'maps/'),\n\n \"constants_filename\": os.path.join(config.path, 'constants.asm'),\n\n \"header_dir\": os.path.join(config.path, 'maps/'),\n\n \"time_of_day\": 1,\n }\n 
return attrs", "def configure(self):\n param_values = [self.params[name] for name in self.param_names]\n self.sendPacket(4, struct.pack('<HBBB', param_values))", "def setupconfig():\n from Manager import Studio\n studio = Studio.Instance\n cfgeff = studio.configEffect_st\n cfgeff.bloomToggle.isOn = False\n cfgeff.vignetteToggle.isOn = False\n cfgeff.sunShaftsToggle.isOn = False\n cfgeff.fogToggle.isOn = False\n cfgeff.depthOfFieldToggle.isOn = False\n #cfgeff.ssaoToggle.isOn = True\n #cfgeff.selfShadowToggle.isOn = True\n \n # Turn off backgrounds\n studio.uiBGChanger.onOffToggle.isOn = False", "def configuration_keys(self):\n return ['dispname', 'decker', 'binning']", "def configuration_keys(self):\n return ['dispname', 'decker', 'binning']", "def configuration_keys(self):\n return ['dispname', 'decker', 'binning']", "def __init__(self):\n self.ext_folder = ckan_config.get('ckanext.needupdate.ext_folder', '/usr/lib/ckan/default/src')\n self.ext_prefix = ckan_config.get('ckanext.needupdate.ext_folder', 'ckanext-')\n self.ext_sufix = ckan_config.get('ckanext.needupdate.ext_folder', '')", "def getDefaultParams():\n defpar = [\n # coordinate system\n ['crd_sys', \"'sph'\", 'Coordinate system'],\n ['nx', '[60, 40, 30]', 'Number of grid points in the first dimension'],\n ['xbound', '[0.1*au, 30.*au, 110.*au, 250.*au]', 'Number of radial grid points'],\n ['ny', '[10,30, 30, 10]',\n 'Number of grid points in the second dimension'],\n ['ybound', '[0.1, pi/6., pi/2., 5.*pi/6., 3.04]',\n 'Number of radial grid points'],\n ['nz', '[361]', 'Number of grid points in the third dimension'],\n ['zbound', '[0., 2.0*pi]', 'Number of radial grid points'],\n # star related\n ['tstar', '[3900.0]', 'Temperature of star'],\n ['mstar', '[1.0*ms]', 'Mass of the star(s)'],\n ['rstar', '[2.5*rs]', 'Radius of star'],\n # gas density \n ['Rin', '[0.1*au, 80*au]', 'inner bounding edge'],\n ['Rin_w', '[0, 1*au]', 'gaussian taper before inner edge'], \n ['Rout', '[30*au, 120*au]', 'outer bounding edge'],\n ['Rout_w', '[1*au, 1*au]', 'gaussian taper after outer edge'], \n ['sigp', '[-1.0, -1.5]', 'power-law surface density'],\n ['sig0', '[1e2, 1e1]', 'surface density at Rin in g/cm^2'], \n ['ring_r', '[50*au]', 'location of gaussian ring'], \n ['ring_win', '[5*au]', 'width of gaussian ring in inner radius'],\n ['ring_wout', '[5*au]', 'width of gaussian ring in outer radius'], \n ['ring_a', '[1e2]', 'surface density at center of ring in g/cm^2]'], \n ['cutgdens', '1e-30', 'cut for density'], \n ['Rt', '100*au', 'radius for scale height'], \n ['Ht', '10*au', 'scale height'], \n ['qheight', '1.25', 'height power-law'], \n # gas species\n ['gasspec_mol_name', \"['12co']\", 'name of molecule'],\n ['gasspec_mol_abun', '[5e-5]', 'mass abundance '],\n ['gasspec_mol_dbase_type', \"['leiden']\", ''],\n ['gasspec_mol_freezeout_dfact', '[1e-3]',\n 'Factor by which the molecular abundance should be decreased in the freeze-out zone'],\n ['mol_freeze_Ht', '[24*au]', 'Height at Rt, with index=qheight, for freeze out to happen'],\n ['mol_freeze_del_hfrac', '0.2', 'Gaussian taper for freeze-out. 
del H = h * hfrac'],\n ['mol_snowR', '[20*au]', 'Radius when freeze out begins to happen'],\n # dust density\n # flat power-law parts\n ['dRin', '[0.1*au, 80*au]', 'inner bounding edge'],\n ['dRin_w', '[0, 1*au]', 'gaussian taper before inner edge'], \n ['dRout', '[30*au, 120*au]', 'outer bounding edge'],\n ['dRout_w', '[1*au, 1*au]', 'gaussian taper after outer edge'], \n ['dsigp', '[-1.0, -1.5]', 'power-law surface density'],\n ['dsig0', '[1e2, 1e1]', 'surface density at Rin'],\n # Lynden-Bell parts\n ['dLB_Rin', '[0.1*au]', 'inner bounding radius'], \n ['dLB_Rsig', '[30*au]', 'charcteristic radius'],\n ['dLB_sigp', '[-1.0]', 'power-law exponent. Careful, the sign is different from the usual function by a negative sign for consistency with flat power-law'], \n ['dLB_sig0', '[1e2]', 'surface density'], \n # ring parts\n ['dring_r', '[50*au]', 'location of gaussian ring'],\n ['dring_win', '[5*au]', 'width of gaussian ring in inner radius'],\n ['dring_wout', '[5*au]', 'width of gaussian ring in outer radius'], \n ['dring_a', '[1e2]', 'surface density at center of ring in g/cm^2]'],\n ['cutddens', '1e-30', 'cut for dust density'],\n ['dRt', '[100*au]', 'radius for scale height for each grain size'], \n ['dHt', '[10*au]', 'scale height for each grain size'], \n ['dqheight', '[1.25]', 'scale height power-law for dust'], \n # temperature\n ['T0mid', '50', 'mid plane temperature at Rt'],\n ['T0atm', '50', 'atmosphere temperature at Rt'],\n ['zqratio', '3', 'factor of Ht of where temperature transition occurs'],\n ['qmid', '-0.5', 'midplane temperature exponent'],\n ['qatm', '-0.5', 'atmosphere temperature exponent'],\n ['hdel', '2', 'temperature transition exponent '],\n ['cuttemp', '10', 'temperature cut'], \n # alignment\n ['altype', \"'toroidal'\", 'alignment type']\n ]\n\n return defpar", "def configure(self):\n pass", "def configure(self):\n pass", "def get_test_config(cls, cluster, role, env, job, filler=''):\n return cls.CONFIG_BASE % {'job': job, 'role': role, 'env': env, 'cluster': cluster,\n 'inner': filler}", "def __writeConfig(self):\n page = None\n\n #TODO: get values of configurations here\n particles = \"#f\" if not base.particleMgrEnabled else \"#t\"\n volume = str(round(base.musicManager.getVolume(), 2))\n mute = \"#f\" if base.AppHasAudioFocus else \"#t\"\n #TODO: add any configuration variable name that you have added\n customConfigVariables = [\n \"\", \"particles-enabled\", \"audio-mute\", \"audio-volume\"]\n if os.path.exists(prcFile):\n # open the config file and change values according to current\n # application settings\n page = loadPrcFile(Filename.fromOsSpecific(prcFile))\n removeDecls = []\n for dec in range(page.getNumDeclarations()):\n # Check if our variables are given.\n # NOTE: This check has to be done to not loose our base or other\n # manual config changes by the user\n if page.getVariableName(dec) in customConfigVariables:\n decl = page.modifyDeclaration(dec)\n removeDecls.append(decl)\n for dec in removeDecls:\n page.deleteDeclaration(dec)\n # NOTE: particles-enabled and audio-mute are custom variables and\n # have to be loaded by hand at startup\n # Particles\n page.makeDeclaration(\"particles-enabled\", particles)\n # audio\n page.makeDeclaration(\"audio-volume\", volume)\n page.makeDeclaration(\"audio-mute\", mute)\n else:\n # Create a config file and set default values\n cpMgr = ConfigPageManager.getGlobalPtr()\n page = cpMgr.makeExplicitPage(\"%s Pandaconfig\"%appName)\n # set OpenGL to be the default\n page.makeDeclaration(\"load-display\", 
\"pandagl\")\n # get the displays width and height\n w = self.pipe.getDisplayWidth()\n h = self.pipe.getDisplayHeight()\n # set the window size in the config file\n page.makeDeclaration(\"win-size\", \"%d %d\"%(w, h))\n # set the default to fullscreen in the config file\n page.makeDeclaration(\"fullscreen\", \"1\")\n # particles\n page.makeDeclaration(\"particles-enabled\", \"#t\")\n # audio\n page.makeDeclaration(\"audio-volume\", volume)\n page.makeDeclaration(\"audio-mute\", \"#f\")\n # create a stream to the specified config file\n configfile = OFileStream(prcFile)\n # and now write it out\n page.write(configfile)\n # close the stream\n configfile.close()", "def get_config_template(self) -> cconfig.Config:", "def _opt_config(self):\n return self._opt_method.config", "def __init__(self, encut, magmom, ldaul, Uparam, Jparam, name=\"DFTCL_settings\"):\n\n cl_settings = {\"ISPIN\": 2, \"MAGMOM\": magmom, \"SAXIS\": None, \"LSORBIT\": None, \"LNONCOLLINEAR\": None}\n dftu_settings = {\"LDAU\": \".TRUE.\", \"LDAUU\": Uparam, \"LDATYPE\": 2, \"LDAUL\": ldaul, \"LDAUJ\": Jparam , \"LMAXMIMX\": 4}\n InputParameters.__init__(self, name=name, magnetic_settings=cl_settings, hubbard_settings=dftu_settings)\n self.update_electronic_settings(\"encut\", encut)" ]
[ "0.68244946", "0.6493101", "0.6493101", "0.64695317", "0.63397986", "0.62136227", "0.6132952", "0.60887027", "0.6073328", "0.6053588", "0.60361594", "0.6020469", "0.60147214", "0.6013279", "0.5986166", "0.5982526", "0.5982526", "0.59803295", "0.5972104", "0.59715974", "0.5953171", "0.5938953", "0.59374064", "0.59374064", "0.59374064", "0.59374064", "0.59339356", "0.592969", "0.59136856", "0.59074736", "0.58948463", "0.58880717", "0.58791053", "0.5868777", "0.58556473", "0.58495635", "0.5836435", "0.5832155", "0.5825922", "0.58197224", "0.5817282", "0.58097696", "0.5803807", "0.5802965", "0.57988256", "0.5792995", "0.57905513", "0.57905513", "0.57900906", "0.5769966", "0.576617", "0.57422805", "0.57422805", "0.57421273", "0.57099426", "0.57083505", "0.5704571", "0.57036424", "0.568994", "0.5687748", "0.56821626", "0.56813145", "0.567492", "0.56703824", "0.5652204", "0.565173", "0.5649904", "0.56475514", "0.56320983", "0.5624056", "0.56138694", "0.5613132", "0.56071264", "0.5600286", "0.55979", "0.5596932", "0.559573", "0.55839115", "0.55803716", "0.5579603", "0.55793434", "0.5577762", "0.55717623", "0.55703396", "0.5565373", "0.55653524", "0.5562333", "0.5561838", "0.55600786", "0.55600786", "0.55600786", "0.5556708", "0.5555909", "0.5551925", "0.5551925", "0.55463666", "0.5542689", "0.5534747", "0.5530358", "0.5521898" ]
0.5761126
51
Prepare build environment (skip loading of dependencies).
def prepare_step(self, *args, **kwargs):
    kwargs['load_tc_deps_modules'] = False
    super(CrayPEToolchain, self).prepare_step(*args, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_environment():\n global _ENV_SETUP_DONE\n if _ENV_SETUP_DONE:\n return\n _ENV_SETUP_DONE = True\n\n _configure_libraries()\n\n custom_module_path = os.environ.get(\"DETECTRON2_ENV_MODULE\")\n\n if custom_module_path:\n setup_custom_environment(custom_module_path)\n else:\n # The default setup is a no-op\n pass", "def setup_environment(self):\n self.run_command(\"cd {}\".format(quote(str(self.builddir))))\n env_vars = self._build_env_variables_string()\n if env_vars:\n env_vars = quote(env_vars)\n command = \"{} DISTRO={} MACHINE={} . {} build-{}\".format(\n env_vars,\n quote(self.distro),\n quote(self.machine),\n quote(self.init_env_file),\n quote(self.distro),\n )\n self.run_command(command)", "def setup(c):\n c.run('nox --envdir .')", "def prepare(self):\n # This is a no-op; the native subprocess environment is ready-to-use.\n pass", "def set_up(dev=False):\n _install_dependencies()", "def prepare():\n sh('pip install pylint pyflakes behave nose clonedigger pep8 sphinx')\n sh('pip install watchdog coverage ipython sphinx_rtd_theme')\n develop()", "def __setup(self):\n\n build_environment = []\n\n # The download URL has the format contains vMAJOR.MINOR in the\n # path and the tarball contains MAJOR.MINOR.REVISION, so pull\n # apart the full version to get the MAJOR and MINOR components.\n match = re.match(r'(?P<major>\\d+)\\.(?P<minor>\\d+)', self.version)\n major_minor = 'v{0}.{1}'.format(match.groupdict()['major'],\n match.groupdict()['minor'])\n tarball = 'openmpi-{}.tar.bz2'.format(self.version)\n url = '{0}/{1}/downloads/{2}'.format(self.baseurl, major_minor,\n tarball)\n\n # CUDA\n if self.cuda:\n if self.__toolchain.CUDA_HOME:\n self.configure_opts.append(\n '--with-cuda={}'.format(self.__toolchain.CUDA_HOME))\n else:\n self.configure_opts.append('--with-cuda')\n else:\n self.configure_opts.append('--without-cuda')\n\n # InfiniBand\n if self.infiniband:\n self.configure_opts.append('--with-verbs')\n else:\n self.configure_opts.append('--without-verbs')\n\n # UCX\n if self.__ucx:\n if isinstance(self.__ucx, string_types):\n # Use specified path\n self.configure_opts.append('--with-ucx={}'.format(self.__ucx))\n else:\n self.configure_opts.append('--with-ucx')\n\n # If UCX was built with CUDA support, it is linked with\n # libcuda.so.1, which is not available during the\n # build stage. 
Assume that if OpenMPI is built with\n # CUDA support, then UCX was as well...\n if self.cuda:\n cuda_home = \"/usr/local/cuda\"\n if self.__toolchain.CUDA_HOME:\n cuda_home = self.__toolchain.CUDA_HOME\n self.__commands.append('ln -s {0} {1}'.format(\n os.path.join(cuda_home, 'lib64', 'stubs', 'libcuda.so'),\n os.path.join(cuda_home, 'lib64', 'stubs', 'libcuda.so.1')))\n if not self.__toolchain.LD_LIBRARY_PATH:\n build_environment.append('LD_LIBRARY_PATH=\"{}:$LD_LIBRARY_PATH\"'.format(os.path.join(cuda_home, 'lib64', 'stubs')))\n\n if self.directory:\n # Use source from local build context\n self.__commands.append(self.configure_step(\n directory=os.path.join(self.__wd, self.directory),\n toolchain=self.__toolchain))\n else:\n # Download source from web\n self.__commands.append(self.download_step(url=url,\n directory=self.__wd))\n self.__commands.append(self.untar_step(\n tarball=os.path.join(self.__wd, tarball), directory=self.__wd))\n self.__commands.append(self.configure_step(\n directory=os.path.join(self.__wd,\n 'openmpi-{}'.format(self.version)),\n environment=build_environment,\n toolchain=self.__toolchain))\n\n self.__commands.append(self.build_step())\n\n if self.__check:\n self.__commands.append(self.check_step())\n\n self.__commands.append(self.install_step())\n\n # Set library path\n libpath = os.path.join(self.prefix, 'lib')\n if self.ldconfig:\n self.__commands.append(self.ldcache_step(directory=libpath))\n else:\n self.__environment_variables['LD_LIBRARY_PATH'] = '{}:$LD_LIBRARY_PATH'.format(libpath)\n\n if self.directory:\n # Using source from local build context, cleanup directory\n self.__commands.append(self.cleanup_step(\n items=[os.path.join(self.__wd, self.directory)]))\n else:\n # Using downloaded source, cleanup tarball and directory\n self.__commands.append(self.cleanup_step(\n items=[os.path.join(self.__wd, tarball),\n os.path.join(self.__wd,\n 'openmpi-{}'.format(self.version))]))", "def process_develop_setup():\n if 'develop' in sys.argv and os.path.exists('build'):\n # Remove `build` directory created by a regular installation\n shutil.rmtree('build')\n elif 'develop' not in sys.argv and os.path.exists('gfootball_engine'):\n # If `pip install .` is called after development mode,\n # remove the 'fonts' directory copied by a `develop` setup\n copied_fonts = 'third_party/gfootball_engine/fonts'\n if os.path.exists(copied_fonts):\n shutil.rmtree(copied_fonts)\n # Remove .so files (.pyd on Windows)\n for empty_lib in glob.glob(\"brainball_cpp_engine*\"):\n os.remove(empty_lib)\n # Finally, remove symlink to the gfootball_engine directory\n if not os.path.exists('gfootball_engine'):\n return\n if os.path.islink('gfootball_engine'):\n if platform.system() == 'Windows':\n os.remove('gfootball_engine')\n else:\n os.unlink('gfootball_engine')\n else:\n shutil.rmtree('gfootball_engine')", "def prepare():\n sh('docker build --rm -t {image} {dir}', image=IMAGE, dir=os.path.dirname(__file__))", "def prepare():\n with cd(env.code_dir):\n run('svn up api')\n run('svn up pyramid-oauth2')", "def setup_develop():\n workon = '.'\n if VENVWRAPPER:\n workon=os.getenv(\"WORKON_HOME\")\n cmd = '{workon}/{env}/bin/python setup.py develop'.format(\n envs=ENVS, env=VENV, workon=workon)\n print(cmd)\n subprocess.call(cmd.split())", "def bootstrap():\n\n require('environment', provided_by=env.environments)\n sudo('mkdir -p %(root)s' % env, user=env.deploy_user)\n clone_repo()\n setup_dirs()\n link_config_files()\n update_services()\n create_virtualenv()\n update_requirements()\n 
create_local_settings()", "def _set_environment_vars(self):\n os.environ[\"PATH\"] = os.path.join(self.source_folder, \"depot_tools\") + os.pathsep + os.environ[\"PATH\"]\n os.environ[\"DEPOT_TOOLS_PATH\"] = os.path.join(self.source_folder, \"depot_tools\")\n if tools.os_info.is_windows:\n os.environ[\"DEPOT_TOOLS_WIN_TOOLCHAIN\"] = \"0\"\n os.environ[\"GYP_MSVS_VERSION\"] = \"2017\" if str(self.settings.compiler.version) == \"15\" else \"2019\"", "def bootstrap():\n _require_environment()\n\n adduser()\n install_python()\n install_git()\n install_apache()\n install_mysql()\n setup_project()", "def install_deps_temp(self):\n if self.distribution.install_requires:\n self.distribution.fetch_build_eggs(\n self.distribution.install_requires)\n if self.distribution.tests_require:\n self.distribution.fetch_build_eggs(self.distribution.tests_require)", "def prepare(skip_static=False):\n\n local('npm install')\n local('grunt build')\n\n with warn_only():\n local('git add staticfiles')\n local('git add {{ project_name }}/templates')\n local('git commit -m \"PRODUCTION ONLY: Build static files.\"')\n\n files_to_remove = (\n '.bowerrc',\n '.editorcinfig',\n '.gitignore',\n '.jshintrc',\n 'bower.json',\n 'dev-only-package.json',\n 'error.log',\n 'fabfile.py',\n 'Gruntfile.js',\n 'migrate.sh',\n 'README.md',\n 'serve.sh',\n 'flush_cache.py',\n )\n\n with warn_only():\n for file_ in files_to_remove:\n local('git rm {}'.format(file_))\n\n # store it\n local('git commit -m \"PRODUCTION ONLY: Removing files.\"')\n\n if skip_static:\n local('touch .skipDjango')\n local('git add .skipDjango')\n local('git commit -m \"PRODUCTION ONLY: Skip static files\"')", "def setup():\n _confirm_branch()\n \n require('settings', provided_by=[production, staging])\n require('branch', provided_by=[stable, master, branch])\n \n setup_directories()\n setup_virtualenv()\n clone_repo()\n checkout_latest()\n install_requirements()\n install_apache_conf()\n deploy_to_s3()", "def start_build(args):\n\n path = os.path.join(SCRATCH_DIR, args.project)\n \n # Set up virtual environment\n print(\"Setting up virtual python environment in %s\" % path)\n venv.create(path, clear=True, symlinks=True, with_pip=False)\n\n # Pull in repository data\n sourcepath = os.path.join(path, 'source')\n print(\"Cloning from git repository %s (branch: %s)\" % (args.source, args.sourcebranch))\n subprocess.run((GIT, 'clone', '--branch', args.sourcebranch, '--depth=1', '--no-single-branch', args.source, sourcepath),\n check=True)\n\n # Activate venv and install pips if needed. For dev/test, we will\n # assume that all requirements are available at the system level,\n # rather than needing to install them into the venv.\n ### note: this makes it difficult to test requirements.txt, but it\n ### will do for now. 
Debugging requirements.txt failures on the\n ### production buildbot is not difficult to correct.\n if IS_PRODUCTION and os.path.exists(os.path.join(sourcepath, 'requirements.txt')):\n print(\"Installing pips\")\n subprocess.run(('/bin/bash', '-c',\n 'source bin/activate; pip3 install -r source/requirements.txt'),\n cwd=path, check=True)\n else:\n print(\"On dev/test requirements.txt is not processed, skipping pip\")\n\n # Where are our tools?\n if IS_PRODUCTION:\n tool_dir = PELICANFILES\n else:\n tool_dir = THIS_DIR\n print(\"TOOLS:\", tool_dir)\n\n pelconf_yaml = os.path.join(sourcepath, AUTO_SETTINGS_YAML)\n if os.path.exists(pelconf_yaml):\n settings_path = os.path.join(path, AUTO_SETTINGS)\n if IS_PRODUCTION:\n builtin_plugins = PLUGINS\n else:\n builtin_plugins = os.path.join(tool_dir, os.pardir, 'plugins')\n generate_settings(pelconf_yaml, settings_path, [ builtin_plugins ], sourcepath)\n else:\n # The default name, but we'll pass it explicitly.\n settings_path = os.path.join(sourcepath, 'pelicanconf.py')\n\n # Set currently supported plugins\n ### this needs to be removed, as it is too indeterminate.\n with open(settings_path, 'a') as f:\n f.write(\"\"\"\ntry:\n PLUGINS += ['toc']\nexcept:\n PLUGINS = ['toc', 'gfm']\n\"\"\")\n\n # Call pelican\n buildpath = os.path.join(path, 'build/output')\n os.makedirs(buildpath, exist_ok = True)\n buildcmd = ('/bin/bash', '-c',\n 'source bin/activate; cd source && '\n ### note: adding --debug can be handy\n f'(pelican content --settings {settings_path} -o {buildpath})',\n )\n print(\"Building web site with:\", buildcmd)\n env = os.environ.copy()\n env['LIBCMARKDIR'] = LIBCMARKDIR\n subprocess.run(buildcmd, cwd=path, check=True, env=env)\n\n count = len(glob.glob(f'{buildpath}/**/*.html', recursive=True))\n print(f\"{count} html files.\")\n if args.count > 0 and args.count > count:\n print(\"Not enough html pages in the Web Site. 
Minimum %s > %s found in the Web Site.\" % (args.count, count))\n sys.exit(4)\n\n # Done for now\n print(\"Web site successfully generated!\")\n\n # It is much easier to do all the below, if we chdir()\n os.chdir(sourcepath)\n\n # Copy to result branch\n print(\"Copying web site to branch:\", args.outputbranch)\n\n try:\n subprocess.run((GIT, 'rev-parse', '--verify', \"origin/%s\" % args.outputbranch),\n check=True)\n print(\"- Doing fresh checkout of branch %s\" % args.outputbranch)\n subprocess.run((GIT, 'checkout', args.outputbranch, '-f'), check=True)\n subprocess.run((GIT, 'pull'), check=True)\n except:\n print(\"- Branch %s does not exist (yet), creating it...\" % args.outputbranch)\n # If .asf.yaml exists, which it should, make a copy of it in memory for later\n asfyml = os.path.join(sourcepath, '.asf.yaml')\n myyaml = None\n if os.path.exists(asfyml):\n myyaml = open(asfyml).read()\n subprocess.run((GIT, 'checkout', '--orphan', args.outputbranch), check=True)\n subprocess.run((GIT, 'rm', '-rf', '.'), check=True)\n # Add .asf.yaml back in if we found it.\n if myyaml:\n open(asfyml, \"w\").write(myyaml)\n subprocess.run((GIT, 'add', '.asf.yaml'), check=True)\n\n print(\"- Adding new content to branch\")\n # RM output dir if it already exists\n outputdir = os.path.join(sourcepath, 'output')\n if os.path.isdir(outputdir):\n print(\"Removing existing output dir %s\" % outputdir)\n shutil.rmtree(outputdir)\n shutil.move(buildpath, outputdir)\n subprocess.run((GIT, 'add', 'output/'), check=True)\n\n # Check if there are any changes.\n cp = subprocess.run((GIT, 'diff', '--cached', '--quiet'))\n if cp.returncode == 0:\n # There were no differences reported.\n print('Nothing new to commit. Ignoring this build.')\n else:\n print(\"- Committing to %s\" % args.source)\n subprocess.run((GIT, 'commit', '-m', 'Automatic Site Publish by Buildbot'), check=True)\n\n # If we're not in production, then avoid pushing changes.\n if IS_PRODUCTION:\n print('- Pushing changes, for publishing')\n subprocess.run((GIT, 'push', args.source, args.outputbranch), check=True)\n\n print('Success. 
Done.')\n # for dev/test provide viewing instructions\n if not IS_PRODUCTION:\n if args.listen:\n try:\n subprocess.run(('pelican','-l'), check=True)\n except KeyboardInterrupt:\n pass\n else:\n print(f'To test output:\\ncd {sourcepath}; pelican -l')", "def SetupEnvironment(self):\n pass", "def bootstrap():\n sub_install_packages()\n sub_install_virtualenv()\n sub_create_virtualenv()\n sub_install_python_requirements()", "def build_virtualenv():\n\n puts(yellow(\"Install dependencies from requirements.txt\"))\n with cd(env.source_dir):\n with prefix('source %s' % in_rwd('bin/activate')):\n sudo('pip install -r %s' % env.requirements_file,\n user=env.app_user)\n sudo('python setup.py develop', user=env.app_user)", "def setup_package(pkg, dirty, context=\"build\"):\n if context not in [\"build\", \"test\"]:\n raise ValueError(\"'context' must be one of ['build', 'test'] - got: {0}\".format(context))\n\n set_module_variables_for_package(pkg)\n\n # Keep track of env changes from packages separately, since we want to\n # issue warnings when packages make \"suspicious\" modifications.\n env_base = EnvironmentModifications() if dirty else clean_environment()\n env_mods = EnvironmentModifications()\n\n # setup compilers for build contexts\n need_compiler = context == \"build\" or (context == \"test\" and pkg.test_requires_compiler)\n if need_compiler:\n set_compiler_environment_variables(pkg, env_mods)\n set_wrapper_variables(pkg, env_mods)\n\n env_mods.extend(modifications_from_dependencies(pkg.spec, context, custom_mods_only=False))\n\n # architecture specific setup\n platform = spack.platforms.by_name(pkg.spec.architecture.platform)\n target = platform.target(pkg.spec.architecture.target)\n platform.setup_platform_environment(pkg, env_mods)\n\n if context == \"build\":\n pkg.setup_build_environment(env_mods)\n\n if (not dirty) and (not env_mods.is_unset(\"CPATH\")):\n tty.debug(\n \"A dependency has updated CPATH, this may lead pkg-\"\n \"config to assume that the package is part of the system\"\n \" includes and omit it when invoked with '--cflags'.\"\n )\n elif context == \"test\":\n env_mods.extend(\n inspect_path(\n pkg.spec.prefix,\n spack.user_environment.prefix_inspections(pkg.spec.platform),\n exclude=is_system_path,\n )\n )\n pkg.setup_run_environment(env_mods)\n env_mods.prepend_path(\"PATH\", \".\")\n\n # First apply the clean environment changes\n env_base.apply_modifications()\n\n # Load modules on an already clean environment, just before applying Spack's\n # own environment modifications. This ensures Spack controls CC/CXX/... variables.\n if need_compiler:\n for mod in pkg.compiler.modules:\n load_module(mod)\n\n # kludge to handle cray mpich and libsci being automatically loaded by\n # PrgEnv modules on cray platform. 
Module unload does no damage when\n # unnecessary\n on_cray, _ = _on_cray()\n if on_cray and not dirty:\n for mod in [\"cray-mpich\", \"cray-libsci\"]:\n module(\"unload\", mod)\n\n if target.module_name:\n load_module(target.module_name)\n\n load_external_modules(pkg)\n\n implicit_rpaths = pkg.compiler.implicit_rpaths()\n if implicit_rpaths:\n env_mods.set(\"SPACK_COMPILER_IMPLICIT_RPATHS\", \":\".join(implicit_rpaths))\n\n # Make sure nothing's strange about the Spack environment.\n validate(env_mods, tty.warn)\n env_mods.apply_modifications()\n\n # Return all env modifications we controlled (excluding module related ones)\n env_base.extend(env_mods)\n return env_base", "def setup():\r\n global venvs\r\n\r\n try:\r\n os.mkdir(basedir)\r\n except OSError, e:\r\n if e.errno != errno.EEXIST:\r\n raise\r\n os.chdir(basedir)\r\n\r\n # Delete virtualenvs and recreate\r\n for venv in glob('venv-*'):\r\n shutil.rmtree(venv)\r\n for py in available_python_versions():\r\n check_call(['virtualenv', '-p', py,\r\n '--system-site-packages', 'venv-%s' % py])\r\n venvs.append((py, 'venv-%s' % py))\r\n\r\n # Check out and update the repository\r\n if not os.path.exists('Theano'):\r\n try:\r\n check_call(['git', 'clone', ipy_repository])\r\n except CalledProcessError:\r\n check_call(['git', 'clone', ipy_http_repository])\r\n os.chdir(repodir)\r\n check_call(['git', 'checkout', 'master'])\r\n try:\r\n check_call(['git', 'pull', ipy_repository, 'master'])\r\n except CalledProcessError:\r\n check_call(['git', 'pull', ipy_http_repository, 'master'])\r\n os.chdir(basedir)", "def build(ctx):\n ctx.run(\"vsce package\", replace_env=False)", "def prepare_environment(params: Params):\n seed = params.pop_int(\"random_seed\", 13370)\n numpy_seed = params.pop_int(\"numpy_seed\", 1337)\n torch_seed = params.pop_int(\"pytorch_seed\", 133)\n\n if seed is not None:\n random.seed(seed)\n if numpy_seed is not None:\n numpy.random.seed(numpy_seed)\n if torch_seed is not None:\n torch.manual_seed(torch_seed)\n # Seed all GPUs with the same seed if available.\n if torch.cuda.is_available():\n torch.cuda.manual_seed_all(torch_seed)\n\n log_pytorch_version_info()", "def prepare_environment(base_path):\n if os.path.exists(base_path):\n shutil.rmtree(base_path)\n os.makedirs(base_path)", "def setup_build_tests(self):\n # Now copy the relative files\n self.cache_extra_test_sources(self.build_relpath)\n\n # Ensure the path exists since relying on a relative path at the\n # same level as the normal stage source path.\n mkdirp(self.install_test_root)", "def setup():\n\n with cd(env.homedir):\n\n # clone repository from github\n sudo('git clone https://github.com/starzel/demo.starzel.de.git', user=env.deploy_user) # noqa: E501\n\n with cd(env.directory):\n\n # requirements\n # sudo('python python-dev build-essential zlib1g-dev libssl-dev libxml2-dev libxslt1-dev wv poppler-utils libtiff5-dev libjpeg62-dev zlib1g-dev libfreetype6-dev liblcms1-dev libwebp-dev') # noqa: E501\n\n # prepare buildout\n sudo('ln -s local_production.cfg local.cfg', user=env.deploy_user)\n sudo('echo -e \"[buildout]\\nlogin = admin\\npassword = admin\" > secret.cfg', user=env.deploy_user) # noqa: E501\n\n # bootstrap and run bildout once\n sudo('./bin/pip install -r requirements.txt', user=env.deploy_user)\n sudo('./bin/buildout', user=env.deploy_user)\n\n # start supervisor which starts plone instance also\n sudo('./bin/supervisord', user=env.deploy_user)", "def setup():\n require('hosts', 'project_path', provided_by=envs.ENVS)\n\n if not 
exists(env.project_path):\n abort(red('Project path ({project_path}) does not exist. '\n 'Create it on the server before continuing.'.format(**env)))\n\n with cd(env.project_path):\n run('mkdir -p api renderer conf markup_renderer')\n run('mkdir -p api/static api/uploads')\n\n make_release_folders('api')\n make_release_folders('renderer')", "def setup():\n\n with cd(env.homedir):\n\n # clone repository from github\n sudo('git clone https://github.com/collective/demo.plone.de.git', user=env.deploy_user) # noqa: E501\n\n with cd(env.directory):\n\n # requirements\n # sudo('python python-dev build-essential zlib1g-dev libssl-dev libxml2-dev libxslt1-dev wv poppler-utils libtiff5-dev libjpeg62-dev zlib1g-dev libfreetype6-dev liblcms1-dev libwebp-dev') # noqa: E501\n\n # prepare buildout\n if env.latest:\n if env.python3:\n sudo('ln -s local_demo_nightly_py3.cfg local.cfg', user=env.deploy_user) # noqa: E501\n else:\n sudo('ln -s local_demo_nightly_py2.cfg local.cfg', user=env.deploy_user) # noqa: E501\n else:\n sudo('ln -s local_production.cfg local.cfg', user=env.deploy_user)\n sudo('echo -e \"[buildout]\\nlogin = admin\\npassword = admin\" > secret.cfg', user=env.deploy_user) # noqa: E501\n\n # bootstrap and run bildout once\n if env.latest:\n sudo('./bin/pip install --no-cache-dir -r https://raw.githubusercontent.com/plone/buildout.coredev/5.2/requirements.txt', user=env.deploy_user) # noqa: E501\n else:\n sudo('./bin/pip install --no-cache-dir -r https://raw.githubusercontent.com/starzel/buildout/5.2/requirements.txt', user=env.deploy_user) # noqa: E501\n sudo('./bin/buildout', user=env.deploy_user)\n\n # start supervisor which starts plone instance also\n sudo('./bin/supervisord', user=env.deploy_user)", "def perform():\n others = not flag_do_only_gcc_build\n locate_gcc_subdir()\n setup_cross()\n if others:\n setup_kernel_headers()\n setup_binutils()\n setup_prereqs()\n setup_sysroot()\n setup_gcc()\n if others:\n setup_kernel_headers()\n setup_glibc()", "def setup_dev():\n setup_general()", "def build_essential(self):\n self.install_package(\"build-essential\")", "def setup(self):\n\n if os.name == 'nt':\n windows_env_start.print_banner(bootstrap=True, no_shell_file=False)\n else:\n enable_colors()\n\n steps = [\n ('CIPD package manager', self.cipd),\n ('Python environment', self.virtualenv),\n ('Host tools', self.host_tools),\n ]\n\n if self._is_windows:\n steps.append((\"Windows scripts\", self.win_scripts))\n\n self._log(\n Color.bold('Downloading and installing packages into local '\n 'source directory:\\n'))\n\n max_name_len = max(len(name) for name, _ in steps)\n\n self._env.comment('''\nThis file is automatically generated. 
DO NOT EDIT!\nFor details, see $PW_ROOT/pw_env_setup/py/pw_env_setup/env_setup.py and\n$PW_ROOT/pw_env_setup/py/pw_env_setup/environment.py.\n'''.strip())\n\n if not self._is_windows:\n self._env.comment('''\nFor help debugging errors in this script, uncomment the next line.\nset -x\nThen use `set +x` to go back to normal.\n'''.strip())\n\n self._env.echo(\n Color.bold(\n 'Activating environment (setting environment variables):'))\n self._env.echo('')\n\n for name, step in steps:\n self._log(' Setting up {name:.<{width}}...'.format(\n name=name, width=max_name_len),\n end='',\n flush=True)\n self._env.echo(\n ' Setting environment variables for {name:.<{width}}...'.\n format(name=name, width=max_name_len),\n newline=False,\n )\n\n start = time.time()\n spin = spinner.Spinner()\n with spin():\n result = step(spin)\n stop = time.time()\n\n self._log(result.status_str(stop - start))\n\n self._env.echo(result.status_str())\n for message in result.messages():\n sys.stderr.write('{}\\n'.format(message))\n self._env.echo(message)\n\n if not result.ok():\n return -1\n\n self._log('')\n self._env.echo('')\n\n self._env.finalize()\n\n self._env.echo(Color.bold('Checking the environment:'))\n self._env.echo()\n\n self._env.doctor()\n self._env.echo()\n\n self._env.echo(\n Color.bold('Environment looks good, you are ready to go!'))\n self._env.echo()\n\n with open(self._shell_file, 'w') as outs:\n self._env.write(outs)\n\n deactivate = os.path.join(\n self._install_dir,\n 'deactivate{}'.format(os.path.splitext(self._shell_file)[1]))\n with open(deactivate, 'w') as outs:\n self._env.write_deactivate(outs)\n\n config = {\n # Skipping sysname and nodename in os.uname(). nodename could change\n # based on the current network. sysname won't change, but is\n # redundant because it's contained in release or version, and\n # skipping it here simplifies logic.\n 'uname': ' '.join(getattr(os, 'uname', lambda: ())()[2:]),\n 'os': os.name,\n }\n\n with open(os.path.join(self._install_dir, 'config.json'), 'w') as outs:\n outs.write(\n json.dumps(config, indent=4, separators=(',', ': ')) + '\\n')\n\n if self._json_file is not None:\n with open(self._json_file, 'w') as outs:\n self._env.json(outs)\n\n return 0", "def setup():\n global venvs\n \n try:\n os.mkdir(basedir)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n os.chdir(basedir)\n \n # Delete virtualenvs and recreate\n for venv in glob('venv-*'):\n shutil.rmtree(venv)\n for py in available_python_versions():\n check_call(['virtualenv', '-p', py, '--system-site-packages', 'venv-%s' % py])\n venvs.append((py, 'venv-%s' % py))\n \n # Check out and update the repository\n if not os.path.exists('ipython'):\n try :\n check_call(['git', 'clone', ipy_repository])\n except CalledProcessError :\n check_call(['git', 'clone', ipy_http_repository])\n os.chdir(repodir)\n check_call(['git', 'checkout', 'master'])\n try :\n check_call(['git', 'pull', ipy_repository, 'master'])\n except CalledProcessError :\n check_call(['git', 'pull', ipy_http_repository, 'master'])\n os.chdir(basedir)", "def initFromEnv(self):\n #self.command = 'scram' # SB I think this line is not needed\n self[\"SCRAM_ARCH\"] = None\n\n if 'SCRAM_ARCH' in os.environ:\n self[\"SCRAM_ARCH\"] = os.environ[\"SCRAM_ARCH\"]\n else:\n stdout, _, _ = execute_command(command='scram arch')\n self[\"SCRAM_ARCH\"] = stdout\n\n try:\n self[\"CMSSW_BASE\"] = os.environ[\"CMSSW_BASE\"]\n self[\"CMSSW_VERSION\"] = os.environ[\"CMSSW_VERSION\"]\n# Commenting these two out. 
I don't think they are really needed\n# self.cmsswReleaseBase = os.environ[\"CMSSW_RELEASE_BASE\"]\n# self.localRT = os.environ[\"LOCALRT\"]\n except KeyError as ke:\n self[\"CMSSW_BASE\"] = None\n self[\"CMSSW_VERSION\"] = None\n# self.cmsswReleaseBase = None\n# self.localRT = None\n msg = \"Please make sure you have setup the CMS enviroment (cmsenv). Cannot find %s in your env\" % str(ke)\n msg += \"\\nPlease refer to https://twiki.cern.ch/twiki/bin/view/CMSPublic/WorkBookCRAB3Tutorial#CMS_environment for how to setup the CMS enviroment.\"\n raise EnvironmentException(msg)", "def prepare_environment(base_path):\n shutil.rmtree(base_path, ignore_errors=True)\n if not os.path.isdir(base_path):\n os.makedirs(base_path)", "def setup_tf_environment() -> None:\n _setup_cpu_environment()\n _setup_gpu_environment()", "def pre_build(self):\n pass", "def build():\n local('wintersmith build')", "def setUp(self):\n coloredlogs.install(level=logging.DEBUG)\n # Create a temporary working directory.\n self.working_directory = tempfile.mkdtemp()\n # Create a temporary build directory.\n self.build_directory = os.path.join(self.working_directory, 'build')\n # Create a temporary virtual environment.\n self.virtual_environment = os.path.join(self.working_directory, 'environment')\n python = 'python%i.%i' % (sys.version_info[0], sys.version_info[1])\n assert os.system('virtualenv --python=%s %s' % (pipes.quote(python), pipes.quote(self.virtual_environment))) == 0\n # Make sure pip-accel uses the pip in the temporary virtual environment.\n os.environ['PATH'] = '%s:%s' % (os.path.join(self.virtual_environment, 'bin'), os.environ['PATH'])\n os.environ['VIRTUAL_ENV'] = self.virtual_environment\n # Make pip and pip-accel use the temporary working directory.\n os.environ['PIP_DOWNLOAD_CACHE'] = os.path.join(self.working_directory, 'download-cache')\n os.environ['PIP_ACCEL_CACHE'] = self.working_directory\n # Initialize the required subdirectories.\n self.pip_accel = __import__('pip_accel')\n self.pip_accel.initialize_directories()", "def init():\n run('mkdir -p %(releases_dir)s' % env)\n execute(cleanup)\n execute(create_directories)", "def setup_environment(self, spack_env, run_env):\n run_env.prepend_path('PICARD',\n join_path(self.prefix, 'bin', 'picard.jar'))", "def _prepare_env_file(settings):\n\n\tenv_extractor = BinpkgEnvExtractor(background=False,\n\t\tscheduler=EventLoop(main=False), settings=settings)\n\n\tif env_extractor.dest_env_exists():\n\t\t# There are lots of possible states when doebuild()\n\t\t# calls this function, and we want to avoid\n\t\t# clobbering an existing environment file.\n\t\treturn os.EX_OK\n\n\tif not env_extractor.saved_env_exists():\n\t\t# If the environment.bz2 doesn't exist, then ebuild.sh will\n\t\t# source the ebuild as a fallback.\n\t\treturn os.EX_OK\n\n\tenv_extractor.start()\n\tenv_extractor.wait()\n\treturn env_extractor.returncode", "def _prepare_bootstrap(self):\r\n wrote_setuptools = False\r\n setuptools = DistributionHelper.distribution_from_path(\r\n self._interpreter.get_location('setuptools'),\r\n name='setuptools')\r\n\r\n if setuptools is None:\r\n raise RuntimeError('Failed to find setuptools while building pex!')\r\n\r\n for fn, content_stream in DistributionHelper.walk_data(setuptools):\r\n if fn == 'pkg_resources.py' or fn.startswith('_markerlib'):\r\n self._chroot.write(content_stream.read(), os.path.join(self.BOOTSTRAP_DIR, fn), 'resource')\r\n wrote_setuptools = True\r\n\r\n if not wrote_setuptools:\r\n raise RuntimeError(\r\n 'Failed to extract 
pkg_resources from setuptools. Perhaps pants was linked with an '\r\n 'incompatible setuptools.')\r\n\r\n libraries = (\r\n 'twitter.common.python',\r\n 'twitter.common.python.http',\r\n )\r\n\r\n for name in libraries:\r\n dirname = name.replace('twitter.common.python', '_twitter_common_python').replace('.', '/')\r\n provider = get_provider(name)\r\n if not isinstance(provider, DefaultProvider):\r\n mod = __import__(name, fromlist=['wutttt'])\r\n provider = ZipProvider(mod)\r\n for fn in provider.resource_listdir(''):\r\n if fn.endswith('.py'):\r\n self._chroot.write(provider.get_resource_string(name, fn),\r\n os.path.join(self.BOOTSTRAP_DIR, dirname, fn), 'resource')", "def initialize():\n environment = Environment()\n environment.setup()", "def setUp(self):\r\n coloredlogs.install(level=logging.DEBUG)\r\n # Create a temporary working directory.\r\n self.working_directory = tempfile.mkdtemp()\r\n # Create a temporary build directory.\r\n self.build_directory = os.path.join(self.working_directory, 'build')\r\n # Create a temporary virtual environment.\r\n self.virtual_environment = os.path.join(self.working_directory, 'environment')\r\n python = 'python%i.%i' % (sys.version_info[0], sys.version_info[1])\r\n assert os.system('virtualenv --python=%s %s' % (pipes.quote(python), pipes.quote(self.virtual_environment))) == 0\r\n # Make sure pip-accel uses the pip in the temporary virtual environment.\r\n os.environ['PATH'] = '%s:%s' % (os.path.join(self.virtual_environment, 'bin'), os.environ['PATH'])\r\n os.environ['VIRTUAL_ENV'] = self.virtual_environment\r\n # Make pip and pip-accel use the temporary working directory.\r\n os.environ['PIP_DOWNLOAD_CACHE'] = os.path.join(self.working_directory, 'download-cache')\r\n os.environ['PIP_ACCEL_CACHE'] = self.working_directory\r\n # Initialize the required subdirectories.\r\n self.pip_accel = __import__('pip_accel')\r\n self.pip_accel.initialize_directories()", "def _setup_env():\n env.home_path = os.path.expanduser('~')\n env.env_path = os.getenv('WORKON_HOME')\n\n if not env.env_path:\n warn(\"You should set the WORKON_HOME environment variable to\" \\\n \" the root directory for your virtual environments.\")\n env.env_path = env.sites_path\n\n env.project_path = join(env.sites_path, env.project_name)\n env.ve_path = join(env.env_path, env.project_name)\n env.activate_path = join(env.ve_path, 'bin', 'activate')", "def setup_project():\n _require_environment()\n\n # Checks if needed conf files for this environment already exist\n if not os.path.exists(_interpolate('%(settings)s.py')):\n abort(_interpolate('There is no settings.py for %(environment)s - create one, and commit'))\n if not os.path.exists(_interpolate('config/apache_%(environment)s.conf')):\n abort(_interpolate('There is no Apache conf for %(environment)s - use task \"generate_apache_conf\" to generate one, and commit'))\n if not os.path.exists(_interpolate('config/wsgi_%(environment)s.py')):\n abort(_interpolate('There is no WSGI conf for %(environment)s - use task \"generate_wsgi_conf\" to generate one, and commit'))\n\n # Configures virtualenv and clones git repo\n _setup_virtualenv()\n _clone_gitrepo()\n\n # Issues extra commands at project's level, if any\n extra_commands()\n\n # Sets up Apache, MySQL\n _setup_project_apache()\n _drop_database_mysql()\n _setup_project_mysql()\n\n # Finish installation\n pip_install()\n update_project()", "def collect_env():\n env_info = mmengine_collect_env()\n\n # MMEngine does not add the hipcc compiler information when collecting\n # environment 
information, so it is added here. When MMEngine v0.3.0 is\n # released, the code here can be removed.\n cuda_available = torch.cuda.is_available()\n if cuda_available and env_info.get('NVCC') == 'Not Available':\n CUDA_HOME = env_info['CUDA_HOME']\n if CUDA_HOME is not None and osp.isdir(CUDA_HOME):\n if CUDA_HOME == '/opt/rocm':\n try:\n nvcc = osp.join(CUDA_HOME, 'hip/bin/hipcc')\n nvcc = subprocess.check_output(\n f'\"{nvcc}\" --version', shell=True)\n nvcc = nvcc.decode('utf-8').strip()\n release = nvcc.rfind('HIP version:')\n build = nvcc.rfind('')\n nvcc = nvcc[release:build].strip()\n except subprocess.SubprocessError:\n nvcc = 'Not Available'\n else:\n try:\n nvcc = osp.join(CUDA_HOME, 'bin/nvcc')\n nvcc = subprocess.check_output(f'\"{nvcc}\" -V', shell=True)\n nvcc = nvcc.decode('utf-8').strip()\n release = nvcc.rfind('Cuda compilation tools')\n build = nvcc.rfind('Build ')\n nvcc = nvcc[release:build].strip()\n except subprocess.SubprocessError:\n nvcc = 'Not Available'\n env_info['NVCC'] = nvcc\n\n env_info['MMCV'] = mmcv.__version__\n\n try:\n from mmcv.ops import get_compiler_version, get_compiling_cuda_version\n except ModuleNotFoundError:\n env_info['MMCV Compiler'] = 'n/a'\n env_info['MMCV CUDA Compiler'] = 'n/a'\n else:\n env_info['MMCV Compiler'] = get_compiler_version()\n env_info['MMCV CUDA Compiler'] = get_compiling_cuda_version()\n\n return env_info", "def init(cx):\n\n\n # create the folder structure\n for d in PROJECT_DIRS:\n cx.run(\"mkdir -p {}\".format(d))\n cx.run(\"touch {}/.keep\".format(d))", "def getpythonenv(options,buildout):\n crypt=''\n if os.uname()[0] != 'Darwin':\n crypt=' -lcrypt '\n myfile = open(\n os.path.join(\n options['compile-directory'],\n 'Modules',\n 'Setup.local'),\n 'w'\n )\n myfile.write(\"\"\"\nzlib zlibmodule.c %(zlib)s\ncrypt cryptmodule.c %(crypt)s\nbz2 bz2module.c %(bzip2)s\n_curses _cursesmodule.c %(ncurses)s\n_curses_panel _curses_panel.c %(ncurses)s\nreadline readline.c %(readline)s\n_socket socketmodule.c\nsyslog syslogmodule.c\n_ssl _ssl.c %(ssl)s\ncStringIO cStringIO.c\ncPickle cPickle.c\npyexpat pyexpat.c -DHAVE_EXPAT_H %(expat)s\n_bsddb _bsddb.c %(db)s\n\"\"\" % {\n 'db': '-I%(db)s/include -L%(db)s/lib -Wl,-rpath,%(db)s/lib -ldb-%(dbv)s' % {\n 'db': os.path.abspath(buildout['db']['location']),\n 'dbv': buildout['db']['version']\n },\n 'readline': '-I%(readline)s/include -L%(readline)s/lib -Wl,-rpath,%(readline)s/lib -lhistory -lreadline' % {\n 'readline': os.path.abspath(buildout['readline']['location'])\n },\n 'ssl': '-I%(openssl)s/include -I%(openssl)s/include/openssl -L%(openssl)s/lib -Wl,-rpath -Wl,%(openssl)s/lib -lcrypto -lssl' % {\n 'openssl': os.path.abspath(buildout['openssl']['location'])\n },\n 'bzip2': '-I%(bzip2)s/include -L%(bzip2)s/lib -Wl,-rpath,%(bzip2)s/lib -lbz2' % {\n 'bzip2': os.path.abspath(buildout['bzip2']['location'])\n },\n 'zlib': '-I%(zlib)s/include -L%(zlib)s/lib -Wl,-rpath,%(zlib)s/lib -lz' % {\n 'zlib': os.path.abspath(buildout['zlib']['location'])\n },\n 'ncurses': '-I%(ncurses)s/include/ncurses -I%(ncurses)s/include -L%(ncurses)s/lib -Wl,-rpath -Wl,%(ncurses)s/lib -lpanel -lform -lmenu -lncurses' % {\n 'ncurses': os.path.abspath(buildout['ncurses']['location'])\n },\n 'expat': '-I%(expat)s/include -L%(expat)s/lib -Wl,-rpath,%(expat)s/lib -lexpat ' % {\n 'expat': os.path.abspath(buildout['expat']['location'])\n },\n 'crypt': crypt,\n}\n)\n myfile.close()\n os.environ['OPT'] = os.environ['CFLAGS']", "def init(self):\n # Create the default project files\n self.create_from_templates()\n\n # Add 
all the newly created files to the git staging area\n self.add_all_untracked()\n\n # Check that a compatible version of Python is available; install it if not\n self._pyenv.ensure_python(self.get_python_version())\n\n # Create virtualenv\n self._pyenv.create_virtualenv(self.name, self.get_python_version())", "def pre_build_ex(config, functions):\r\n print(\"pre_build_ex\")\r\n config[\"BUILD_DIR_PATH\"] = os.path.join(config[\"WORKSPACE\"],\r\n 'Build',\r\n config[\"PLATFORM_BOARD_PACKAGE\"],\r\n \"{}_{}\".format(\r\n config[\"TARGET\"],\r\n config[\"TOOL_CHAIN_TAG\"]))\r\n # set BUILD_DIR path\r\n config[\"BUILD_DIR\"] = os.path.join('Build',\r\n config[\"PLATFORM_BOARD_PACKAGE\"],\r\n \"{}_{}\".format(\r\n config[\"TARGET\"],\r\n config[\"TOOL_CHAIN_TAG\"]))\r\n config[\"BUILD_X64\"] = os.path.join(config[\"BUILD_DIR_PATH\"], 'X64')\r\n config[\"BUILD_IA32\"] = os.path.join(config[\"BUILD_DIR_PATH\"], 'IA32')\r\n\r\n if not os.path.isdir(config[\"BUILD_DIR_PATH\"]):\r\n try:\r\n os.makedirs(config[\"BUILD_DIR_PATH\"])\r\n except OSError:\r\n print(\"Error while creating Build folder\")\r\n sys.exit(1)\r\n\r\n #@todo: Replace this with PcdFspModeSelection\r\n if config.get(\"API_MODE_FSP_WRAPPER_BUILD\", \"FALSE\") == \"TRUE\":\r\n config[\"EXT_BUILD_FLAGS\"] += \" -D FSP_MODE=0\"\r\n else:\r\n config[\"EXT_BUILD_FLAGS\"] += \" -D FSP_MODE=1\"\r\n return None", "def pre_build_hook(self):", "def main(no_dev: bool):\n is_dev = not no_dev\n rewrite_pyproject(is_dev)\n if is_dev:\n make_dev_pyproject()", "def bootstrap(environment: Environment):\n pass", "def run_setup_develop(cwd, env):\n log_path = join(env['VIRTUAL_ENV'], 'holland_install.log')\n return subprocess.call(['python', 'setup.py', 'develop'],\n stdout=open(log_path, 'a'),\n stderr=subprocess.STDOUT,\n cwd=cwd,\n env=env)", "def generate(env):\n\n # FIXME: this is already too late\n #if env.get('quiet', False):\n # quietCommandLines(env)\n\n # shortcuts\n debug = env['debug']\n machine = env['machine']\n platform = env['platform']\n x86 = env['machine'] == 'x86'\n gcc = env['platform'] in ('linux', 'freebsd', 'darwin')\n msvc = env['platform'] in ('windows', 'winddk', 'wince')\n\n # Tool\n if platform == 'winddk':\n env.Tool('winddk')\n elif platform == 'wince':\n env.Tool('wcesdk')\n else:\n env.Tool('default')\n\n # Put build output in a separate dir, which depends on the current\n # configuration. See also http://www.scons.org/wiki/AdvancedBuildExample\n build_topdir = 'build'\n build_subdir = env['platform']\n if env['dri']:\n build_subdir += \"-dri\"\n if env['llvm']:\n build_subdir += \"-llvm\"\n if env['machine'] != 'generic':\n build_subdir += '-' + env['machine']\n if env['debug']:\n build_subdir += \"-debug\"\n if env['profile']:\n build_subdir += \"-profile\"\n build_dir = os.path.join(build_topdir, build_subdir)\n # Place the .sconsign file in the build dir too, to avoid issues with\n # different scons versions building the same source file\n env['build'] = build_dir\n env.SConsignFile(os.path.join(build_dir, '.sconsign'))\n\n # C preprocessor options\n cppdefines = []\n if debug:\n cppdefines += ['DEBUG']\n else:\n cppdefines += ['NDEBUG']\n if env['profile']:\n cppdefines += ['PROFILE']\n if platform == 'windows':\n cppdefines += [\n 'WIN32',\n '_WINDOWS',\n '_UNICODE',\n 'UNICODE',\n # http://msdn2.microsoft.com/en-us/library/6dwk3a1z.aspx,\n 'WIN32_LEAN_AND_MEAN',\n 'VC_EXTRALEAN',\n '_CRT_SECURE_NO_DEPRECATE',\n ]\n if debug:\n cppdefines += ['_DEBUG']\n if platform == 'winddk':\n # Mimic WINDDK's builtin flags. 
See also:\n # - WINDDK's bin/makefile.new i386mk.inc for more info.\n # - buildchk_wxp_x86.log files, generated by the WINDDK's build\n # - http://alter.org.ua/docs/nt_kernel/vc8_proj/\n cppdefines += [\n ('_X86_', '1'),\n ('i386', '1'),\n 'STD_CALL',\n ('CONDITION_HANDLING', '1'),\n ('NT_INST', '0'),\n ('WIN32', '100'),\n ('_NT1X_', '100'),\n ('WINNT', '1'),\n ('_WIN32_WINNT', '0x0501'), # minimum required OS version\n ('WINVER', '0x0501'),\n ('_WIN32_IE', '0x0603'),\n ('WIN32_LEAN_AND_MEAN', '1'),\n ('DEVL', '1'),\n ('__BUILDMACHINE__', 'WinDDK'),\n ('FPO', '0'),\n ]\n if debug:\n cppdefines += [('DBG', 1)]\n if platform == 'wince':\n cppdefines += [\n '_CRT_SECURE_NO_DEPRECATE',\n '_USE_32BIT_TIME_T',\n 'UNICODE',\n '_UNICODE',\n ('UNDER_CE', '600'),\n ('_WIN32_WCE', '0x600'),\n 'WINCEOEM',\n 'WINCEINTERNAL',\n 'WIN32',\n 'STRICT',\n 'x86',\n '_X86_',\n 'INTERNATIONAL',\n ('INTLMSG_CODEPAGE', '1252'),\n ]\n if platform == 'windows':\n cppdefines += ['PIPE_SUBSYSTEM_WINDOWS_USER']\n if platform == 'winddk':\n cppdefines += ['PIPE_SUBSYSTEM_WINDOWS_DISPLAY']\n if platform == 'wince':\n cppdefines += ['PIPE_SUBSYSTEM_WINDOWS_CE']\n env.Append(CPPDEFINES = cppdefines)\n\n # C preprocessor includes\n if platform == 'winddk':\n env.Append(CPPPATH = [\n env['SDK_INC_PATH'],\n env['DDK_INC_PATH'],\n env['WDM_INC_PATH'],\n env['CRT_INC_PATH'],\n ])\n\n # C compiler options\n cflags = []\n if gcc:\n if debug:\n cflags += ['-O0', '-g3']\n else:\n cflags += ['-O3', '-g3']\n if env['profile']:\n cflags += ['-pg']\n if env['machine'] == 'x86':\n cflags += [\n '-m32',\n #'-march=pentium4',\n '-mmmx', '-msse', '-msse2', # enable SIMD intrinsics\n #'-mfpmath=sse',\n ]\n if env['machine'] == 'x86_64':\n cflags += ['-m64']\n cflags += [\n '-Wall',\n '-Wmissing-prototypes',\n '-Wno-long-long',\n '-ffast-math',\n '-pedantic',\n '-fmessage-length=0', # be nice to Eclipse\n ]\n if msvc:\n # See also:\n # - http://msdn.microsoft.com/en-us/library/19z1t1wy.aspx\n # - cl /?\n if debug:\n cflags += [\n '/Od', # disable optimizations\n '/Oi', # enable intrinsic functions\n '/Oy-', # disable frame pointer omission\n ]\n else:\n cflags += [\n '/Ox', # maximum optimizations\n '/Oi', # enable intrinsic functions\n '/Ot', # favor code speed\n #'/fp:fast', # fast floating point \n ]\n if env['profile']:\n cflags += [\n '/Gh', # enable _penter hook function\n '/GH', # enable _pexit hook function\n ]\n cflags += [\n '/W3', # warning level\n #'/Wp64', # enable 64 bit porting warnings\n ]\n if env['machine'] == 'x86':\n cflags += [\n #'/QIfist', # Suppress _ftol\n #'/arch:SSE2', # use the SSE2 instructions\n ]\n if platform == 'windows':\n cflags += [\n # TODO\n ]\n if platform == 'winddk':\n cflags += [\n '/Zl', # omit default library name in .OBJ\n '/Zp8', # 8bytes struct member alignment\n '/Gy', # separate functions for linker\n '/Gm-', # disable minimal rebuild\n '/WX', # treat warnings as errors\n '/Gz', # __stdcall Calling convention\n '/GX-', # disable C++ EH\n '/GR-', # disable C++ RTTI\n '/GF', # enable read-only string pooling\n '/G6', # optimize for PPro, P-II, P-III\n '/Ze', # enable extensions\n '/Gi-', # disable incremental compilation\n '/QIfdiv-', # disable Pentium FDIV fix\n '/hotpatch', # prepares an image for hotpatching.\n #'/Z7', #enable old-style debug info\n ]\n if platform == 'wince':\n # See also C:\\WINCE600\\public\\common\\oak\\misc\\makefile.def\n cflags += [\n '/Zl', # omit default library name in .OBJ\n '/GF', # enable read-only string pooling\n '/GR-', # disable C++ RTTI\n '/GS', # enable 
security checks\n # Allow disabling language conformance to maintain backward compat\n #'/Zc:wchar_t-', # don't force wchar_t as native type, instead of typedef\n #'/Zc:forScope-', # don't enforce Standard C++ for scoping rules\n #'/wd4867',\n #'/wd4430',\n #'/MT',\n #'/U_MT',\n ]\n # Automatic pdb generation\n # See http://scons.tigris.org/issues/show_bug.cgi?id=1656\n env.EnsureSConsVersion(0, 98, 0)\n env['PDB'] = '${TARGET.base}.pdb'\n env.Append(CFLAGS = cflags)\n env.Append(CXXFLAGS = cflags)\n\n # Assembler options\n if gcc:\n if env['machine'] == 'x86':\n env.Append(ASFLAGS = ['-m32'])\n if env['machine'] == 'x86_64':\n env.Append(ASFLAGS = ['-m64'])\n\n # Linker options\n linkflags = []\n if gcc:\n if env['machine'] == 'x86':\n linkflags += ['-m32']\n if env['machine'] == 'x86_64':\n linkflags += ['-m64']\n if platform == 'winddk':\n # See also:\n # - http://msdn2.microsoft.com/en-us/library/y0zzbyt4.aspx\n linkflags += [\n '/merge:_PAGE=PAGE',\n '/merge:_TEXT=.text',\n '/section:INIT,d',\n '/opt:ref',\n '/opt:icf',\n '/ignore:4198,4010,4037,4039,4065,4070,4078,4087,4089,4221',\n '/incremental:no',\n '/fullbuild',\n '/release',\n '/nodefaultlib',\n '/wx',\n '/debug',\n '/debugtype:cv',\n '/version:5.1',\n '/osversion:5.1',\n '/functionpadmin:5',\n '/safeseh',\n '/pdbcompress',\n '/stack:0x40000,0x1000',\n '/driver',\n '/align:0x80',\n '/subsystem:native,5.01',\n '/base:0x10000',\n\n '/entry:DrvEnableDriver',\n ]\n if env['profile']:\n linkflags += [\n '/MAP', # http://msdn.microsoft.com/en-us/library/k7xkk3e2.aspx\n ]\n if platform == 'wince':\n linkflags += [\n '/nodefaultlib',\n #'/incremental:no',\n #'/fullbuild',\n '/entry:_DllMainCRTStartup',\n ]\n env.Append(LINKFLAGS = linkflags)\n\n # Default libs\n env.Append(LIBS = [])\n\n # Custom builders and methods\n createConvenienceLibBuilder(env)\n createCodeGenerateMethod(env)\n createInstallMethods(env)\n\n # for debugging\n #print env.Dump()", "def make_module_dep(self):\n\n #\n # First do some processing of and checks on the parameters\n #\n\n # One value that we will need a lot\n if self.cfg['PrgEnv_family'] == None:\n PrgEnv_family = None\n else:\n PrgEnv_family = self.cfg['PrgEnv_family'].lower()\n\n # Illegal parameter combination: PrgEnv_family True and PrgEnv_load True.\n if PrgEnv_family == 'prgenv' and self.cfg['PrgEnv_load']:\n raise EasyBuildError('Setting PrgEnv_family to \\'PrgEnv\\' and PrgEnv_load to True is not a valid combination.')\n\n # Illegal parameter combination: PrgEnv_load False and CPE_load == 'after'\n if self.cfg['CPE_load'] == 'after' and not self.cfg['PrgEnv_load']:\n raise EasyBuildError('Setting CPE_load to \\'after\\' and PrgEnv_load to False is not a valid combination.')\n\n # Determine the PrgEnv module\n if self.cfg['PrgEnv'] is None:\n try:\n prgenv_name = MAP_TOOLCHAIN_PRGENV[self.cfg['name']]\n except:\n raise EasyBuildError('%s is not a supported toolchain, you\\'ll need to specify both PrgEnv and CPE_compiler.',\n self.cfg['name'])\n else:\n prgenv_name = self.cfg['PrgEnv']\n if not 'PrgEnv-' + prgenv_name in KNOWN_PRGENVS:\n print_warning('PrgEnv-%s is not a supported PrgEnv module. 
Are you sure it is not a typo?', prgenv_mod)\n\n prgenv_mod = 'PrgEnv-' + prgenv_name\n\n self.log.debug(\"Detected PrgEnv-module: %s (version may be added through dependencies)\", prgenv_mod)\n\n # Determine the compiler module\n if self.cfg['CPE_compiler'] in [ None, 'auto']:\n try:\n compiler_mod = MAP_TOOLCHAIN_COMPILER[self.cfg['name']]\n except:\n raise EasyBuildError('%s is not a supported toolchain, you\\'ll need to specify both PrgEnv and CPE_compiler.',\n self.cfg['name'])\n else:\n compiler_mod = self.cfg['CPE_compiler']\n\n self.log.debug(\"Detected compiler module: %s (version may be added through dependencies\", compiler_mod)\n\n # Cray wrappers module\n craype_mod = 'craype'\n\n # Determine the cpe module (if needed)\n if self.cfg['CPE_load'] != None:\n if self.cfg['CPE_version'] is None:\n cpe_load_version = self.cfg['version']\n else:\n cpe_load_version = self.cfg['CPE_version']\n\n self.log.debug(\"Loading CPE version: %s (may be overwritten by dependencies)\", cpe_load_version)\n\n cpe_mod = 'cpe/' + cpe_load_version\n\n # Build a list of dependencies without version\n collect_deps = []\n force_compiler = False\n force_craype = False\n for dep in self.toolchain.dependencies:\n mod_name = dep['full_mod_name']\n if mod_name.startswith(prgenv_mod):\n prgenv_mod = mod_name\n elif mod_name.startswith(compiler_mod):\n compiler_mod = mod_name\n force_compiler = True\n elif mod_name.startswith(craype_mod):\n craype_mod = mod_name\n force_craype = True\n elif not (mod_name == 'cpe' or mod_name.startswith('cpe/')):\n collect_deps.append(mod_name)\n\n #\n # Now start generating the load commands and other stuff.\n #\n\n collect_statements = [''] # Will start with an empty line.\n\n # Do we need a family directive?\n if PrgEnv_family == 'prgenv':\n collect_statements = collect_statements + [ 'family(\\'PrgEnv\\')', '' ]\n elif PrgEnv_family == 'cpetoolchain':\n collect_statements = collect_statements + [ 'family(\\'cpeToolchain\\')', '' ]\n\n # Do we need to unload the PrgEnv modules?\n if PrgEnv_family == None and self.cfg['PrgEnv_load']:\n # Need to unload all PrgEnv-* modules except the one used by the module\n for prgenv in [prgenv for prgenv in KNOWN_PRGENVS if not prgenv_mod.startswith(prgenv)]:\n collect_statements.append(self.module_generator.unload_module(prgenv).strip())\n collect_statements.append('')\n elif (PrgEnv_family == 'cpetoolchain' or PrgEnv_family == None) and not self.cfg['PrgEnv_load'] :\n # Need to unload all PrgEnv-* modules.\n for prgenv in KNOWN_PRGENVS:\n collect_statements.append(self.module_generator.unload_module(prgenv).strip())\n collect_statements.append('')\n\n # Do we need to unload the cpe* modules?\n if PrgEnv_family == None:\n for cpe in [cpe for cpe in KNOWN_CPEMODS if not self.name.startswith(cpe)]:\n collect_statements.append(self.module_generator.unload_module(cpe).strip())\n collect_statements.append('')\n\n\n # Set PE_ENV if no PrgEnv-* module is loaded.\n if not self.cfg['PrgEnv_load']:\n collect_statements.append(self.module_generator.set_environment('PE_ENV', prgenv_name.upper(), False).lstrip())\n\n # Load the cpe module (if CPE_load is first)\n if self.cfg['CPE_load'] != None and self.cfg['CPE_load'].lower() == 'first':\n collect_statements.append(self.module_generator.load_module(cpe_mod, recursive_unload=False).lstrip())\n\n # load statement for selected PrgEnv module (only when not loaded yet)\n if self.cfg['PrgEnv_load']:\n collect_statements.append(self.module_generator.load_module(prgenv_mod, 
recursive_unload=False).lstrip())\n\n # Load the cpe module (if CPE_load is after)\n if self.cfg['CPE_load'] != None and self.cfg['CPE_load'].lower() == 'after':\n collect_statements.append(self.module_generator.load_module(cpe_mod, recursive_unload=False).lstrip())\n\n # Prepare the load statements for the targeting modules\n for dep in self.cfg['cray_targets']:\n collect_statements.append(self.module_generator.load_module(dep, recursive_unload=False).lstrip())\n\n # Load the selected compiler module, if not done through the dependencies or PrgEnv\n if (not self.cfg['PrgEnv_load']) or force_compiler:\n collect_statements.append(self.module_generator.load_module(compiler_mod, recursive_unload=False).lstrip())\n\n # Load the Cray compiler wrapper module, if not done through the dependencies\n if (not self.cfg['PrgEnv_load']) or force_craype:\n collect_statements.append(self.module_generator.load_module(craype_mod, recursive_unload=False).lstrip())\n\n # Now load the dependencies, using the full name and version if they are specified that way.\n for dep in collect_deps:\n collect_statements.append(self.module_generator.load_module(dep).lstrip())\n\n # Load the cpe module (if CPE_load is last)\n if self.cfg['CPE_load'] != None and self.cfg['CPE_load'].lower() == 'last':\n collect_statements.append(self.module_generator.load_module(cpe_mod, recursive_unload=False).lstrip())\n\n # Assemble all module unload/load statements.\n txt = '\\n'.join(collect_statements)\n return txt", "def _init_remote():\r\n require('path', provided_by = [staging])\r\n\r\n create_project_dir()\r\n deploy_nosyncdb()\r\n create_virtualenv()\r\n install_requirements()\r\n create_db()\r\n create_secret_settings()\r\n syncdb()\r\n createsuperuser()\r\n install_site()\r\n reload()", "def build():\n os.makedirs(DIST_DIR, exist_ok=True)\n\n if \"WHEEL\" in os.environ:\n whl = build_wheel()\n else:\n click.echo(\"Not building wheels.\")\n\n if \"WHEEL\" in os.environ and \"DOCKER\" in os.environ:\n # Docker image requires wheels\n build_docker_image(whl)\n else:\n click.echo(\"Not building Docker image.\")\n\n if \"PYINSTALLER\" in os.environ:\n build_pyinstaller()\n else:\n click.echo(\"Not building PyInstaller packages.\")", "def develop():\n# Install package in development mode\n sh('python setup.py develop')", "def __init__(self, conf, python, requirements, tagged_env_vars):\n self._env_dir = conf.env_dir\n self._repo_subdir = conf.repo_subdir\n self._install_timeout = conf.install_timeout # gh-391\n self._default_benchmark_timeout = conf.default_benchmark_timeout # gh-973\n self._tagged_env_vars = tagged_env_vars\n self._path = os.path.abspath(os.path.join(\n self._env_dir, self.dir_name))\n self._project = conf.project\n\n self._is_setup = False\n\n self._cache = build_cache.BuildCache(conf, self._path)\n self._build_root = os.path.abspath(os.path.join(self._path, 'project'))\n\n self._requirements = requirements\n # These are needed for asv to build and run the project, not part of\n # benchmark name mangling\n self._base_requirements = {}\n # gh-1314\n asv_runner_path = os.getenv(\"ASV_RUNNER_PATH\", \"\")\n module_path = Path(asv_runner_path) / \"asv_runner\"\n\n # Check if the path points to a directory containing the \"asv_runner\" module\n if module_path.is_dir() and (module_path / \"__init__.py\").is_file():\n spec = importlib.util.spec_from_file_location(\"asv_runner\",\n module_path / \"__init__.py\")\n # Attempt to load the module\n asv_runner_module = importlib.util.module_from_spec(spec)\n try:\n 
spec.loader.exec_module(asv_runner_module)\n self._base_requirements[\"pip+asv_runner\"] = asv_runner_path\n except Exception as e:\n self._base_requirements[\"pip+asv_runner\"] = \"\"\n log.warning(f\"Failed to load module from ASV_RUNNER_PATH: {e}\")\n else:\n self._base_requirements[\"pip+asv_runner\"] = \"\"\n if asv_runner_path:\n log.warning(\"ASV_RUNNER_PATH does not point\"\n \"to a directory containing the 'asv_runner' module\")\n if not util.ON_PYPY:\n # XXX: What if pypy installed asv tries to benchmark a cpython\n # python?\n self._base_requirements[\"pip+pympler\"] = \"\"\n if (Path.cwd() / \"poetry.lock\").exists():\n self._base_requirements[\"poetry-core\"] = \"\"\n\n if (Path.cwd() / \"pdm.lock\").exists():\n self._base_requirements[\"pdm\"] = \"\"\n\n # Update the _base_requirements if needed\n for key in list(self._requirements.keys()):\n if key in self._base_requirements:\n self._base_requirements[key] = self._requirements[key]\n del self._requirements[key]\n\n self._build_command = conf.build_command\n self._install_command = conf.install_command\n self._uninstall_command = conf.uninstall_command\n\n self._global_env_vars = {}\n self._global_env_vars['ASV'] = 'true'\n self._global_env_vars['ASV_PROJECT'] = conf.project\n self._global_env_vars['ASV_CONF_DIR'] = os.path.abspath(os.getcwd())\n self._global_env_vars['ASV_ENV_NAME'] = self.name\n self._global_env_vars['ASV_ENV_DIR'] = self._path\n self._global_env_vars['ASV_ENV_TYPE'] = self.tool_name\n\n installed_commit_hash = self._get_installed_commit_hash()\n self._set_commit_hash(installed_commit_hash)", "def init_repo():\n print(f\"Initializing development environment for {pkg_name}\")\n try:\n git_init = run('git init .'.split(), check=True)\n print('Initialized git repository')\n if repo:\n git_add_remote = run(f'git remote add origin {repo}'.split(), check=True)\n print(f'Found url, set origin: {repo}')\n git_add = run('git add -A'.split(), check=True)\n git_commit = run(shlex.split(f'git commit -m \"first commit of {pkg_name} \"'), check=True)\n git_tag = run(shlex.split('git tag -a -m \"first tag\" 0.0.1'), check=True)\n print('First commit.')\n pipenv_versioneer = run('pipenv run versioneer install'.split(), check=True)\n print('Installed versioneer')\n pipenv_install_dev = run('pipenv run pip install -e .'.split(), check=True)\n print('Installed package in development mode.')\n git_add_after = run('git add -A'.split(), check=True)\n git_commit_after = run(shlex.split('git commit -m \"added versioneer support.\"'), check=True)\n print('second commit.')\n print('All set!')\n except CalledProcessError as e:\n print(e)", "def task_prepare_build():\n\n import sys\n\n python_path = sys.executable.split(os.sep)\n venv_path = str(Path(os.sep.join(python_path[:-2])))\n\n def get_dst_path():\n import platform\n\n print(f\"Going on with {venv_path} as the virtual environment exclusively used for using pyinstaller.\")\n arch = platform.system()\n if arch == \"Windows\":\n return Path(venv_path) / \"Lib/site-packages/mad_gui/qt_designer/build/\"\n if arch in [\"Linux\", \"Darwin\"]:\n python_dirs = os.listdir(Path(venv_path) / \"lib/\")\n warnings.warn(\n f\"dodo.py: Assuming your python 3.7 installation is in {Path(venv_path)}/lib/{python_dirs[0]}\"\n )\n return Path(venv_path) / \"lib\" / python_dirs[0] / \"site-packages/mad_gui/qt_designer/build/\"\n raise ValueError(\"What operating system is this?!\")\n\n def set_up_paths():\n if not os.path.exists(get_dst_path().parent):\n raise FileNotFoundError(\n \"Apparently 
mad_gui is not installed in this environemnt. Use `pip install . ` to do so.\"\n )\n dst_path = get_dst_path()\n os.makedirs(dst_path, exist_ok=True)\n\n def convert_ui_to_py():\n dst_path = get_dst_path()\n ui_files = [file for file in os.listdir(dst_path.parent) if \".ui\" in file]\n print(\"\\n\")\n for file in ui_files:\n print(f\"Converting from: {dst_path.parent}{os.sep}{file}\")\n print(f\"To: {dst_path}{os.sep}{file.split('.')[0]}.py\\n\")\n os.popen(f\"pyside2-uic -o {dst_path}{os.sep}{file.split('.')[0]}.py {dst_path.parent}{os.sep}{file}\")\n\n print(\n \"Info: These conversion should take place in the virutal environment you are going to use with \"\n \"pyinstaller.\"\n )\n\n return {\n \"actions\": [set_up_paths, convert_ui_to_py],\n \"verbosity\": 2,\n }", "def _preparation_workflow(self):\n self._validate_environment()\n self._validate_parameters()\n self._update_verbosity()", "def setup():\n\n debs = (\"python-setuptools\", \"apache2\", \"libapache2-mod-wsgi\")\n\n require(\"hosts\", provided_by=[production, staging])\n sudo(\"apt-get install %s\" % \" \".join(debs))\n sudo(\"easy_install virtualenv pip\")\n sudo(\"mkdir -p %(path)s\" % env)\n with cd(\"%(path)s\" % env):\n sudo(\"mkdir -p releases; mkdir -p packages\")\n sudo(\"virtualenv --no-site-packages .\")\n sudo(\"mkdir -p /var/log/twit-demo; chown www-data:www-data /var/log/twit-demo\")", "def build_env_wheels() -> Iterable[Path]:\n return []", "def build_env(build_env_wheels: Iterable[Path], tmp_path_factory: pytest.TempPathFactory) -> Path:\n d = tmp_path_factory.mktemp(\"pdm-test-env\")\n p = Core().create_project(d)\n env = PythonEnvironment(p, prefix=str(d))\n for wheel in build_env_wheels:\n install_wheel(str(wheel), env)\n return d", "def build():\n\timport subprocess\n\tfrom os import listdir, getcwd\n\tfrom os.path import isfile, join\n\tonlyfiles = [f for f in listdir(getcwd()) if isfile(join(getcwd(), f))]\n\n\tif not 'requirements.txt' in onlyfiles:\n\t\traise SystemExit('File including depencencies not found. You will have to install them manually.')\n\n\tsubprocess.check_call([sys.executable, '-m', 'pip', 'install', '-r', 'requirements.txt'])\n\n\tprint('All dependencies installed successfully.\\nYou can run Simplex now!')", "def preCheckDeps(self):\n \n # add cmake dependency\n if( self.mode == \"install\" and self.hasCMakeBuildSupport ):\n self.addExternalDependency( [\"CMake\" ] )\n if self.name != \"LCIO\":\n self.addExternalDependency( [\"ILCUTIL\" ] )", "def prepare(self):\n super(Test200SmartSanityDownload004, self).prepare()\n\n self.logger.info('Preconditions:')\n self.logger.info('1. Open Micro/WINr; ')\n self.logger.info('2. Set up connection with PLC;')\n self.logger.info('3. 
Create a project which has OB,DB,SDB;')\n self.MicroWIN.test_prepare('ob_db_sdb_01.smart')\n self.PROJECT.project_open('ob_db_sdb_02.smart')", "def pre_configure_project(source_dir, build_dir):\n ListenerManager.call(_project_pre_configure_manager, source_dir, build_dir)", "def auto_setup(basedir=None, devices=None, logdir=None, project_root=None, compress=None):\n if basedir:\n if os.path.isfile(basedir):\n basedir = os.path.dirname(basedir)\n if basedir not in G.BASEDIR:\n G.BASEDIR.append(basedir)\n if devices:\n for dev in devices:\n connect_device(dev)\n if logdir:\n logdir = script_log_dir(basedir, logdir)\n set_logdir(logdir)\n if project_root:\n ST.PROJECT_ROOT = project_root\n if compress:\n ST.SNAPSHOT_QUALITY = compress", "def install():\n build()\n sh(\"%s setup.py develop\" % PYTHON)", "def pre_build(self):", "def build(session: nox.Session) -> None:\n\n dist_dir = DIR.joinpath(\"dist\")\n if dist_dir.exists():\n shutil.rmtree(dist_dir)\n\n session.install(\".[dev]\")\n session.run(\"flit\", \"build\")", "def _require_environment():\n require('environment', 'host', provided_by=ENVS.keys())", "def collect_env():\n env_info = {}\n env_info['sys.platform'] = sys.platform\n env_info['Python'] = sys.version.replace('\\n', '')\n\n cuda_available = torch.cuda.is_available()\n env_info['CUDA available'] = cuda_available\n\n if cuda_available:\n devices = defaultdict(list)\n for k in range(torch.cuda.device_count()):\n devices[torch.cuda.get_device_name(k)].append(str(k))\n for name, device_ids in devices.items():\n env_info['GPU ' + ','.join(device_ids)] = name\n\n from mmcv.utils.parrots_wrapper import _get_cuda_home\n CUDA_HOME = _get_cuda_home()\n env_info['CUDA_HOME'] = CUDA_HOME\n\n if CUDA_HOME is not None and osp.isdir(CUDA_HOME):\n try:\n nvcc = osp.join(CUDA_HOME, 'bin/nvcc')\n nvcc = subprocess.check_output(f'\"{nvcc}\" -V', shell=True)\n nvcc = nvcc.decode('utf-8').strip()\n release = nvcc.rfind('Cuda compilation tools')\n build = nvcc.rfind('Build ')\n nvcc = nvcc[release:build].strip()\n except subprocess.SubprocessError:\n nvcc = 'Not Available'\n env_info['NVCC'] = nvcc\n\n try:\n # Check C++ Compiler.\n # For Unix-like, sysconfig has 'CC' variable like 'gcc -pthread ...',\n # indicating the compiler used, we use this to get the compiler name\n import sysconfig\n cc = sysconfig.get_config_var('CC')\n if cc:\n cc = osp.basename(cc.split()[0])\n cc_info = subprocess.check_output(f'{cc} --version', shell=True)\n env_info['GCC'] = cc_info.decode('utf-8').partition(\n '\\n')[0].strip()\n else:\n # on Windows, cl.exe is not in PATH. 
We need to find the path.\n # distutils.ccompiler.new_compiler() returns a msvccompiler\n # object and after initialization, path to cl.exe is found.\n import locale\n import os\n from distutils.ccompiler import new_compiler\n ccompiler = new_compiler()\n ccompiler.initialize()\n cc = subprocess.check_output(\n f'{ccompiler.cc}', stderr=subprocess.STDOUT, shell=True)\n encoding = os.device_encoding(\n sys.stdout.fileno()) or locale.getpreferredencoding()\n env_info['MSVC'] = cc.decode(encoding).partition('\\n')[0].strip()\n env_info['GCC'] = 'n/a'\n except subprocess.CalledProcessError:\n env_info['GCC'] = 'n/a'\n\n env_info['PyTorch'] = torch.__version__\n env_info['PyTorch compiling details'] = get_build_config()\n\n try:\n import torchvision\n env_info['TorchVision'] = torchvision.__version__\n except ModuleNotFoundError:\n pass\n\n env_info['OpenCV'] = cv2.__version__\n\n env_info['MMCV'] = mmcv.__version__\n\n try:\n from mmcv.ops import get_compiler_version, get_compiling_cuda_version\n except ModuleNotFoundError:\n env_info['MMCV Compiler'] = 'n/a'\n env_info['MMCV CUDA Compiler'] = 'n/a'\n else:\n env_info['MMCV Compiler'] = get_compiler_version()\n env_info['MMCV CUDA Compiler'] = get_compiling_cuda_version()\n\n return env_info", "def prepare_env_for_all_codes(i):\n\n # Check vars\n if 'code_deps' not in i: return {'cm_return':1, 'cm_error':'\"code_deps\" is not defined in \"code prepare_env_for_all_codes\"'}\n\n include_paths=[]\n lib_paths=[]\n\n # Load OS\n os_uoa=''\n if 'os_uoa' in i and i['os_uoa']!='': os_uoa=i['os_uoa']\n elif 'cm_default_os_uoa' in cm_kernel.ini['dcfg'] and cm_kernel.ini['dcfg']['cm_default_os_uoa']!='':\n os_uoa=cm_kernel.ini['dcfg']['cm_default_os_uoa']\n\n if os_uoa=='' not in i:\n return {'cm_return':1, 'cm_error':'\"os_uoa\" is not defined and not in kernel'}\n\n ii={'cm_run_module_uoa':ini['cfg']['cm_modules']['os'],\n 'cm_action':'load',\n 'cm_data_uoa':os_uoa}\n r=cm_kernel.access(ii)\n if r['cm_return']>0: return r\n\n os_cfg=r['cm_data_obj']['cfg']\n os_path=r['cm_path']\n os_uid=r['cm_uid']\n os_alias=r['cm_alias']\n\n s_code_deps=''\n a_code_deps=[]\n if 'code_deps' in i:\n for xx in i['code_deps']:\n yy=xx.keys()[0]\n x=xx[yy]\n\n if x=='':\n return {'cm_return':1, 'cm_error':'dependency \"'+yy+'\" is empty, please check your input'}\n\n # Check if code was installed\n if i.get('no_strict_check','')!='yes':\n ii={'cm_run_module_uoa':ini['cm_module_uid'],\n 'cm_action':'load',\n 'cm_data_uoa':x}\n r=cm_kernel.access(ii)\n if r['cm_return']==16:\n return {'cm_return':1, 'cm_error':'dependency is not resolved - code '+x+' ('+yy+') is not installed'}\n elif r['cm_return']>0: return r\n code_cfg=r['cm_data_obj']['cfg']\n if code_cfg.get('build_finished_successfully','')!='yes':\n return {'cm_return':1, 'cm_error':'dependency is not resolved - code '+x+' ('+yy+') is not installed'}\n\n code_path=r['cm_path']\n include_paths.append(os.path.join(code_path, 'include'))\n\n if 'state_input' in code_cfg and \\\n 'run_set_env2' in code_cfg['state_input'] and \\\n 'CM_TARGET_FILE' in code_cfg['state_input']['run_set_env2']:\n lib_paths.append(os.path.join(code_path, os_cfg['lib_dir'], \n code_cfg['state_input']['run_set_env2']['CM_TARGET_FILE']))\n\n # Environment script\n r=get_env({'cm_data_uoa':x, 'os_uoa':os_uoa})\n if r['cm_return']>0: return r\n\n# z=os_cfg['env_call']+' '+os.path.join(cm_kernel.ini[cm_kernel.env_cm_bin],r['cm_string'])\n z1=os_cfg['env_set']+' '+yy+'='+os_cfg['env_quotes']+x+os_cfg['env_quotes']\n z=os_cfg['env_call']+' 
'+r['cm_string']\n\n if s_code_deps!='': s_code_deps+=' '+os_cfg['env_separator']+' '\n s_code_deps+=z1\n if s_code_deps!='': s_code_deps+=' '+os_cfg['env_separator']+' '\n s_code_deps+=z\n # FGG added again setting environment variable since calling other scripts can change it\n # for example, we set CM_CODE_DEP_COMPILER and then call GMP that was compiled with another\n # compiler, then it will change this variable to a wrong value and further tools will \n # not be working correctly ...\n if s_code_deps!='': s_code_deps+=' '+os_cfg['env_separator']+' '\n s_code_deps+=z1\n\n a_code_deps.append(z1)\n a_code_deps.append(z)\n a_code_deps.append(z1)\n\n return {'cm_return':0, 'cm_string':s_code_deps, 'cm_array':a_code_deps, 'env_separator': os_cfg['env_separator'],\n 'include_paths':include_paths, 'lib_paths':lib_paths}", "def build_python_environment(self):\n # build command for creating the python environment\n cmd_args = {\n 'exe': self.env_executable,\n 'cmds': \" \".join(self.env_commands),\n 'flags': \" \".join(self.env_flags),\n 'args': \" \".join(self.env_arguments),\n }\n cmd_create_env = self.cmd_env.format(**cmd_args)\n print(\"Building new python environment ({}) ... \"\n .format(self.proj_name))\n with click_spinner.spinner():\n errno, stdout, stderr = utils.run_command(cmd_create_env, env=None,\n shell=True)\n if errno:\n raise Exception(\"Environment setup failed (STDERR: {})\"\n .format(stderr))", "def install_deps():\n pipenv_dev = run('pipenv install --dev'.split(), check=True)\n print('Installed dependencies and virtual environment. Type `pipenv shell` to activate later.')", "def setup():\n require('hosts', provided_by=[prod])\n require('code_root')\n sudo('apt-get update')\n sudo('apt-get install -y python-setuptools')\n sudo('easy_install pip')\n sudo('pip install virtualenv')\n sudo('aptitude install -y apache2')\n sudo('aptitude install -y libapache2-mod-wsgi')\n sudo('apt-get install -y nginx')\n update_webserver_config()\n sudo('mkdir -p %s; cd %s; virtualenv .;' % (env.code_root, env.code_root))\n sudo('cd %s;mkdir releases; mkdir shared; mkdir packages; mkdir shared/media; mkdir shared/media/file;' % (env.code_root))\n deploy()", "def _prepare_env(self, graph, **kwargs):\n raise NotImplementedError", "def test_environment_scripts_generated_envvars(require_run):\n consumer_pkg = textwrap.dedent(r\"\"\"\n from conan import ConanFile\n from conan.tools.env import VirtualBuildEnv, VirtualRunEnv\n class Pkg(ConanFile):\n settings = \"os\"\n requires = \"require_pkg/1.0\"\n tool_requires = \"build_require_pkg/1.0\"\n generators = \"VirtualRunEnv\", \"VirtualBuildEnv\"\n \"\"\")\n\n client = TestClient()\n conanfile_br = (GenConanfile().with_package_file(\"bin/myapp\", \"myexe\")\n .with_package_file(\"lib/mylib\", \"mylibcontent\")\n .with_settings(\"os\"))\n conanfile_require = (GenConanfile().with_package_file(\"bin/myapp\", \"myexe\")\n .with_package_file(\"lib/mylib\", \"mylibcontent\")\n .with_settings(\"os\"))\n if require_run:\n conanfile_require.with_package_type(\"application\")\n client.save({\"build_require_pkg/conanfile.py\": conanfile_br,\n \"require_pkg/conanfile.py\": conanfile_require,\n \"consumer_pkg/conanfile.py\": consumer_pkg})\n\n client.run(\"export build_require_pkg --name=build_require_pkg --version=1.0\")\n client.run(\"export require_pkg --name=require_pkg --version=1.0\")\n\n client.run(\"install consumer_pkg --build='*'\")\n if platform.system() == \"Windows\":\n conanbuildenv = client.load(\"consumer_pkg/conanbuildenv.bat\")\n if 
require_run:\n conanrunenv = client.load(\"consumer_pkg/conanrunenv.bat\")\n assert \"LD_LIBRARY_PATH\" not in conanbuildenv\n assert \"LD_LIBRARY_PATH\" not in conanrunenv\n else:\n assert not os.path.exists(\"consumer_pkg/conanrunenv.bat\")\n else:\n if require_run:\n conanbuildenv = client.load(\"consumer_pkg/conanbuildenv.sh\")\n conanrunenv = client.load(\"consumer_pkg/conanrunenv.sh\")\n assert \"LD_LIBRARY_PATH\" in conanbuildenv\n assert \"LD_LIBRARY_PATH\" in conanrunenv\n else:\n assert not os.path.exists(\"consumer_pkg/conanrunenv.sh\")\n\n if require_run:\n # Build context LINUX - Host context LINUX\n client.run(\"install consumer_pkg -s:b os=Linux -s:h os=Linux --build='*'\")\n conanbuildenv = client.load(\"consumer_pkg/conanbuildenv.sh\")\n conanrunenv = client.load(\"consumer_pkg/conanrunenv.sh\")\n assert \"LD_LIBRARY_PATH\" in conanbuildenv\n assert \"LD_LIBRARY_PATH\" in conanrunenv\n\n # Build context WINDOWS - Host context WINDOWS\n client.run(\"install consumer_pkg -s:b os=Windows -s:h os=Windows --build='*'\")\n conanbuildenv = client.load(\"consumer_pkg/conanbuildenv.bat\")\n conanrunenv = client.load(\"consumer_pkg/conanrunenv.bat\")\n assert \"LD_LIBRARY_PATH\" not in conanbuildenv\n assert \"LD_LIBRARY_PATH\" not in conanrunenv\n\n # Build context LINUX - Host context WINDOWS\n client.run(\"install consumer_pkg -s:b os=Linux -s:h os=Windows --build='*'\")\n conanbuildenv = client.load(\"consumer_pkg/conanbuildenv.sh\")\n conanrunenv = client.load(\"consumer_pkg/conanrunenv.bat\")\n assert \"LD_LIBRARY_PATH\" in conanbuildenv\n assert \"LD_LIBRARY_PATH\" not in conanrunenv\n\n # Build context WINDOWS - Host context LINUX\n client.run(\"install consumer_pkg -s:b os=Windows -s:h os=Linux --build='*'\")\n conanbuildenv = client.load(\"consumer_pkg/conanbuildenv.bat\")\n conanrunenv = client.load(\"consumer_pkg/conanrunenv.sh\")\n assert \"LD_LIBRARY_PATH\" not in conanbuildenv\n assert \"LD_LIBRARY_PATH\" in conanrunenv", "def setup_prod():\n setup_general()", "def generic_env_configure_vars(self, verbose=False):\n\n if self.settings.os == \"Windows\":\n self.output.fatal(\"Cannot build on Windows, sorry!\")\n return\n\n if self.settings.os == \"Linux\" or self.settings.os == \"Macos\":\n libs = 'LIBS=\"%s\"' % \" \".join([\"-l%s\" % lib for lib in self.deps_cpp_info.libs])\n ldflags = 'LDFLAGS=\"%s\"' % \" \".join([\"-L%s\" % lib for lib in self.deps_cpp_info.lib_paths]) \n archflag = \"-m32\" if self.settings.arch == \"x86\" else \"\"\n cflags = 'CFLAGS=\"-fPIC %s %s\"' % (archflag, \" \".join(self.deps_cpp_info.cflags))\n cpp_flags = 'CPPFLAGS=\"%s %s\"' % (archflag, \" \".join(self.deps_cpp_info.cppflags))\n command = \"env %s %s %s %s\" % (libs, ldflags, cflags, cpp_flags)\n # elif self.settings.os == \"Windows\" and self.settings.compiler == \"Visual Studio\":\n # cl_args = \" \".join(['/I\"%s\"' % lib for lib in self.deps_cpp_info.include_paths])\n # lib_paths= \";\".join(['\"%s\"' % lib for lib in self.deps_cpp_info.lib_paths])\n # command = \"SET LIB=%s;%%LIB%% && SET CL=%s\" % (lib_paths, cl_args)\n # if verbose:\n # command += \" && SET LINK=/VERBOSE\"\n \n return command", "def prepare_env():\n if APPID is None or APIKEY is None or EHSM_IP is None:\n print(\"Please set environment variable APPID, APIKEY, ehsm_ip!\")\n exit(1)\n generate_primary_key(EHSM_IP, EHSM_PORT)\n global encrypted_primary_key_path\n encrypted_primary_key_path = \"./encrypted_primary_key\"\n generate_data_key(EHSM_IP, EHSM_PORT, encrypted_primary_key_path, 32)\n global 
encrypted_data_key_path\n encrypted_data_key_path = \"./encrypted_data_key\"\n patch_encryption()", "def environment_preparation():\n report_file_path = (\n f\"{os.path.abspath('.')}/{Common.get_config_value('report_location')}\"\n )\n data_location_path = (\n f\"{os.path.abspath('.')}/{Common.get_config_value('data_location')}\"\n )\n if f\"{Common.get_config_value('report_location')}\":\n if os.path.isdir(f\"{report_file_path}\"):\n for data_path, directory_list, file_list in os.walk(\n f\"{report_file_path}\"\n ):\n [os.remove(f\"{report_file_path}/{file}\") for file in file_list]\n else:\n os.mkdir(f\"{report_file_path}\")\n workbook = xlwt.Workbook()\n workbook.add_sheet(\"test1\")\n workbook.save(f\"{report_file_path}/report.xls\")\n if (\n f'{Common.get_config_value(\"data_location\")}'\n not in Common.get_config_value(\"unsupported_path\")\n ):\n try:\n if os.path.isdir(f\"{data_location_path}\"):\n for data_path, directory_list, file_list in os.walk(\n f\"{data_location_path}\"\n ):\n [os.remove(f\"{data_path}/{file}\") for file in file_list]\n else:\n os.mkdir(f\"{data_location_path}\")\n except OSError as ex:\n Common.logger.warning(f\"Path not found {ex}\")\n else:\n Common.logger.warning(f\"Path not found\")\n Common.logger.info(\"Environment preparation completed successfully\")", "def setup(verbose=True):\n global _SETUP_DONE\n if _SETUP_DONE:\n return\n _ensure_base_dirs(verbose=False)\n _setup_logging()\n _SETUP_DONE = True", "def set_dev(session):\n set_environment_variables(PYBAMM_ENV, session=session)\n envbindir = session.bin\n session.install(\"-e\", \".[all]\")\n session.install(\"cmake\")\n if sys.platform == \"linux\" or sys.platform == \"darwin\":\n session.run(\n \"echo\",\n \"export\",\n f\"LD_LIBRARY_PATH={PYBAMM_ENV['LD_LIBRARY_PATH']}\",\n \">>\",\n f\"{envbindir}/activate\",\n external=True, # silence warning about echo being an external command\n )", "def set_env(self, propagated_env_vars={}):\n os.environ['BUILD_ROOT'] = self.build_root\n # This is how we tell run-test.sh what set of C++ binaries to use for mini-clusters in Java\n # tests.\n for env_var_name, env_var_value in propagated_env_vars.iteritems():\n os.environ[env_var_name] = env_var_value", "def _init_env_variables(self):\n raise NotImplementedError()", "def _init_env_variables(self):\n raise NotImplementedError()", "def _init_env_variables(self):\n raise NotImplementedError()", "def _init_env_variables(self):\n raise NotImplementedError()", "def _init_env_variables(self):\n raise NotImplementedError()", "def _init_env_variables(self):\n raise NotImplementedError()", "def setUp(self):\n test_env_setup()" ]
[ "0.68191844", "0.679081", "0.6736225", "0.67010844", "0.6668821", "0.66451913", "0.66270363", "0.6605766", "0.6512834", "0.6408785", "0.6339571", "0.63157713", "0.6313925", "0.6302971", "0.6236211", "0.62336695", "0.62183577", "0.6171169", "0.6163672", "0.6096524", "0.60917133", "0.60557914", "0.6034636", "0.6026261", "0.60039854", "0.600288", "0.60022724", "0.59898984", "0.59840405", "0.59829974", "0.59554106", "0.5941774", "0.5940225", "0.59301364", "0.59121734", "0.59027785", "0.58991075", "0.5893415", "0.5860139", "0.5855944", "0.58485246", "0.58455884", "0.58446753", "0.5839336", "0.5838319", "0.58359504", "0.5833404", "0.58167005", "0.581453", "0.5811932", "0.5809235", "0.5808623", "0.580019", "0.5799725", "0.57793814", "0.5766335", "0.57643104", "0.5761718", "0.57549185", "0.57533604", "0.5743042", "0.57378346", "0.5737437", "0.57167137", "0.5707503", "0.57054704", "0.56737584", "0.5672074", "0.5654998", "0.5646128", "0.56443006", "0.5636056", "0.5621322", "0.5620432", "0.56191516", "0.5616296", "0.56145453", "0.5597868", "0.55974656", "0.5596411", "0.5594968", "0.559227", "0.55914223", "0.5586892", "0.5583493", "0.55832124", "0.5582144", "0.5573809", "0.5568971", "0.5567211", "0.5556607", "0.5552911", "0.55490357", "0.55460525", "0.55460525", "0.55460525", "0.55460525", "0.55460525", "0.55460525", "0.55415905" ]
0.6293774
14
Generate load/swap statements for the module file
def make_module_dep(self): # # First do some processing of and checks on the parameters # # One value that we will need a lot if self.cfg['PrgEnv_family'] == None: PrgEnv_family = None else: PrgEnv_family = self.cfg['PrgEnv_family'].lower() # Illegal parameter combination: PrgEnv_family True and PrgEnv_load True. if PrgEnv_family == 'prgenv' and self.cfg['PrgEnv_load']: raise EasyBuildError('Setting PrgEnv_family to \'PrgEnv\' and PrgEnv_load to True is not a valid combination.') # Illegal parameter combination: PrgEnv_load False and CPE_load == 'after' if self.cfg['CPE_load'] == 'after' and not self.cfg['PrgEnv_load']: raise EasyBuildError('Setting CPE_load to \'after\' and PrgEnv_load to False is not a valid combination.') # Determine the PrgEnv module if self.cfg['PrgEnv'] is None: try: prgenv_name = MAP_TOOLCHAIN_PRGENV[self.cfg['name']] except: raise EasyBuildError('%s is not a supported toolchain, you\'ll need to specify both PrgEnv and CPE_compiler.', self.cfg['name']) else: prgenv_name = self.cfg['PrgEnv'] if not 'PrgEnv-' + prgenv_name in KNOWN_PRGENVS: print_warning('PrgEnv-%s is not a supported PrgEnv module. Are you sure it is not a typo?', prgenv_mod) prgenv_mod = 'PrgEnv-' + prgenv_name self.log.debug("Detected PrgEnv-module: %s (version may be added through dependencies)", prgenv_mod) # Determine the compiler module if self.cfg['CPE_compiler'] in [ None, 'auto']: try: compiler_mod = MAP_TOOLCHAIN_COMPILER[self.cfg['name']] except: raise EasyBuildError('%s is not a supported toolchain, you\'ll need to specify both PrgEnv and CPE_compiler.', self.cfg['name']) else: compiler_mod = self.cfg['CPE_compiler'] self.log.debug("Detected compiler module: %s (version may be added through dependencies", compiler_mod) # Cray wrappers module craype_mod = 'craype' # Determine the cpe module (if needed) if self.cfg['CPE_load'] != None: if self.cfg['CPE_version'] is None: cpe_load_version = self.cfg['version'] else: cpe_load_version = self.cfg['CPE_version'] self.log.debug("Loading CPE version: %s (may be overwritten by dependencies)", cpe_load_version) cpe_mod = 'cpe/' + cpe_load_version # Build a list of dependencies without version collect_deps = [] force_compiler = False force_craype = False for dep in self.toolchain.dependencies: mod_name = dep['full_mod_name'] if mod_name.startswith(prgenv_mod): prgenv_mod = mod_name elif mod_name.startswith(compiler_mod): compiler_mod = mod_name force_compiler = True elif mod_name.startswith(craype_mod): craype_mod = mod_name force_craype = True elif not (mod_name == 'cpe' or mod_name.startswith('cpe/')): collect_deps.append(mod_name) # # Now start generating the load commands and other stuff. # collect_statements = [''] # Will start with an empty line. # Do we need a family directive? if PrgEnv_family == 'prgenv': collect_statements = collect_statements + [ 'family(\'PrgEnv\')', '' ] elif PrgEnv_family == 'cpetoolchain': collect_statements = collect_statements + [ 'family(\'cpeToolchain\')', '' ] # Do we need to unload the PrgEnv modules? if PrgEnv_family == None and self.cfg['PrgEnv_load']: # Need to unload all PrgEnv-* modules except the one used by the module for prgenv in [prgenv for prgenv in KNOWN_PRGENVS if not prgenv_mod.startswith(prgenv)]: collect_statements.append(self.module_generator.unload_module(prgenv).strip()) collect_statements.append('') elif (PrgEnv_family == 'cpetoolchain' or PrgEnv_family == None) and not self.cfg['PrgEnv_load'] : # Need to unload all PrgEnv-* modules. 
for prgenv in KNOWN_PRGENVS: collect_statements.append(self.module_generator.unload_module(prgenv).strip()) collect_statements.append('') # Do we need to unload the cpe* modules? if PrgEnv_family == None: for cpe in [cpe for cpe in KNOWN_CPEMODS if not self.name.startswith(cpe)]: collect_statements.append(self.module_generator.unload_module(cpe).strip()) collect_statements.append('') # Set PE_ENV if no PrgEnv-* module is loaded. if not self.cfg['PrgEnv_load']: collect_statements.append(self.module_generator.set_environment('PE_ENV', prgenv_name.upper(), False).lstrip()) # Load the cpe module (if CPE_load is first) if self.cfg['CPE_load'] != None and self.cfg['CPE_load'].lower() == 'first': collect_statements.append(self.module_generator.load_module(cpe_mod, recursive_unload=False).lstrip()) # load statement for selected PrgEnv module (only when not loaded yet) if self.cfg['PrgEnv_load']: collect_statements.append(self.module_generator.load_module(prgenv_mod, recursive_unload=False).lstrip()) # Load the cpe module (if CPE_load is after) if self.cfg['CPE_load'] != None and self.cfg['CPE_load'].lower() == 'after': collect_statements.append(self.module_generator.load_module(cpe_mod, recursive_unload=False).lstrip()) # Prepare the load statements for the targeting modules for dep in self.cfg['cray_targets']: collect_statements.append(self.module_generator.load_module(dep, recursive_unload=False).lstrip()) # Load the selected compiler module, if not done through the dependencies or PrgEnv if (not self.cfg['PrgEnv_load']) or force_compiler: collect_statements.append(self.module_generator.load_module(compiler_mod, recursive_unload=False).lstrip()) # Load the Cray compiler wrapper module, if not done through the dependencies if (not self.cfg['PrgEnv_load']) or force_craype: collect_statements.append(self.module_generator.load_module(craype_mod, recursive_unload=False).lstrip()) # Now load the dependencies, using the full name and version if they are specified that way. for dep in collect_deps: collect_statements.append(self.module_generator.load_module(dep).lstrip()) # Load the cpe module (if CPE_load is last) if self.cfg['CPE_load'] != None and self.cfg['CPE_load'].lower() == 'last': collect_statements.append(self.module_generator.load_module(cpe_mod, recursive_unload=False).lstrip()) # Assemble all module unload/load statements. txt = '\n'.join(collect_statements) return txt
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exec_module(self, module):\n\n if not self.filename.endswith(config.FILE_EXT) and not self.filename.endswith(\n \"__init__.py\"\n ):\n print(\"Fatal error: ExtensionLoader is asked to load a normal file.\")\n print(\"filename:\", self.filename)\n print(\"Expected extension:\", config.FILE_EXT)\n raise SystemExit\n\n name = module.__name__\n if module.__name__ == config.MAIN_MODULE_NAME:\n module.__name__ = \"__main__\"\n config.MAIN_MODULE_NAME = None\n\n with open(self.filename) as f:\n source = f.read()\n\n transforms.identify_requested_transformers(source)\n\n if config.TRANSFORMERS:\n original = source\n source = transforms.add_all_imports(source)\n source = transforms.apply_source_transformations(source)\n\n if config.DIFF and original != source:\n self.write_html_diff(name, original, source)\n\n if config.CONVERT and self.filename.endswith(config.FILE_EXT):\n print(\"############### Original source: ############\\n\")\n print(original)\n print(\"\\n############### Converted source: ############\\n\")\n print(source)\n print(\"=\" * 50, \"\\n\")\n\n source = transforms.apply_ast_transformations(source)\n exec(source, vars(module))", "def exec_module(self, module):\n pass", "def modules():", "def run_module(args, module_path, workspace, module_data):\n\n mod_path = module_path.replace('./', '')\n curr_path = os.getcwd()\n tfvar_path = module_path.replace('./components/', '')\n print(\"curr_path = {0}\".format(curr_path))\n print(\"DEBUG module_path = {0}\".format(module_path))\n module_name = module_path.split('/')[-1]\n print(\"DEBUG module_name = {0}\".format(module_name))\n\n key_config = \"\\\"key={0}/terraform.tfstate\\\"\".format(module_name)\n bucket_region_config = \"\\\"region={0}\\\"\".format(module_data[\"bucket_region\"])\n bucket_config = \"\\\"bucket={0}\\\"\".format(module_data[\"bucket\"])\n dynamodb_config = \"\\\"dynamodb_table={0}\\\"\".format(module_data[\"dynamodb\"])\n\n plan_output_file = \"plan.out\"\n tf_varfile = f\"{curr_path}/tfvars/{tfvar_path}/{workspace}.tfvars\"\n tf_varfile_common = f\"{curr_path}/tfvars/terraform.tfvars\"\n tf_varfile_tags = f\"{curr_path}/tfvars/core/taggings/{workspace}.tfvars\"\n backend_override = f\"{curr_path}/variables/config/backend_override.tf\"\n providers_override = f\"{curr_path}/variables/config/providers_override.tf\"\n\n softlinking_files(mod_path)\n\n remove_prev_run = f\"cd {module_path} && rm -f {plan_output_file} && rm -rf .terraform\"\n cp_override_cmd = f\"cd {module_path} && cp {backend_override} . 
&& cp {providers_override} .\"\n\n tf_plan_cmd = f\"cd {module_path} && terraform workspace new {workspace} || terraform workspace select {workspace} && terraform plan -out {plan_output_file} --var-file {tf_varfile} --var-file {tf_varfile_common} --var-file {tf_varfile_tags}\"\n tf_plan_destroy_cmd = f\"cd {module_path} && terraform workspace new {workspace} || terraform workspace select {workspace} && terraform plan -destroy --var-file {tf_varfile} --var-file {tf_varfile_common} --var-file {tf_varfile_tags} -out {plan_output_file}\"\n tf_apply_cmd = f\"cd {module_path} && terraform workspace new {workspace} || terraform workspace select {workspace} && terraform apply {plan_output_file}\"\n tf_init_cmd = f\"cd {module_path} && terraform init --backend-config={key_config} --backend-config={bucket_region_config} --backend-config={dynamodb_config} --backend-config={bucket_config} && terraform workspace new {workspace} || terraform workspace select {workspace}\"\n print(tf_init_cmd) # let's leave this in\n\n os.system(remove_prev_run)\n os.system(cp_override_cmd)\n os.system(tf_init_cmd)\n\n if args.action.lower() == 'plan':\n # always auto approve 'plan' action\n os.system(tf_plan_cmd)\n elif args.action.lower() == 'plan-destroy':\n # always auto approve 'plan' action\n os.system(tf_plan_destroy_cmd)\n elif args.action.lower() == 'apply':\n if args.approve:\n # auto-approve flag enabled so skip user confirmation\n os.system(tf_plan_cmd)\n os.system(tf_apply_cmd)\n else:\n os.system(tf_plan_cmd)\n # confirm with user first\n if user_confirmation(\"Sure you want to APPLY {0}\".format(module_path)):\n os.system(tf_apply_cmd)\n else:\n print(\"User aborting...\")\n elif args.action.lower() == 'apply-destroy':\n if args.approve:\n os.system(tf_plan_cmd)\n os.system(tf_apply_cmd)\n else:\n # confirm with user first\n os.system(tf_plan_destroy_cmd)\n if user_confirmation(\"Sure you want to APPLY DESTROY {0}\".format(module_path)):\n os.system(tf_apply_cmd)\n else:\n print(\"User aborting...\")", "def module_file(module):\n ...", "def GenPy(mod,fname):\n f = open(fname, 'w')\n title = \"\"\"#\n# This file is generated automatically\n# Author:IAN\n# http://www.iknot.org\n\"\"\"\n f.write(title)\n for i in mod.__dict__.keys():\n s = \"def \" + i + \"():\" + \"\\n\"\n f.write(s)\n s = \" return\"\n f.write(s + \"\\n\")\n f.close()\n kcs_ui.message_noconfirm('py file saved to:%s'%(fname))", "def test_module_doc():\r\n\r\n for fname in os.listdir('.'):\r\n if fname.endswith('.py'):\r\n f = fname.split('.')[0]\r\n print 'Executing ', fname\r\n execfile(fname, locals())", "def framework_load_weights(self):\n omit_modules = cfg.get('omit_modules_from_loading', [])\n\n for dest_module_path, path in self.get_load_paths():\n _print(\"Loading submodule \\\"{}\\\" from {}.\".format(dest_module_path, path))\n\n if \":\" in path:\n source_module_path, source_path = path.split(':')\n else:\n source_path = path\n source_module_path = dest_module_path\n\n start = time.time()\n\n device = get_pytorch_device()\n\n loaded_state_dict = torch.load(source_path, map_location=device)['model']\n\n if source_module_path:\n source_module_path_with_sep = source_module_path + '.'\n\n loaded_state_dict = type(loaded_state_dict)(\n {k: v for k, v in loaded_state_dict.items() if k.startswith(source_module_path_with_sep)}\n )\n\n assert loaded_state_dict, (\n f\"File contains no tensors with prefix `{source_module_path_with_sep}` (file: {source_path})\"\n )\n\n if dest_module_path != source_module_path:\n # Rename variables from the 
loaded state dict by replacing `source_module_path` with `dest_module_path`.\n\n _source_module_path = source_module_path + '.' if source_module_path else source_module_path\n _dest_module_path = dest_module_path + '.' if dest_module_path else dest_module_path\n\n loaded_state_dict = {\n k.replace(_source_module_path, _dest_module_path, 1): v\n for k, v in loaded_state_dict.items()\n }\n\n module = self.updater.model\n\n state_dict = module.state_dict()\n\n intersection = set(state_dict.keys()) & set(loaded_state_dict.keys())\n\n if not intersection:\n raise Exception(\n f\"Loading variables with spec ({dest_module_path}, {path}) \"\n f\"would have no effect (no variables found).\"\n )\n loaded_state_dict = {k: loaded_state_dict[k] for k in intersection}\n\n if omit_modules:\n omitted_variables = {\n k: v for k, v in loaded_state_dict.items()\n if any(k.startswith(o) for o in omit_modules)\n }\n\n print(\"Omitting the following variables from loading:\")\n describe_structure(omitted_variables)\n\n loaded_state_dict = {\n k: v for k, v in loaded_state_dict.items()\n if k not in omitted_variables\n }\n\n _print(\"Loading variables:\")\n describe_structure(loaded_state_dict)\n\n state_dict.update(loaded_state_dict)\n\n module.load_state_dict(state_dict, strict=True)\n\n _print(\"Done loading weights for module {}, took {} seconds.\".format(dest_module_path, time.time() - start))", "def main():\n ModLoader.add_constructor(\"!ec2rlcore.module.Module\", ModLoader.ignoretag)\n\n mod_src_dir = os.path.join(os.getcwd(), \"src\")\n try:\n os.stat(mod_src_dir)\n except Exception:\n os.mkdir(mod_src_dir)\n\n try:\n for mod_file_name in os.listdir(os.path.join(root_ec2rl_dir, \"mod.d\")):\n if mod_file_name == \"ex_remediation.yaml\":\n continue\n with open(os.path.join(root_ec2rl_dir, \"mod.d\", mod_file_name), \"r\") as yamlfile:\n module = yaml.load(yamlfile, Loader=ModLoader)\n if module[\"language\"] == \"python\":\n mod_src_path = os.path.join(mod_src_dir, \"{}.py\".format(module[\"name\"]))\n with open(mod_src_path, \"w\") as pyfile:\n pyfile.write(module[\"content\"])\n print(\"Wrote: {}\".format(mod_src_path))\n print(\"Conversion complete.\")\n except Exception as ex:\n print(ex)\n print(\"Conversion failed. 
Please review the exception to resolve\")", "def main(module_filename: str):\n version = load_unicode_version()\n print(f\"Generating module for Unicode {version[0]}.{version[1]}.{version[2]}\")\n\n eaw_map = load_east_asian_widths()\n zw_map = load_zero_widths()\n\n # Characters marked as zero-width in zw_map should be zero-width in the final map\n width_map = list(\n map(lambda x: EffectiveWidth.ZERO if x[1] else x[0], zip(eaw_map, zw_map))\n )\n\n # Override for soft hyphen\n width_map[0x00AD] = EffectiveWidth.NARROW\n\n # Override for Hangul Jamo medial vowels & final consonants\n for i in range(0x1160, 0x11FF + 1):\n width_map[i] = EffectiveWidth.ZERO\n\n tables = make_tables(TABLE_CFGS, enumerate(width_map))\n\n print(\"------------------------\")\n total_size = 0\n for (i, table) in enumerate(tables):\n size_bytes = len(table.to_bytes())\n print(f\"Table {i} Size: {size_bytes} bytes\")\n total_size += size_bytes\n print(\"------------------------\")\n print(f\" Total Size: {total_size} bytes\")\n\n emit_module(module_filename, version, tables)\n print(f'Wrote to \"{module_filename}\"')", "def load_sources(self):\n self.pymodule = imp.load_source(self.name, self.path)", "def generate_source():\n \"\"\"their dependencies\"\"\"\n global dictionary_names, dictionary_slices\n src = \"\"\n for s in dictionary_slices:\n src += deconstruct(s)\n src += \" '\" + pointer_to_name(s)\n src += \"' define\\n\"\n return src + \"\\n\"", "def _load_statements(self):\n home = Path(\".\")\n context = {\"table_name\": self.TABLE}\n self.sql = {}\n for path in home.glob(\"./sql/*\"):\n with open(path) as f:\n template = Template(f.read().strip())\n self.sql[path.stem] = template.render(context)", "def gen_python_addr_module(module_name,root,creg_base,sreg_base):\n fo = open(module_name+\".py\",\"w\")\n fo.write(\"\"\n \"\\\"\\\"\\\"This file is automatically generated by the \"+sys.argv[0]+\" script\\n\"\n \"All modifications should be done in that file\\n\\\"\\\"\\\"\\n\"+\n root.name+\"_dict=\")\n (res,creg_base,sreg_base)=root.gen_python_addr(creg_base,sreg_base)\n fo.write(res+\"\\n\")\n fo.write(\"\"\n \"#Convert the dictionary to object, as described in https://stackoverflow.com/a/6993694/1735409\\n\"\n \"class Struct(object):\\n\"\n \" def __init__(self, data):\\n\"\n \" for name, value in data.items():\\n\"\n \" setattr(self, name, self._wrap(value))\\n\"\n \" def _wrap(self, value):\\n\"\n \" if isinstance(value, (tuple, list, set, frozenset)):\\n\"\n \" return type(value)([self._wrap(v) for v in value])\\n\"\n \" else:\\n\"\n \" return Struct(value) if isinstance(value, dict) else value\\n\"+\n root.name+\"=Struct(\"+root.name+\"_dict)\\n\")\n fo.close()", "def gen_load(self,\n code: 'Code',\n frame: 'StackLayout',\n symtab: 'SymbolTable',\n regalloc: 'AllocInfo',\n dest_symb: 'RegisterSymb' = None) -> int:\n pass", "def generate(module_name, module_path, target_dir):\n if not (Path(module_path) / 'builtins.stub.py').exists():\n copy(Path(__file__).parent.parent / 'stubs/builtins.stub.py', module_path)\n build_swift_wrappers_module(module_name, module_path, target_dir)", "def do_workload(self):\n module_manager = self._core.get_module_manager()\n module = module_manager.get_module_by_name(self._values[\"name\"])\n module_manager.update_module(module)", "def _boilerplate_to_python(indent):\n indent_str = \" \" * indent\n boilerplate = indent_str + \"import core.vba_library\\n\"\n boilerplate = indent_str + \"import core.vba_context\\n\"\n boilerplate += indent_str + \"from core.utils import 
safe_print\\n\"\n boilerplate += indent_str + \"from core.utils import safe_str_convert\\n\"\n boilerplate += indent_str + \"from core.utils import plus\\n\"\n boilerplate += indent_str + \"from core.utils import eq\\n\"\n boilerplate += indent_str + \"from core.utils import neq\\n\"\n boilerplate += indent_str + \"from core.utils import lt\\n\"\n boilerplate += indent_str + \"from core.utils import lte\\n\"\n boilerplate += indent_str + \"from core.utils import gt\\n\"\n boilerplate += indent_str + \"from core.utils import gte\\n\"\n boilerplate += indent_str + \"import core.utils\\n\"\n boilerplate += indent_str + \"from core.python_jit import update_array\\n\"\n boilerplate += indent_str + \"from core.vba_conversion import coerce_to_num\\n\"\n boilerplate += indent_str + \"from core.vba_conversion import coerce_to_int\\n\"\n boilerplate += indent_str + \"from core.vba_conversion import coerce_to_str\\n\"\n boilerplate += indent_str + \"from core.vba_conversion import coerce_to_int_list\\n\\n\"\n boilerplate += indent_str + \"try:\\n\"\n boilerplate += indent_str + \" \" * 4 + \"vm_context\\n\"\n boilerplate += indent_str + \"except (NameError, UnboundLocalError):\\n\"\n boilerplate += indent_str + \" \" * 4 + \"vm_context = context\\n\"\n return boilerplate", "def load(self):\n address = 0\n if len(sys.argv) < 2:\n print(\"Please pass in a second file name: python3 ls8.py second_filename.py\")\n sys.exit()\n file_name = sys.argv[1]\n try:\n file = open(file_name, \"r\")\n except FileNotFoundError:\n print(f\"{sys.argv[0]}: {sys.argv[1]} file was not found.\")\n sys.exit()\n \n for line in file.readlines():\n instruction = line.split(\"#\")[0]\n instruction = instruction.strip() \n if len(instruction) > 0:\n self.ram_write(address, int(instruction, 2))\n address += 1 \n file.close()", "def test_taskfile_taskmod_loaded(monkeypatch, modpath):\n monkeypatch.setattr(loadlimit.importhook, 'lstaskfiles', fake_lstaskfiles)\n monkeypatch.setattr(loadlimit.importhook, 'SourceFileLoader',\n FakeSourceFileLoader)\n\n taskfiles = ['a_{}.py'.format(i) for i in range(10)]\n names = [splitext(n)[0] for n in taskfiles]\n pypath = ['{}.{}'.format(modpath, n) for n in names]\n randpath = choice(pypath)\n\n sys.meta_path.append(TaskImporter(*taskfiles))\n import_module(modpath)\n\n # Forcibly remove the generated taskfile\n sys.modules.pop(randpath)\n\n import_module(randpath)", "def load_scipy_linalg_interface_gen(finder, module):\n module.IgnoreName(\"pre\")", "def generate_loader(mode, symbols, definition, linker):\n if \"vanilla\" == mode:\n loader_content = generate_loader_vanilla()\n elif \"dlfcn\" == mode:\n loader_content = generate_loader_dlfcn(symbols, linker)\n else:\n loader_content = generate_loader_hash(symbols)\n ret = template_loader % (definition, loader_content)\n if \"maximum\" != mode:\n ret += template_und_symbols\n return ret", "def _reload(mod,larch=None,**kw):\n\n if isinstance(mod, str):\n return larch.import_module(mod, do_reload=True)\n\n for k,v in chain(larch.symtable._sys.modules.iteritems(), sys.modules.iteritems()):\n if v == mod:\n modname = k\n break\n try:\n return larch.import_module(modname,do_reload=True)\n except NameError:\n pass", "def testPreProcessedImport(self):\n a = 'a.mojom'\n self.WriteFile(a, \"\"\"\\\n module a;\n struct Bar {};\"\"\")\n self.ParseMojoms([a])\n\n b = 'b.mojom'\n self.WriteFile(\n b, \"\"\"\\\n module b;\n import \"a.mojom\";\n struct Foo { a.Bar bar; };\"\"\")\n self.ParseMojoms([b])", "def bp_ins(filename, start, end):\n with 
open(filename, 'r') as f:\n lines = f.readlines()\n lines.insert(start-1, \"\")\n lines.insert(end+1, \"\")\n lines.insert(0, \"\")\n lines[start-1] = 'ipdb.set_trace()\\n'\n lines[end+1] = 'ipdb.set_trace()\\n'\n lines[0] = \"import ipdb\\n\"\n with open(f\"break_{filename}\", 'w+') as f:\n f.writelines(lines)", "def reload(self):\n\n\t\tif self.module is None:\n\t\t\t# Do nothing, as the module will be imported on attribute access.\n\t\t\tpass\n\t\telse:\n\t\t\texec \"reload(\" + self.name + \")\"\n\t\t\t# The module object is still identical, only its code has been\n\t\t\t# replaced. Thus no eval(self.name) is necessary.", "def reorder_module_calls(lines):\n\n code_len = len(lines)\n module_calls = []\n module_start = 0\n module_call = []\n output_io = 0\n boundary = 0\n new_module = 0\n prev_module_name = \"\"\n first_line = -1\n last_line = -1\n reset = 0\n\n for pos in range(code_len):\n line = lines[pos]\n if line.find(\"/* Module Call */\") != -1:\n if module_start == 0:\n module_start = 1\n else:\n module_start = 0\n\n if module_start:\n # Examine if the module is an output I/O module\n nxt_line = lines[pos + 1]\n if nxt_line.find(\"IO\") != -1 and nxt_line.find(\"out\") != -1:\n output_io = 1\n # Examine if the module is an boundary module\n if nxt_line.find(\"boundary\") != -1:\n boundary = 1\n # Extract the module name\n module_name = nxt_line.strip()[:-9]\n if boundary:\n module_name = module_name[:-9]\n if prev_module_name == \"\":\n prev_module_name = module_name\n first_line = pos\n else:\n if prev_module_name != module_name:\n new_module = 1\n prev_module_name = module_name\n first_line = pos\n reset = 0\n else:\n if reset:\n first_line = pos\n reset = 0\n new_module = 0\n\n if not module_start:\n if output_io:\n last_line = pos\n module_call.append(line)\n module_calls.append(module_call.copy())\n module_call.clear()\n if boundary:\n # Reverse the list\n module_calls.reverse()\n # Insert it back\n left_lines = lines[last_line + 1:]\n lines = lines[:first_line]\n first = 1\n for c in module_calls:\n if not first:\n lines.append(\"\\n\")\n lines = lines + c\n first = 0\n lines = lines + left_lines\n # Clean up\n module_calls.clear()\n boundary = 0\n output_io = 0\n reset = 1\n if new_module:\n # Pop out the previous module calls except the last one\n module_calls = module_calls[-1:]\n\n if module_start and output_io:\n module_call.append(line)\n\n return lines", "def autoload(self):\n # From 'autoload' configuration option\n specs = self._create_module_list_of(\"specs_to_load\")\n # From 'load' configuration option\n literals = self.conf.literals_to_load\n return specs + literals", "def add_module(self, module):\n getattr(module, 'load_bench')(self)", "def get_src(self):\n\n self.codegen = json.loads(self.cmod.get_source(\"json\"))\n self.sub_module_name = self.codegen[\"symbol\"]\n self.nodes = self.codegen[\"nodes\"]\n self.clml_code.append(self.MakeHeader.substitute(module=self.sub_module_name))\n\n def get_tensor_from_map(\n node_seq, shape=None, layout=\"CL_TENSOR_LAYOUT_OPTIMAL_QCOM\", dtype=\"float32\"\n ):\n if node_seq in self.node_map:\n return self.node_map[node_seq]\n else:\n node = self.nodes[node_seq]\n dtype = str(node[\"attrs\"][\"dtype\"][0][0])\n if node[\"op\"] == \"input\":\n self.clml_code.append(\"// Input Node\")\n node_out_name = self.sub_module_name + \"_\" + \"input_\" + str(node_seq)\n else:\n node_out_name = node[\"name\"]\n if shape is None:\n shape = str(tuple(node[\"attrs\"][\"shape\"][0][0]))[1:-1]\n\n self.clml_code.append(\n 
self.MakeCLMLTensor.substitute(\n name=node_out_name, shape=shape, dtype=dtype, layout=layout\n )\n )\n self.clml_code.append(\n self.MapInsert.substitute(nid=node_out_name, tensor_desc=node_out_name)\n )\n if node[\"op\"] == \"input\":\n self.clml_code.append(\n Template(\"runner.inputs.push_back($clml_input);\").substitute(\n clml_input=node_out_name\n )\n )\n self.input_meta.append(\n self.MakeInputMetaInfo.substitute(\n in_name=node_out_name, dtype=dtype, shape=shape\n )\n )\n\n if self.nodes[node_seq][\"op\"] == \"const\":\n self.clml_code.append(\n Template('runner.consts.push_back(\"$nid\");').substitute(nid=node[\"name\"])\n )\n self.node_map[node_seq] = node_out_name\n return node_out_name\n\n def make_output_tensor(\n node, node_seq, shape=None, layout=\"CL_TENSOR_LAYOUT_OPTIMAL_QCOM\", dtype=\"float32\"\n ):\n if dtype is None:\n dtype = str(node[\"attrs\"][\"dtype\"][0][0])\n if shape is None:\n shape = str(tuple(node[\"attrs\"][\"shape\"][0][0]))[1:-1]\n node_out_name = self.sub_module_name + \"_\" + \"layer_out_\" + str(node_seq)\n self.clml_code.append(\n self.MakeCLMLTensor.substitute(\n name=node_out_name,\n shape=shape,\n dtype=dtype,\n layout=layout,\n )\n )\n return node_out_name\n\n for node_seq, node in enumerate(self.nodes):\n if node[\"op\"] == \"kernel\":\n self.clml_code.append(\"// Kernel Node : \" + node[\"name\"])\n if node[\"name\"] == \"nn.conv2d\" or node[\"name\"] == \"nn.depthwise_conv2d\":\n if \"padding\" in node[\"attrs\"]:\n padding = str(tuple(int(x) for x in node[\"attrs\"][\"padding\"][0]))[1:-1]\n else:\n padding = \"0, 0, 0, 0\"\n dilation = str(tuple(int(x) for x in node[\"attrs\"][\"dilation\"][0]))[1:-1]\n strides = str(tuple(int(x) for x in node[\"attrs\"][\"strides\"][0]))[1:-1]\n groups = node[\"attrs\"][\"groups\"][0][0]\n if node[\"name\"] == \"nn.conv2d\":\n mode = \"CL_CONVOLUTION_MODE_CONVOLUTION_QCOM\"\n else:\n mode = \"CL_CONVOLUTION_MODE_DEPTHWISE_QCOM\"\n activation = \"CL_ACTIVATION_RELU\"\n has_act = False\n if \"activation_type\" in node[\"attrs\"]:\n has_act = True\n activation = node[\"attrs\"][\"activation_type\"][0][0]\n if activation == \"relu\":\n activation = \"CL_ACTIVATION_RELU\"\n elif activation == \"relu6\":\n activation = \"CL_ACTIVATION_RELU6\"\n else:\n RuntimeError(\"Unknown activation:\" + activation)\n has_bias = bool((node[\"inputs\"] == 3) or (node[\"inputs\"] == 7))\n has_bn = bool((node[\"inputs\"] == 6) or (node[\"inputs\"] == 7))\n input_tensor = get_tensor_from_map(node[\"inputs\"][0][0])\n weight_tensor = get_tensor_from_map(node[\"inputs\"][1][0])\n if not has_bias:\n bias_tensor = \"runner.unusedTensor\"\n else:\n bias_tensor = get_tensor_from_map(node[\"inputs\"][2][0])\n\n node_out_name = make_output_tensor(node, node_seq)\n\n if not has_bn:\n self.clml_code.append(\n self.MakeConv2D.substitute(\n input_tensor=input_tensor,\n weight_tensor=weight_tensor,\n bias_tensor=bias_tensor,\n output_tensor=node_out_name,\n padding=padding,\n dilation=dilation,\n strides=strides,\n groups=groups,\n mode=mode,\n activation=activation,\n has_bias=\"true\" if has_bias else \"false\",\n has_act=\"true\" if has_act else \"false\",\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n else:\n bn_index = 3 if has_bias else 2\n bn_attrs = tuple(node[\"attrs\"][\"batchnorm\"][0][0])\n axis = bn_attrs[0]\n bn_shape = [1, 1, 1, 1]\n bn_node = self.nodes[node[\"inputs\"][bn_index][0]]\n bn_shape[axis] = bn_node[\"attrs\"][\"shape\"][0][0]\n dtype = bn_node[\"attrs\"][\"dtype\"][0][0]\n\n bn_scale_tensor = 
get_tensor_from_map(\n node[\"inputs\"][bn_index][0],\n shape=str(tuple(bn_shape))[1:-1],\n dtype=dtype,\n )\n\n bn_bias_tensor = get_tensor_from_map(\n node[\"inputs\"][bn_index + 1][0],\n shape=str(tuple(bn_shape))[1:-1],\n dtype=dtype,\n )\n\n bn_mean_tensor = get_tensor_from_map(\n node[\"inputs\"][bn_index + 2][0],\n shape=str(tuple(bn_shape))[1:-1],\n dtype=dtype,\n )\n\n bn_var_tensor = get_tensor_from_map(\n node[\"inputs\"][bn_index + 3][0],\n shape=str(tuple(bn_shape))[1:-1],\n dtype=dtype,\n )\n\n self.clml_code.append(\n self.MakeConv2DWithBN.substitute(\n input_tensor=input_tensor,\n weight_tensor=weight_tensor,\n bias_tensor=bias_tensor,\n output_tensor=node_out_name,\n bn_scale_tensor=bn_scale_tensor,\n bn_bias_tensor=bn_bias_tensor,\n bn_mean_tensor=bn_mean_tensor,\n bn_var_tensor=bn_var_tensor,\n bn_attrs=str(bn_attrs)[1:-1],\n padding=padding,\n dilation=dilation,\n strides=strides,\n groups=groups,\n mode=mode,\n activation=activation,\n has_bias=\"true\" if has_bias else \"false\",\n has_act=\"true\" if has_act else \"false\",\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n elif node[\"name\"] == \"nn.relu6\" or node[\"name\"] == \"nn.relu\":\n input_tensor = get_tensor_from_map(node[\"inputs\"][0][0])\n node_out_name = make_output_tensor(node, node_seq)\n relu_type = (\n \"CL_ACTIVATION_RELU\" if node[\"name\"] == \"nn.relu\" else \"CL_ACTIVATION_RELU6\"\n )\n self.clml_code.append(\n self.MakeRelu.substitute(\n input_tensor=input_tensor,\n output_tensor=node_out_name,\n relu_type=relu_type,\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n elif node[\"name\"] == \"nn.batch_norm\":\n bn_attrs = tuple(node[\"attrs\"][\"batchnorm\"][0][0])\n axis = bn_attrs[0]\n bn_shape = [1, 1, 1, 1]\n bn_node = self.nodes[node[\"inputs\"][0][0]]\n bn_shape[axis] = bn_node[\"attrs\"][\"shape\"][0][0]\n dtype = bn_node[\"attrs\"][\"dtype\"][0][0]\n bn_scale_tensor = get_tensor_from_map(\n node[\"inputs\"][0][0], shape=str(tuple(bn_shape))[1:-1], dtype=dtype\n )\n bn_bias_tensor = get_tensor_from_map(\n node[\"inputs\"][1][0], shape=str(tuple(bn_shape))[1:-1], dtype=dtype\n )\n bn_mean_tensor = get_tensor_from_map(\n node[\"inputs\"][2][0], shape=str(tuple(bn_shape))[1:-1], dtype=dtype\n )\n bn_var_tensor = get_tensor_from_map(\n node[\"inputs\"][3][0], shape=str(tuple(bn_shape))[1:-1], dtype=dtype\n )\n\n input_tensor = get_tensor_from_map(node[\"inputs\"][0][0])\n node_out_name = make_output_tensor(node, node_seq)\n\n self.clml_code.append(\n self.MakeBN.substitute(\n input_tensor=input_tensor,\n output_tensor=node_out_name,\n bn_scale_tensor=bn_scale_tensor,\n bn_bias_tensor=bn_bias_tensor,\n bn_mean_tensor=bn_mean_tensor,\n bn_var_tensor=bn_var_tensor,\n bn_attrs=str(bn_attrs)[1:-1],\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n elif node[\"name\"] in [\"nn.max_pool2d\", \"nn.avg_pool2d\", \"nn.l2_pool2d\"]:\n input_tensor = get_tensor_from_map(node[\"inputs\"][0][0])\n node_out_name = make_output_tensor(node, node_seq)\n pool_size = str(tuple(int(x) for x in node[\"attrs\"][\"pool_size\"][0]))[1:-1]\n strides = str(tuple(int(x) for x in node[\"attrs\"][\"strides\"][0]))[1:-1]\n padding = str(tuple(int(x) for x in node[\"attrs\"][\"padding\"][0]))[1:-1]\n self.clml_code.append(\n self.MakePool2D.substitute(\n input_tensor=input_tensor,\n output_tensor=node_out_name,\n pool_size=pool_size,\n strides=strides,\n padding=padding,\n pool_type=node[\"name\"],\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n elif node[\"name\"] in [\"nn.global_max_pool2d\", 
\"nn.global_avg_pool2d\"]:\n input_tensor = get_tensor_from_map(node[\"inputs\"][0][0])\n node_out_name = make_output_tensor(node, node_seq)\n in_node = self.nodes[node[\"inputs\"][0][0]]\n in_shape = str(tuple(in_node[\"attrs\"][\"shape\"][0][0]))[1:-1]\n self.clml_code.append(\n self.MakeGlobalPool2D.substitute(\n input_tensor=input_tensor,\n output_tensor=node_out_name,\n in_shape=in_shape,\n pool_type=node[\"name\"],\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n elif node[\"name\"] == \"reshape\":\n input_tensor = get_tensor_from_map(node[\"inputs\"][0][0])\n node_out_name = make_output_tensor(node, node_seq)\n self.clml_code.append(\n self.MakeReshape.substitute(\n input_tensor=input_tensor,\n output_tensor=node_out_name,\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n elif node[\"name\"] == \"concatenate\":\n input_len = len(node[\"inputs\"])\n in_list = str(\n [get_tensor_from_map(node[\"inputs\"][x][0]) for x in range(input_len)]\n )[1:-1]\n node_out_name = make_output_tensor(node, node_seq)\n axis = node[\"attrs\"][\"axis\"][0][0]\n self.clml_code.append(\n self.MakeConcatenate.substitute(\n in_list=in_list,\n output_tensor=node_out_name,\n axis=axis,\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n elif node[\"name\"] == \"nn.dense\":\n in_node = self.nodes[node[\"inputs\"][0][0]]\n in_shape = tuple(in_node[\"attrs\"][\"shape\"][0][0])\n wt_shape = tuple(in_node[\"attrs\"][\"shape\"][0][0])\n input_tensor = get_tensor_from_map(\n node[\"inputs\"][0][0], layout=\"CL_TENSOR_LAYOUT_NCHW_QCOM\"\n )\n weight_tensor = get_tensor_from_map(\n node[\"inputs\"][1][0],\n shape=str(tuple([1, 1, wt_shape[0], wt_shape[1]]))[1:-1],\n layout=\"CL_TENSOR_LAYOUT_NCHW_QCOM\",\n )\n node_out_name = make_output_tensor(\n node,\n node_seq,\n shape=str(tuple([in_shape[0], wt_shape[0], 1, 1]))[1:-1],\n layout=\"CL_TENSOR_LAYOUT_NCHW_QCOM\",\n )\n self.clml_code.append(\n self.MakeDense.substitute(\n input_tensor=input_tensor,\n weight_tensor=weight_tensor,\n output_tensor=node_out_name,\n in_shape=str(in_shape)[1:-1],\n wt_shape=str(wt_shape)[1:-1],\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n elif node[\"name\"] == \"nn.softmax\":\n input_tensor = get_tensor_from_map(node[\"inputs\"][0][0])\n node_out_name = make_output_tensor(node, node_seq)\n self.clml_code.append(\n self.MakeSoftMax.substitute(\n input_tensor=input_tensor,\n output_tensor=node_out_name,\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n elif node[\"name\"] == \"nn.pad\":\n input_tensor = get_tensor_from_map(node[\"inputs\"][0][0])\n node_out_name = make_output_tensor(node, node_seq)\n pad_mode = node[\"attrs\"][\"pad_mode\"][0][0]\n padding = str(tuple(int(x) for x in node[\"attrs\"][\"pad_width\"][0]))[1:-1]\n self.clml_code.append(\n self.MakePad.substitute(\n input_tensor=input_tensor,\n output_tensor=node_out_name,\n pad_mode=pad_mode,\n padding=padding,\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n elif node[\"name\"] == \"nn.batch_flatten\":\n input_tensor = get_tensor_from_map(node[\"inputs\"][0][0])\n node_out_name = make_output_tensor(node, node_seq)\n self.clml_code.append(\n self.MakeBatchFlatten.substitute(\n input_tensor=input_tensor,\n output_tensor=node_out_name,\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n elif node[\"name\"] == \"clip\":\n input_tensor = get_tensor_from_map(node[\"inputs\"][0][0])\n node_out_name = make_output_tensor(node, node_seq)\n a_max = node[\"attrs\"][\"a_max\"][0][0]\n a_min = node[\"attrs\"][\"a_min\"][0][0]\n self.clml_code.append(\n 
self.MakeClip.substitute(\n input_tensor=input_tensor,\n output_tensor=node_out_name,\n a_max=a_max,\n a_min=a_min,\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n elif node[\"name\"] in [\n \"add\",\n \"subtract\",\n \"multiply\",\n \"minimum\",\n \"maximum\",\n \"divide\",\n ]:\n input_a = get_tensor_from_map(node[\"inputs\"][0][0])\n input_b = get_tensor_from_map(node[\"inputs\"][1][0])\n node_out_name = make_output_tensor(node, node_seq)\n self.clml_code.append(\n self.MakeBinaryOp.substitute(\n input_a=input_a,\n input_b=input_b,\n output_tensor=node_out_name,\n op=node[\"name\"],\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n else:\n RuntimeError(\"Unsupported Op:\" + node[\"name\"])\n self.clml_code.append(\n self.MapInsert.substitute(nid=node_out_name, tensor_desc=node_out_name)\n )\n self.node_map[node_seq] = node_out_name\n\n elif node[\"op\"] not in [\"const\", \"input\"]:\n print(\"Unknown Node type:\", node[\"op\"])\n\n # Populate outputs\n out_nodes = self.codegen[\"heads\"]\n self.clml_code.append(\"// Populate outputs\")\n for nid_triple in out_nodes:\n nid = nid_triple[0]\n out_node = self.nodes[nid]\n dtype = str(out_node[\"attrs\"][\"dtype\"][0][0])\n shape = str(tuple(out_node[\"attrs\"][\"shape\"][0][0]))[1:-1]\n out_name = self.sub_module_name + \"_\" + \"layer_out_\" + str(nid)\n self.clml_code.append(\n Template(\n 'runner.outputs.insert({\"$out_name\", runner.storage_map[\"$out_name\"]});'\n ).substitute(out_name=out_name)\n )\n self.clml_code.append(\n Template('runner.outputs_dtypes.insert({\"$out_name\", \"$dtype\"});').substitute(\n out_name=out_name, dtype=dtype\n )\n )\n self.clml_code.append(\n Template(\n \"runner.outputs_shapes.insert\" '({\"$out_name\", std::vector<size_t>({$shape})});'\n ).substitute(out_name=out_name, shape=shape)\n )\n self.output_meta.append(\n self.MakeOutputMetaInfo.substitute(out_name=out_name, dtype=dtype, shape=shape)\n )\n\n # Mem allocation & Param copy\n self.clml_code.append(\"// Allocate Tensor Memory and copy params\")\n self.clml_code.append(\"runner.AllocateMemAndPopulateParams();\")\n\n # Meta data preparation\n self.clml_code.append(\n self.MakeMetaInfo.substitute(\n name=self.sub_module_name,\n input_count=len(self.input_meta),\n output_count=len(self.output_meta),\n input_meta=\"\\\\\\n\".join(self.input_meta),\n output_meta=\"\\\\\\n\".join(self.output_meta),\n )\n )\n\n self.clml_code.append(self.MakeFooter.substitute())\n return (self.sub_module_name, self.clml_code)", "def import_module(self, location, name):", "def __init__(self, modules, filenames, drop_fk=False):\n self.target_tables = []\n self.fk2update = {}\n self.fkcache = {}\n full_mapping = {} # ends up as {'module': {'table': {'column': v}}}\n # load the full mapping file\n if isinstance(filenames, str):\n filenames = [filenames]\n for filename in filenames:\n with open(filename) as stream:\n full_mapping.update(yaml.load(stream))\n # filter to keep only wanted modules\n self.mapping = {}\n self.deferred = {}\n for addon in modules:\n if addon not in full_mapping: # skip modules not in YAML files\n LOG.warn('Mapping is not complete: module \"%s\" is missing!', addon)\n continue\n elif full_mapping[addon] == '__nothing_to_do__':\n del full_mapping[addon]\n continue\n for source_column, target_columns in full_mapping[addon].items():\n #here we are going over entire dictionary for module, by module, can't we just merge to table dict now\n if '__' in source_column:\n # skip special markers\n continue\n if (target_columns in ('__forget__', False) #if 
it needs forgetting\n or self.mapping.get(source_column) == '__forget__'):\n self.mapping[source_column] = '__forget__'\n continue\n if target_columns is None:\n target_columns = {}\n try:\n self.mapping.setdefault(source_column, target_columns)\n self.mapping[source_column].update(target_columns)\n except:\n raise ValueError('Error in the mapping file: \"%s\" is invalid here'\n % repr(target_columns))\n # replace function bodies with real functions\n for incolumn in self.mapping:\n targets = self.mapping[incolumn]\n if targets in (False, '__forget__'):\n self.mapping[incolumn] = {}\n continue\n for outcolumn, function in targets.items():\n # TODO Implement dispatcher here\n if function in ('__copy__', '__moved__', None):\n continue\n if type(function) is not str:\n raise ValueError('Error in the mapping file: \"%s\" is invalid in %s'\n % (repr(function), outcolumn))\n if function == '__defer__':\n self.mapping[incolumn][outcolumn] = '__copy__'\n if not drop_fk:\n table, column = outcolumn.split('.')\n self.deferred.setdefault(table, set())\n self.deferred[table].add(column)\n continue\n if function.startswith('__fk__ '):\n if len(function.split()) != 2:\n raise ValueError('Error in the mapping file: \"%s\" is invalid in %s'\n % (repr(function), outcolumn))\n self.fk2update[outcolumn] = function.split()[1]\n self.mapping[incolumn][outcolumn] = '__copy__'\n continue\n if function.startswith('__ref__'):\n if len(function.split()) != 2:\n raise ValueError('Error in the mapping file: \"%s\" is invalid in %s'\n % (repr(function), outcolumn))\n # we handle that in the postprocess\n self.mapping[incolumn][outcolumn] = function\n continue\n function_body = \"def mapping_function(self, source_row, target_rows):\\n\"\n\n #everything to here is special cases\n function_body += '\\n'.join([4*' ' + line for line in function.split('\\n')])\n mapping_function = None\n exec(compile(function_body, '<' + incolumn + ' → ' + outcolumn + '>', 'exec'),\n globals().update({\n 'newid': self.newid,\n 'sql': self.sql,\n 'fk_lookup': self.fk_lookup}))\n self.mapping[incolumn][outcolumn] = mapping_function\n del mapping_function\n\n # build the discriminator mapping\n # build the stored field mapping.\n self.discriminators = {}\n self.stored_fields = {}\n for mapping in full_mapping.values():\n for key, value in mapping.items():\n if '__discriminator__' in key:\n self.discriminators.update({key.split('.')[0]: value})\n if '__stored__' in key:\n table = key.split('.')[0]\n self.stored_fields.setdefault(table, [])\n self.stored_fields[table] += value", "def reload(*mods):\n for mod in mods:\n importlib.reload(importlib.import_module(mod))", "def load_module(name):\n return __import__(\"metaswitch.%s\" % name,\n fromlist=[\"ROUTES\"])", "def _import_compiled_module(self, fullname):\n\n vfile = vfs.getFile(self.filename, False)\n\n # We can only import a compiled module if it already exists on\n # disk. This means if it's a truly virtual file that has no\n # on-disk equivalent, we have to write it to a temporary file\n # first.\n if hasattr(vfile, 'getMount') and \\\n isinstance(vfile.getMount(), VirtualFileMountSystem):\n # It's a real file.\n filename = self.filename\n else:\n # It's a virtual file. Dump it.\n filename = Filename.temporary('', self.filename.getBasenameWoExtension(),\n '.' 
+ self.filename.getExtension(),\n type = Filename.TDso)\n filename.setExtension(self.filename.getExtension())\n fin = open(vfile, 'rb')\n fout = open(filename, 'wb')\n data = fin.read(4096)\n while data:\n fout.write(data)\n data = fin.read(4096)\n fin.close()\n fout.close()\n\n module = imp.load_module(fullname, None, filename.toOsSpecific(),\n self.desc)\n module.__file__ = self.filename.cStr()\n return module", "def onReload(self,moduleName=\"NeedleFinder\"):\n if profiling : profbox()\n #framework\n globals()[moduleName] = slicer.util.reloadScriptedModule(moduleName)", "def mod_load(self):\n raise NotImplementedError(\"Mod load isn't overriden\")", "def as_module(file_path, name):\n\n with lock:\n with open(file_path, 'U') as module_file:\n prev = sys.dont_write_bytecode\n sys.dont_write_bytecode = True\n module = imp.load_module(name, module_file, file_path, (\".py\", 'U', imp.PY_SOURCE))\n sys.dont_write_bytecode = prev\n sys.modules[name] = module\n return module", "def preprocess_python_source(self, module, source):\n\n return source", "def load_mod_from_file(self, fpath):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tfpath = os.path.abspath(fpath)\n\t\tfile_ext = os.path.splitext(os.path.split(fpath)[-1])[-1]\n\t\tif file_ext.lower() != '.py':\n\t\t\treturn\n\t\twith open(fpath) as f:\n\t\t\tcontent = f.read().splitlines()\n\t\tok = False\n\t\tfor line in content:\n\t\t\tif line.strip() == 'from shutit_module import ShutItModule':\n\t\t\t\tok = True\n\t\t\t\tbreak\n\t\tif not ok:\n\t\t\tself.log('Rejected file: ' + fpath,level=logging.DEBUG)\n\t\t\treturn\n\t\t# Note that this attribute will only be set for 'new style' module loading, # this should be ok because 'old style' loading checks for duplicate # existing modules.\n\t\t# TODO: this is quadratic complexity\n\t\texistingmodules = [\n\t\t\tm for m in self.shutit_modules\n\t\t\tif getattr(m, '__module_file', None) == fpath\n\t\t]\n\t\tif existingmodules:\n\t\t\tself.log('Module already seen: ' + fpath,level=logging.DEBUG)\n\t\t\treturn\n\t\t# Looks like it's ok to load this file\n\t\tself.log('Loading source for: ' + fpath,level=logging.DEBUG)\n\n\t\t# Add this directory to the python path iff not already there.\n\t\tdirectory = os.path.dirname(fpath)\n\t\tif directory not in sys.path:\n\t\t\tsys.path.append(os.path.dirname(fpath))\n\t\t# TODO: use bytearray to encode?\n\t\tmod_name = base64.b32encode(fpath.encode()).decode().replace('=', '')\n\t\tpymod = imp.load_source(mod_name, fpath)\n\n\t\t# Got the python module, now time to pull the shutit module(s) out of it.\n\t\ttargets = [\n\t\t\t('module', self.shutit_modules), ('conn_module', self.conn_modules)\n\t\t]\n\t\tself.build['source'] = {}\n\t\tfor attr, target in targets:\n\t\t\tmodulefunc = getattr(pymod, attr, None)\n\t\t\t# Old style or not a shutit module, nothing else to do\n\t\t\tif not callable(modulefunc):\n\t\t\t\treturn\n\t\t\tmodules = modulefunc()\n\t\t\tif not isinstance(modules, list):\n\t\t\t\tmodules = [modules]\n\t\t\tfor module in modules:\n\t\t\t\tsetattr(module, '__module_file', fpath)\n\t\t\t\tShutItModule.register(module.__class__)\n\t\t\t\ttarget.add(module)\n\t\t\t\tself.build['source'][fpath] = open(fpath).read()", "def pybind11(self, line, cell):\n\n line = line.strip().rstrip(';')\n args = self.pybind11.parser.parse_args(shlex.split(line))\n code = self.format_code(cell)\n module = 'pybind11_{}'.format(self.compute_hash(code, args))\n libfile = cache_path(module + ext_suffix())\n need_rebuild = not os.path.isfile(libfile) or 
args.force\n if need_rebuild:\n source = self.save_source(code, module)\n self.build_module(module, source, args)\n self.import_module(module, libfile, import_symbols=not args.module)", "def __init__(\n self,\n module: Union[module_utils.CompiledModule, None],\n function: Union[Callable[[TracedModule], None], None],\n _load_dict: Optional[Dict[str, Any]] = None,\n ):\n if _load_dict is None:\n # Extract metadata from module and function.\n self.module_name = module.module_name\n self.compiled_paths = module.compiled_paths\n self.backend_name = module.backend_info.backend_name\n self.backend_id = module.backend_info.backend_id\n self.backend_driver = module.backend_info.driver\n self.iree_serializable = module.iree_serializable()\n self.tflite_serializable = module.tflite_serializable()\n self.function_name = function.__name__\n self.function_sourcefile = inspect.getsourcefile(function)\n source, start_line = inspect.getsourcelines(function)\n self.function_line_numbers = (start_line, start_line + len(source))\n self.function_source = \"\".join(source)\n\n self.calls = []\n else:\n self.module_name = _load_dict[\"module_name\"]\n self.compiled_paths = _load_dict[\"compiled_paths\"]\n self.backend_name = _load_dict[\"backend_name\"]\n self.backend_id = _load_dict[\"backend_id\"]\n self.backend_driver = _load_dict[\"backend_driver\"]\n self.iree_serializable = _load_dict[\"iree_serializable\"]\n self.tflite_serializable = _load_dict[\"tflite_serializable\"]\n self.function_name = _load_dict[\"function_name\"]\n self.function_sourcefile = _load_dict[\"function_sourcefile\"]\n self.function_line_numbers = _load_dict[\"function_line_numbers\"]\n self.function_source = _load_dict[\"function_source\"]\n self.calls = _load_dict[\"calls\"]", "def AddModule (self, module):\n getattr (module, 'load_bench') (self)", "def assemble_module(module):\n\n print(f\"Assembling module: {module.name}\")", "def __init__(self):\n ScriptedLoadableModuleLogic.__init__(self)", "def compile_modules(base, output, source, bind=True):\n return compile_files(base, output, source, bind, amd=True)", "def visit_Python(self, node):\n # This compiles the given Python ast into a Python code object\n # then disassembles it into a byteplay code object. 
This allows\n # us to interleave the instructions with those generated for\n # the rest of the module and then compile a single unified \n # code object.\n py_code = compile(node.py_ast, self.filename, mode='exec')\n bpc = Code.from_code(py_code)\n # Skip the SetLineo and ReturnValue codes\n self.code_ops.extend(bpc.code[1:-2])", "def _rai_module(self) -> str:\n module = [\"--loadmodule\", CONFIG.redisai]\n if self.queue_threads:\n module.append(f\"THREADS_PER_QUEUE {self.queue_threads}\")\n if self.inter_threads:\n module.append(f\"INTER_OP_PARALLELISM {self.inter_threads}\")\n if self.intra_threads:\n module.append(f\"INTRA_OP_PARALLELISM {self.intra_threads}\")\n return \" \".join(module)", "def genContextCode(self):\n states_code = '['+', '.join([s.title()+'()' for s in self.info['statenames']])+']'\n import_code = '\\n'.join([self.tmpl['import'].render(pkg=s.lower(), api=s.title()) for s in self.info['statenames']])\n transition_code = '\\n'.join([self.tmpl['ctx_transition'].render(transition=t) for t in self.info['transitionnames']])\n self.codeinfo['context.py'] = self.tmpl['ctx'].render({\"states\": states_code, \"initaction\": self.info['initaction'], \"import_code\": import_code, \"transition_code\": transition_code})", "def exec_module(cls, *args, **kwargs): # real signature unknown\n pass", "def exec_module(cls, *args, **kwargs): # real signature unknown\n pass", "def challenge_two():\n # from my_module import print_backwards # Uncommnet when you have made the file\n # print(print_backwards(\"hello\")) # Uncommnet when you have made the file", "def main():\n load()\n\n print(generate())", "def halp(module_text):\n input_lines = module_text.splitlines()\n input, old_outputs = strip_old_outputs(input_lines)\n env = set_up_globals(Halp(old_outputs))\n output = format_part(eval_module(input, env))\n return diff(output.splitlines(), input_lines)", "def codegen_reload_data():\n return {\n \"package\": u\"fn_task_utils\",\n \"message_destinations\": [u\"fn_task_utils\"],\n \"functions\": [u\"task_utils_add_note\", u\"task_utils_update_task\", u\"task_utils_close_task\", u\"task_utils_create\"],\n \"workflows\": [u\"task_utils_mark_task_optional\", u\"task_utils_add_note_to_task\", u\"task_utils_create_custom_task\", u\"task_utils_close_task\"],\n \"actions\": [u\"Example: Task Utils - Create Custom Task\", u\"Example: Task Utils - Make this Task Optional\", u\"Example: Task Utils - Add Note to Task\", u\"Example: Task Utils - Close Task\"],\n \"incident_fields\": [],\n \"incident_artifact_types\": [],\n \"datatables\": [],\n \"automatic_tasks\": [],\n \"scripts\": []\n }", "def load(self):\n\n\t\tif self.module is None:\n\t\t\t# Cause the interpreter to load the module in local namespace ...\n\t\t\texec \"import \" + self.name\n\n\t\t\t# Store the module object ...\n\t\t\tobject.__setattr__(self, 'module', eval(self.name))", "def load_module(cls, *args, **kwargs): # real signature unknown\n pass", "def load_module(cls, *args, **kwargs): # real signature unknown\n pass", "def load_module(cls, *args, **kwargs): # real signature unknown\n pass", "def generate_from(self, module):\n if module in self.done:\n return\n self.done.add(module)\n subpkg = util.camelize(module.arg)\n if self.ctx.rootpkg:\n fullpkg = '.'.join([self.ctx.rootpkg, subpkg])\n else:\n fullpkg = subpkg\n d = os.sep.join(self.d + [subpkg])\n if not self.ctx.opts.no_classes:\n # Generate Java classes\n src = ('module \"' + module.arg + '\", revision: \"' +\n putil.get_latest_revision(module) + '\".')\n generator = 
ClassGenerator(module,\n path=os.sep.join([self.ctx.opts.directory, subpkg]),\n package=fullpkg, src=src, ctx=self.ctx)\n generator.generate()\n\n if not self.ctx.opts.no_schema:\n # Generate external schema\n schema_nodes = ['<schema>']\n stmts = util.search(module, context.node_stmts)\n module_root = SchemaNode(self.ctx, module, '/')\n schema_nodes.extend(module_root.as_list())\n if self.ctx.opts.verbose:\n print('Generating schema node \"/\"...')\n schema_generator = SchemaGenerator(stmts, '/', self.ctx, fullpkg)\n schema_nodes.extend(schema_generator.schema_nodes())\n for i in range(1, len(schema_nodes)):\n # Indent all but the first and last line\n if schema_nodes[i] in ('<node>', '</node>'):\n schema_nodes[i] = ' ' * 4 + schema_nodes[i]\n else:\n schema_nodes[i] = ' ' * 8 + schema_nodes[i]\n schema_nodes.append('</schema>')\n\n name = util.normalize(util.search_one(module, 'prefix').arg)\n\n # Replace schema files store path\n s = d\n if self.ctx.opts.classpath_schema_loading:\n s = self.ctx.opts.classpath_schema_loading\n\n util.write_file(s, name + '.xml', '\\n'.join(schema_nodes), self.ctx)\n print(\"- Visiting module '\" + name + \"'\")\n\n if not self.ctx.opts.no_pkginfo:\n # Generate package-info.java for javadoc\n pkginfo_generator = PackageInfoGenerator(d, module, self.ctx)\n pkginfo_generator.generate_package_info()\n\n if self.ctx.opts.debug or self.ctx.opts.verbose:\n print('pkg ' + fullpkg + ' generated')", "def import_source(module_name):\n module_file_path = module_name.__file__\n module_name = module_name.__name__\n\n module_spec = importlib.util.spec_from_file_location(module_name, module_file_path)\n module = importlib.util.module_from_spec(module_spec)\n module_spec.loader.exec_module(module)\n print(dir(module))\n\n msg = \"The {module_name} module has the following methods:{methods}\"\n print(msg.format(module_name=module_name, methods=dir(module)))", "def preinitialisation(ctx, stmt):\n validmap = {\n u\"module\": [OCLintFunctions.check_module_rawtext],\n u\"submodule\": [OCLintFunctions.check_module_rawtext],\n }\n\n for fn in OCLintStages.map_statement_to_lint_fn(stmt, validmap):\n fn(ctx, stmt)", "def onReload(self,moduleName=\"MarkupsInViewsSelfTest\"):\n globals()[moduleName] = slicer.util.reloadScriptedModule(moduleName)", "def importOverride(name, glbls={}, lcls={}, fromlist=[], level=-1):\n module = None\n # First try the system __import__ first\n try:\n module = BUILTIN_IMPORT(name, glbls, lcls, fromlist, level)\n # You cannot log in this namespace, due to an infinite regression issue, so don't try\n # Although I am thinking that disabling the import override, logging, and re enabling it would work\n except ImportError as error:\n # Next we will try to import them as a *.cc\n # First we need to determine if it exists\n # Check the folders in CC_PATH\n for path in CC_PATH:\n # If the path exists\n if os.path.exists(path):\n # And the path/<module name>.cc exists\n if os.path.exists(os.path.join(path, name+'.cc')):\n # We will use the first one we find\n # No the magic happens, we will first create a temp file\n temp_file = tempfile.TemporaryFile()\n # Now we add the 'magic' to the top of the temp file\n temp_file.write(MAGIC)\n # Now open the file being imported\n module_file = open(os.path.join(path, name+'.cc'), 'r')\n # Read the module contents into the temp file\n temp_file.write(module_file.read())\n module_file.close()\n # Now rewind the temp file so it can be read from the beginning\n temp_file.seek(0)\n # Now import the module\n try:\n module = 
imp.load_module(name, temp_file, path, ('.cc', 'r', imp.PY_SOURCE))\n except Exception as exception:\n logError(sys.exc_info(), log.error, 'Error importing control code file %s.cc:' % name, MAGIC_LINENO)\n finally:\n temp_file.close()\n log.debug('Module %s loaded from %s using the special .cc import' % (name, path))\n # If module is still None, we didn't find it and we should raise the original error\n if not module:\n raise error\n return module", "def _simulate_import(self, node, is_import_from=False):\n if self.module is None:\n self._handle_imports(node.names)\n return\n\n source_code = decompile(node)\n\n if self._is_unimportable_module(node):\n self._handle_imports(node.names)\n self.log(logging.INFO, \"Ignoring import node\", source_code)\n return\n\n # create a pseudo-module and examine its dictionary to figure out what this imports\n # default to the current __file__ if necessary\n module_file = _safe_getattr(self.module, \"__file__\", __file__)\n random_suffix = \"\".join(\n random.choice(string.ascii_lowercase) for _ in range(10)\n )\n pseudo_module_file = re.sub(r\"\\.pyc?$\", random_suffix + \".py\", module_file)\n is_init = os.path.basename(module_file) in (\"__init__.py\", \"__init__.pyc\")\n if is_init:\n pseudo_module_name = self.module.__name__ + \".\" + random_suffix\n else:\n pseudo_module_name = self.module.__name__ + random_suffix\n\n # Apparently doing 'from file_in_package import *' in an __init__.py also adds\n # file_in_package to the module's scope.\n if (\n is_import_from\n and is_init\n and node.module is not None\n and \".\" not in node.module\n ): # not in the package\n if node.level == 1 or (node.level == 0 and node.module not in sys.modules):\n self._set_name_in_scope(node.module, node, TypedValue(types.ModuleType))\n\n with tempfile.TemporaryFile() as f:\n f.write(source_code.encode(\"utf-8\"))\n f.seek(0)\n try:\n pseudo_module = imp.load_module(\n pseudo_module_name,\n f,\n pseudo_module_file,\n (\".py\", \"r\", imp.PY_SOURCE),\n )\n except Exception:\n # sets the name of the imported module to an UnresolvedValue so we don't get further\n # errors\n self._handle_imports(node.names)\n return\n finally:\n # clean up pyc file\n try:\n os.unlink(pseudo_module_file + \"c\")\n except OSError:\n pass\n if pseudo_module_name in sys.modules:\n del sys.modules[pseudo_module_name]\n\n for name, value in six.iteritems(pseudo_module.__dict__):\n if name.startswith(\"__\") or (\n hasattr(builtins, name) and value == getattr(builtins, name)\n ):\n continue\n self._set_name_in_scope(name, (node, name), KnownValue(value))", "def writeImports2File(self, file, indent = \" \"):\r\n # import each entity and its associated graphical file\r\n for obj in self.listNodes.keys():\r\n file.write(indent+\"from \"+obj+\" import \"+obj+\"\\n\")\r\n if not obj[0:4] == \"ASG_\":\r\n file.write(indent+\"from graph_\"+obj+\" import graph_\"+obj+\"\\n\")", "def _import(self, module_name):\n # load keywords\n kw = __import__('keywords')\n # set real rpc proxy\n kw.var_cache['proxy'] = device_proxy\n kw.var_cache['reflection'] = reflection_proxy\n kw.var_cache['local'] = local_proxy\n # load script\n __import__(module_name)\n # register all kw func from keywords.kw_func\n self.kw_func.update(kw.kw_func)", "def load_modules_manually():\n #cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile( inspect.currentframe() ))[0]))\n cmd_folder = '../myutils/'\n if cmd_folder not in sys.path:\n sys.path.insert(0, cmd_folder)\n #print sys.path", "def run_zxpy(filename: str, 
module: ast.Module) -> None:\n patch_shell_commands(module)\n exec(compile(module, filename, mode='exec'))", "def write_module(args, module_path, templates):\n now_str = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n module_code = templates.format(author=args.author,\n email_addr=args.email,\n name=args.name,\n now=now_str,\n purpose=args.purpose)\n with open(module_path, 'w') as f_obj:\n f_obj.writelines(module_code)", "def reload_dependences(module):\n tree = get_reversed_tree()\n reload(module)\n for dependant in tree[module]:\n reload(dependant)", "def demoteCompiledModuleToBytecode(module):\n\n full_name = module.getFullName()\n filename = module.getCompileTimeFilename()\n\n if isShowProgress():\n inclusion_logger.info(\n \"Demoting module '%s' to bytecode from '%s'.\"\n % (full_name.asString(), filename)\n )\n\n source_code = module.getSourceCode()\n\n bytecode = demoteSourceCodeToBytecode(\n module_name=full_name, source_code=source_code, filename=filename\n )\n\n uncompiled_module = makeUncompiledPythonModule(\n module_name=full_name,\n reason=module.reason,\n filename=filename,\n bytecode=bytecode,\n is_package=module.isCompiledPythonPackage(),\n technical=full_name in detectEarlyImports(),\n )\n\n used_modules = module.getUsedModules()\n uncompiled_module.setUsedModules(used_modules)\n\n distribution_names = module.getUsedDistributions()\n uncompiled_module.setUsedDistributions(distribution_names)\n\n module.finalize()\n\n if isImportedModuleByName(full_name):\n replaceImportedModule(old=module, new=uncompiled_module)\n replaceRootModule(old=module, new=uncompiled_module)\n\n if isTriggerModule(module):\n replaceTriggerModule(old=module, new=uncompiled_module)\n\n writeImportedModulesNamesToCache(\n module_name=full_name,\n source_code=source_code,\n used_modules=used_modules,\n distribution_names=distribution_names,\n )", "def modules_load(machine_config):\n\t#---modules in LOCAL configuration must be loaded before checking version\n\timport importlib\n\tif 'module_path' in machine_config: module_path = machine_config['module_path']\n\telse:\n\t\tmodule_parent = os.environ.get('MODULESHOME','/usr/share/Modules/default')\n\t\tmodule_path = os.path.join(module_parent,'init','python.py')\n\tincoming = {}\n\tif sys.version_info<(3,0): execfile(module_path,incoming)\n\telse: exec(open(module_path).read(),incoming)\n\t#---note that modules that rely on dynamically-linked C-code must use EnvironmentModules\n\tmodlist = machine_config['modules']\n\tif type(modlist)==str: modlist = modlist.split(',')\n\tfor mod in modlist:\n\t\t#---always unload gromacs to ensure correct version\n\t\tincoming['module']('unload','gromacs')\n\t\tprint('[STATUS] module load %s'%mod)\n\t\tincoming['module']('load',mod)", "def test_make_module_text(self):\n import usercode\n usercode_sample_re = re.compile(r'^==========*\\n', re.M)\n saved_sample = usercode_sample_re.split(usercode.__doc__)[1]\n\n gcode = gencode.GenCode()\n gcode.make_module(self.schema)\n generated = gcode.get_user_text()\n self.assertEqual(generated, saved_sample, \"Generated code doesn't match sample:\\n\" +\n \"\".join(difflib.unified_diff(generated.splitlines(True),\n saved_sample.splitlines(True),\n fromfile=\"generated\",\n tofile=\"usercode.py\")))", "def setModule(name, module):", "def pyo():\n local('python -O -m compileall .')", "def pyo():\n local('python -O -m compileall .')", "def help_load(self):\n print(LOAD)", "def create_module(sbml_model_file, model_name, model_output_dir, condition_df,\n observable_df):\n\n from 
amici.petab_import import import_model\n import_model(sbml_model=sbml_model_file, observable_table=observable_df,\n model_name=model_name, model_output_dir=model_output_dir,\n verbose=True, condition_table=condition_df)", "def generate(module_name, code):\n try:\n ast_tree = ast.parse(code)\n except Exception:\n raise RuntimeError('Bad Python code')\n\n visitor = SearchSpaceGenerator(module_name)\n try:\n visitor.visit(ast_tree)\n except AssertionError as exc:\n raise RuntimeError('%d: %s' % (visitor.last_line, exc.args[0]))\n return visitor.search_space, astor.to_source(ast_tree)", "def generate_from(self, ast: ast_pb2.AST):\n for s in self._generate_headlines():\n yield s\n yield f'PYBIND11_MODULE({self._module_name}, m) {{'\n yield I+('m.doc() = \"CLIF generated pybind11-based module for '\n f'{ast.source}\";')\n for decl in ast.decls:\n if decl.decltype == ast_pb2.Decl.Type.FUNC:\n for s in function.generate_from(decl.func):\n yield s\n yield ''\n yield '}'", "def loadModule(*args, allModules: bool=True, load: AnyStr=\"\", scan: bool=True,\n **kwargs)->List[AnyStr]:\n pass", "def set_linker_script(self, op):\n self.__linker_script = [\"-T\", op]", "def load_module(id=None, datatype=None, action=None,\n version='0.0', fields=[]):\n\n icon = {\n 'URI': config.IMAGES + \"load.png\",\n 'terminals': {\n 'output': (20, 10, 1, 0),\n }\n }\n \n terminals = [\n dict(id='output',\n datatype=datatype,\n use='out',\n description='data',\n ),\n ]\n\n files_field = {\n \"type\":\"[file]\",\n \"label\": \"Files\",\n \"name\": \"files\",\n \"value\": '',\n }\n intent_field = {\n \"type\":\"string\",\n \"label\":\"Intent\",\n \"name\": \"intent\",\n \"value\": '',\n }\n \n # Combine everything into a module.\n module = Module(id=id,\n name='Load',\n version=version,\n description=action.__doc__,\n #icon=icon,\n terminals=terminals,\n fields=[files_field, intent_field] + fields,\n action=action,\n )\n\n return module", "def create_modules(self):\n self.bitcell = self.replica_bitcell = self.mod_replica_bitcell()\n self.add_mod(self.bitcell)\n\n # This is the replica bitline load column that is the height of our array\n self.rbl = bitcell_array(name=\"bitline_load\", cols=1, rows=self.bitcell_loads)\n self.add_mod(self.rbl)\n\n # FIXME: The FO and depth of this should be tuned\n self.delay_chain = self.mod_delay_chain([self.delay_fanout]*self.delay_stages)\n self.add_mod(self.delay_chain)\n\n self.inv = pinv()\n self.add_mod(self.inv)\n\n self.access_tx = ptx(tx_type=\"pmos\")\n self.add_mod(self.access_tx)", "def includeWorkflow(self, *modules):\n targets = [ f\"{module}.targ\" for module in modules ]\n skfiles = [ f\"{module}.sk\" for module in modules ]\n self.includeWorkflowModules(*targets)\n self.snakefiles += skfiles", "def setup_module(module):\n print(\"Start rishabhSetupModule of Program\")", "def show_module(prog, name=\"<module>\"):\n if isinstance(prog, symtable.SymbolTable):\n # Already compiled\n mst = prog\n else:\n mst = symtable.symtable(prog, name, 'exec')\n stlist = list_symbol_tables(mst)\n for st in stlist:\n show_symbol_table(st)", "def process_modules(self) -> typing.NoReturn:\n\t\tfor moduleName in self.moduleNameSet:\n\t\t\tdetected_encoding = detect_encoding(moduleName)\n\n\t\t\tprint(f\"Processing {moduleName} ({detected_encoding})\")\n\n\t\t\twith open(moduleName, 'r+', encoding=detected_encoding) as fileStream:\n\t\t\t\t# Store the content of the file\n\t\t\t\tfileContent: str = fileStream.read()\n\t\t\t\t# Sets the file's current position at the offset, the position of the 
read/write pointer within the file\n\t\t\t\tfileStream.seek(0, 0)\n\t\t\t\t# Truncates the file's size\n\t\t\t\tfileStream.truncate()\n\n\t\t\t\t# Process regex patterns\n\t\t\t\tfor regexDict in regexDictList:\n\t\t\t\t\tfileContent = self.process_function(regexDict, fileContent)\n\n\t\t\t\t# Rewrite the processed content of the file\n\t\t\t\tfileStream.write(fileContent)", "def replace_loadtimes(top):\n assert isinstance(top, I.toplevel)\n trans = LoadTimeTransformer(top.scope)\n trans.visit(top)\n if trans.loads:\n top_expr = top.expression\n del top.expression\n expr = I.make_progn(list(I.make_write_binding(top.scope.use_symbol(g), expr)\n for g,expr in trans.loads) +\n [top_expr])\n propigate_location(top, expr, skips=list(expr for g,expr in trans.loads))\n top.expression = expr\n return top", "def on_modules_command(sender, command, label, args):\n plugin_header(sender, \"Modules\")\n msg(sender, \", \".join([((\"&a\" if mod in shared[\"modules\"] else \"&c\") + mod) for mod in shared[\"load_modules\"]]))", "def generate_loader_vanilla():\n return template_loader_vanilla", "def load_assemble_file(task_file):\n return imp.load_source('assemblefile', task_file)", "def add_import_benchmark(name):\n relative_path = create_relative_path(\"../utils/main.swift\")\n\n # read current contents into an array\n file_contents = []\n with open(relative_path, \"r\") as f:\n file_contents = f.readlines()\n\n # the test dependencies are placed before all benchmarks, so we have to\n # insert the benchmark in the right alphabetical order after we have seen\n # all test dependencies.\n read_test_dependencies = False\n previous_benchmark_name = None\n file_new_contents = []\n for line in file_contents:\n # check if this line is a definition of a benchmark and get its name\n match = re.search(r\"import ([a-zA-Z]+)\", line)\n if match and match.group(1):\n benchmark_name = match.group(1)\n # find where to insert the new benchmark in the right alphabetical\n # order.\n if (\n name < benchmark_name\n and previous_benchmark_name is None\n or name < benchmark_name\n and name > previous_benchmark_name\n ):\n if read_test_dependencies:\n file_new_contents.append(\"import \" + name + \"\\n\" + line)\n else:\n # all test dependencies are first specified, so from now\n # on we can look where to insert the new benchmark.\n read_test_dependencies = True\n file_new_contents.append(line)\n else:\n file_new_contents.append(line)\n previous_benchmark_name = benchmark_name\n else:\n file_new_contents.append(line)\n with open(relative_path, \"w\") as f:\n for line in file_new_contents:\n f.write(line)", "def test_documentation_popxl_code_loading(self):\n filename = \"code_loading.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def onReload(self, moduleName=\"NeedleFinder\"):\r\n if profiling : profbox()\r\n # framework\r\n globals()[moduleName] = slicer.util.reloadScriptedModule(moduleName)", "def codegen_reload_data():\n return {\n \"package\": u\"fn_utilities\",\n \"message_destinations\": [u\"fn_utilities\"],\n \"functions\": [u\"utilities_artifact_hash\", u\"utilities_attachment_hash\", u\"utilities_attachment_to_base64\", u\"utilities_attachment_zip_extract\", u\"utilities_attachment_zip_list\", u\"utilities_base64_to_artifact\", u\"utilities_base64_to_attachment\", u\"utilities_call_rest_api\", u\"utilities_domain_distance\", u\"utilities_email_parse\", u\"utilities_excel_query\", u\"utilities_expand_url\", u\"utilities_extract_ssl_cert_from_url\", u\"utilities_get_contact_info\", 
u\"utilities_json2html\", u\"utilities_parse_ssl_certificate\", u\"utilities_pdfid\", u\"utilities_resilient_search\", u\"utilities_shell_command\", u\"utilities_string_to_attachment\", u\"utilities_timer\", u\"utilities_xml_transformation\"],\n \"workflows\": [u\"example_artifact_attachment_to_base64\", u\"example_artifact_hash\", u\"example_attachment_hash\", u\"example_attachment_to_base64\", u\"example_call_rest_api\", u\"example_create_artifacts_from_excel_data\", u\"example_domain_distance\", u\"example_email_parsing_artifact\", u\"example_email_parsing_attachment\", u\"example_extract_ssl_cert_from_url\", u\"example_get_incident_contact_info\", u\"example_get_task_contact_info\", u\"example_json2html\", u\"example_parse_ssl_certificate\", u\"example_pdfid\", u\"example_resilient_search\", u\"example_shell_command\", u\"example_string_to_attachment\", u\"example_timer\", u\"example_timer_parallel\", u\"example_xml_transformation\", u\"example_zip_list\", u\"example_zip_to_artifact\", u\"utilities_expand_url\"],\n \"actions\": [u\"Example: (Artifact) Attachment to Base64\", u\"Example: Artifact Hash\", u\"Example: Attachment Hash\", u\"Example: Attachment to Base64\", u\"Example: Call REST API\", u\"Example: Domain Distance\", u\"Example: Email Parsing (Artifact)\", u\"Example: Email Parsing (Attachment)\", u\"Example: Expand URL\", u\"Example: Extract SSL Certificate\", u\"Example: Get Incident Contact Info\", u\"Example: Get Task Contact Info\", u\"Example: JSON2HTML\", u\"Example: Parse SSL Certificate\", u\"Example: PDFiD\", u\"Example: Resilient Search\", u\"Example: Shell Command\", u\"Example: String to Attachment\", u\"Example: Timer Epoch\", u\"Example: Timers in Parallel\", u\"Example: Use Excel Data\", u\"Example: XML Transformation\", u\"Example: Zip Extract\", u\"Example: Zip List\"],\n \"incident_fields\": [],\n \"incident_artifact_types\": [],\n \"incident_types\": [],\n \"datatables\": [],\n \"automatic_tasks\": [],\n \"scripts\": [u\"Convert JSON to rich text v1.0\"],\n \"playbooks\": []\n }", "def update_module(conn, module, chunk_size = 16000):\n rmodule = conn.modules[module.__name__]\n lf = inspect.getsourcefile(module)\n rf = conn.modules.inspect.getsourcefile(rmodule)\n upload_file(conn, lf, rf, chunk_size = chunk_size)\n conn.modules.__builtin__.reload(rmodule)", "def create(self):\n\t\tlipsBaseFile.imp()", "def MODULES(self):\n pass", "def example():\r\n path = os.path.abspath(os.path.dirname(__name__))\r\n module = CryptoModule()\r\n # create_name this is open source py module with confidential information\r\n opened_path = os.path.join(path, 'secret.py')\r\n # read_name this is open encrypted py module with confidential information\r\n secured_path = os.path.join(path, 'secured.py')\r\n # encrypt, read secret.py and create secured.py\r\n module.create_secured_module(path_to_opened_module=opened_path, path_to_secured_module=secured_path,\r\n create_key=True, delete_source_opened_module=False)\r\n # decrypt, read secured.py and create opened.py\r\n module.create_opened_module(path_to_secured_module=secured_path, path_to_opened_module=opened_path)\r\n print('ok')" ]
[ "0.62639445", "0.59777355", "0.5830551", "0.58056414", "0.57554215", "0.57490885", "0.5679191", "0.56280315", "0.55938977", "0.55713147", "0.55694884", "0.55570555", "0.55020046", "0.5476978", "0.5473481", "0.54465485", "0.54001313", "0.53518814", "0.531311", "0.5309852", "0.53042364", "0.52973497", "0.5278894", "0.5277192", "0.52760315", "0.5266393", "0.5250706", "0.5249076", "0.524772", "0.52406937", "0.5240216", "0.52255225", "0.5204946", "0.51879597", "0.5180667", "0.5180277", "0.51788425", "0.5174053", "0.51711774", "0.5171028", "0.51671785", "0.5162446", "0.51548874", "0.5150865", "0.51486117", "0.5147227", "0.51462364", "0.51459366", "0.5142524", "0.5140662", "0.5140662", "0.514027", "0.5137438", "0.51247066", "0.51238644", "0.51190287", "0.5102295", "0.5102295", "0.5102295", "0.5098986", "0.5097079", "0.5096468", "0.50943995", "0.50905645", "0.50809175", "0.5071136", "0.5069509", "0.5064815", "0.50629073", "0.50608003", "0.50595194", "0.5059269", "0.5051708", "0.5049447", "0.5049233", "0.5049009", "0.5049009", "0.5045931", "0.50341815", "0.50336355", "0.50331974", "0.50317204", "0.5031581", "0.5018757", "0.5018299", "0.50158906", "0.5014184", "0.50077546", "0.49960628", "0.499445", "0.4993862", "0.49729037", "0.49691224", "0.4965324", "0.49625623", "0.4956176", "0.49561396", "0.4950794", "0.49447903", "0.49367884", "0.49335426" ]
0.0
-1
Plotting and scaling data
def exercise_6(path_to_data, path_to_figure):
    print('='*30)
    print('Running exercise_6()')

    #### YOUR CODE HERE ####
    walk_arr = numpy.loadtxt(path_to_data)

    #### YOUR CODE HERE ####
    # plot the data using matplotlib plot!
    plt.plot(walk_arr)
    plt.ylabel('Location')
    plt.xlabel('Step')
    plt.title('Random Walk')
    plt.savefig(path_to_figure)

    print(f'walk_arr.shape: {walk_arr.shape}')

    #### YOUR CODE HERE ####
    walk_min = numpy.amin(walk_arr)
    print(f'walk_min: {walk_min}')

    #### YOUR CODE HERE ####
    walk_max = numpy.amax(walk_arr)
    print(f'walk_max: {walk_max}')

    #### YOUR CODE HERE ####
    def scale01(arr):
        """
        Linearly scale the values of an array into the range [0, 1].
        :param arr: input ndarray
        :return: scaled ndarray
        """
        walk_arr_01 = numpy.interp(arr, (numpy.amin(arr), numpy.amax(arr)), (0, 1))  # linear min-max scaling
        return walk_arr_01  # return the scaled array

    walk_arr_scaled = scale01(walk_arr)

    print('DONE exercise_6()')
    return walk_arr, walk_min, walk_max, walk_arr_scaled
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_data(self):", "def setPlotScaling(x,y):\n dislin.trfscl(x,y)", "def scale_data(x):\n mu = x.mean(axis=0)\n sigma = x.std(axis=0)\n x = (x - mu) / sigma\n return (x, mu, sigma)", "def myscale(g, factor=1.0):\n g.setdata(factor * g.getdata())\n # if !g.frozen eq 0 then show", "def scale_data(self, data):\n return (data - self.mean)/self.std", "def draw_plot(yscale='linear'):\n plt.yscale(yscale)\n plt.xticks(list(range(0, 101, 5)))\n plt.xlabel('percentile [%]')\n plt.grid(True)\n plt.ylabel('operation time [ns]')\n plt.legend()\n plt.show()", "def scale(self, data: np.ndarray):\n if self.scale_type == \"min_max\":\n scaled_data = (data - self.predictor_min) / (\n self.predictor_max - self.predictor_mean\n )\n elif self.scale_type == \"normalize\":\n scaled_data = (data - self.predictor_mean) / (\n self.predictor_max - self.predictor_min\n )\n elif self.scale_type == \"standardize\":\n scaled_data = (data - self.predictor_mean) / self.predictor_std\n elif self.scale_type == \"scale\":\n scaled_data = data - self.predictor_mean\n else:\n scaled_data = data\n return scaled_data", "def _scale_setter(self, value: float) -> None:\n self.uaxis.scale = value\n self.vaxis.scale = value", "def scale_data(x_data):\n\n # Scale based on maximum\n x_max = np.amax(x_data)\n scaled_data = x_data / x_max\n return scaled_data", "def plot_edisp_scale_map(self,vmin=0.5, vmax=1.5):\n\n scale_map = self.get_edisp_scale_map()\n \n pyplot.title(\"Energy dispersion scale plot\")\n pyplot.semilogx()\n pyplot.semilogy()\n\n pyplot.xlabel('MIGRA')\n pyplot.ylabel('MIGRA scaled')\n\n pyplot.step(self._edisp['M'],self._edisp['M'], color='C7', linestyle='--', linewidth=2, where='mid', label='Before')\n pyplot.step(self._edisp['M'],self._edisp['M_new'], color='midnightblue', linewidth=2, where='mid', label='After')\n\n pyplot.legend(loc=4 , frameon=False)", "def main(logy):\n filep = sys.stdin\n dataf = pd.read_csv(filep, sep=\" \", index_col=0, header=None)\n dataf.plot(logy=logy)\n print(dataf)\n plt.savefig(\"scaling.pdf\")", "def plot(self, data_grouped, *args, **kw):\n sdata = _scale_data(data_grouped, self.ranges)\n self.ax.plot(self.angle, np.r_[sdata, sdata[0]], *args, **kw)", "def test_set_scale():\n data = io.create_sample_Dataset()\n tmp = data.piv.set_scale(1.0)\n assert np.allclose(tmp[\"x\"], data[\"x\"])\n\n tmp = data.copy()\n tmp.piv.set_scale(2.0)\n tmp_mean = tmp[\"u\"].mean(dim=(\"t\", \"x\", \"y\")).values\n data_mean = data[\"u\"].mean(dim=(\"t\", \"x\", \"y\")).values\n assert np.allclose(tmp_mean / data_mean, 2.0)", "def scale(self):", "def rescale(self):\n low = self.datasource.data[\"values\"].min()\n high = self.datasource.data[\"values\"].max()\n\n # force color to be at lower end of the colormap if\n # data is all equal\n if low == high:\n high += 1\n\n self.set_limits_minmax(low, high)", "def plot_transformed_data(transformed_data):\n plt.figure(1)\n plt.title('Transformed data')\n plt.xlabel('Eigenvector 1')\n plt.ylabel('Eigenvector 2')\n plt.plot(*transformed_data.T, 'o')", "def __init__(self, data, pixscale = 7.77/43):\n self.data = data\n self.pixscale = pixscale", "def plotfunc(self, ax, data, vmin=None, vmax=None):\n if data.shape[0] == 1:\n return ax.plot(data[0])\n else:\n return ax.imshow(data, vmin=vmin, vmax=vmax)", "def our_plot(ax, event, plot_args, scale=1, limits=None):\n _x = plot_args['x']\n _y = plot_args['y']\n ax.cla()\n ax.plot(_x, _y, zorder=0, color='k', alpha=0.5)\n ax.plot(_x + 200, _y, zorder=0, color='r', alpha=0.5)\n if hasattr(event, 'xdata') and 
event is not None:\n xmin, xmax, ymin, ymax = limits\n deltax, deltay = xmax - xmin, ymax - ymin\n\n if event.xdata is not None and event.ydata is not None:\n print(event.xdata, event.ydata, xmin, xmax, ymin, ymax, scale)\n ax.set(xlim=[event.xdata - scale * deltax,\n event.xdata + scale * deltax],\n ylim=[event.ydata - scale * deltay,\n event.ydata + scale * deltay])\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.plot([event.xdata], [event.ydata], marker='+', color='k',\n ms=20, alpha=0.25)\n else:\n ax.set(title='Press \"r\" to reset, \"z\" to undo last point')\n\n # plot coords\n for it in range(len(newcoords)):\n ax.scatter(*newcoords[it],\n marker=r\"$ {} $\".format(it),\n color='red', zorder=10)\n for it in range(len(oldcoords)):\n ax.scatter(*oldcoords[it],\n marker=r\"$ {} $\".format(it),\n color='black', zorder=10)", "def visualisation(self):\n plt.plot(self.x, self.y, 'o', label = 'Example data')\n plt.plot(self.x, np.dot(self.w, self.X), label = 'Model')\n plt.xlim([-1,1])\n plt.ylim([-1,1])", "def _scale_data(data: AmpDetData, odr_fit: odr.Output,\n action_unit: str, action_plot_unit: str, tune_scale: float):\n x_scale = UNIT_IN_METERS[action_unit] / UNIT_IN_METERS[action_plot_unit]\n data.action *= x_scale\n data.action_err *= x_scale\n\n # correct for tune scaling\n y_scale = 1. / tune_scale\n data.tune *= y_scale\n data.tune_err *= y_scale\n\n # same for odr_fit:\n for idx in range(len(odr_fit.beta)):\n full_scale = y_scale / (x_scale**idx)\n odr_fit.beta[idx] *= full_scale\n odr_fit.sd_beta[idx] *= full_scale\n return data, odr_fit", "def plot(self):\n pass", "def scale_data(x, y, x_scale_f = '../saved_models/scalers/9_params_21_2_x_scaler.pkl', y_scale_f = '../saved_models/scalers/9_params_21_2_y_scaler.pkl', par_slice = range(7) + range(8,9)):\n\tx_scaler = sklearn.externals.joblib.load(x_scale_f)\n\ty_scaler = sklearn.externals.joblib.load(y_scale_f)\n\tx_scaler.transform(x)\n\ty_scaler.transform(y)\n\tx = x[:,par_slice] \n\treturn x, y, x_scaler, y_scaler", "def plot_normalized_data(df, title=\"Daily portfolio value and SPY\", \n\t\t\t\t\t\t xlabel=\"Date\", ylabel=\"Normalized price\"):\n plot_data(df/df.iloc[0], title, xlabel, ylabel)", "def plot(self,x,y):\r\n scalex,scaley = self.getScale()\r\n x = x*self.samplerate\r\n x = (x-scalex[0])/(scalex[1]-scalex[0])*self.w\r\n y = (y-scaley[0])/(scaley[1]-scaley[0])*self.h\r\n return (x,y)", "def plot(self, x, ax=None, vmin=None, vmax=None, label=None, norm_std=False, set_ytickcolor=True, std=None, **kwargs):\n\n if std is not None:\n if std.shape != x.shape:\n raise ValueError('Inconsistent shapes!')\n\n if len(x.time) > 0:\n if ax is None:\n ax = self.ax\n set_axiscolor = False\n else:\n ax = ax\n set_axiscolor = True\n if x.ndim == 1: # if a vector already provided\n y = x.data * 1.\n if std is not None:\n top = y + std.data * 1.\n bot = y - std.data * 1.\n else:\n y = x.fldmean() # ... 
otherwise use fldmean() to get timeseries\n if std is not None:\n raise ValueError('Spatial aggregation for errors not supported yet!') # would need covariance structure\n\n if norm_std:\n y /= y.std()\n if label is None:\n label = x.label\n\n if self.regress: # calculate linear regression\n slope_print, intercept_print, r_value, p_value, std_err = stats.linregress(x.time / self.normx, y)\n slope, intercept, r_value, p_value, std_err = stats.linregress(x.time, y)\n self.tmp_slope = slope\n self.tmp_corr = r_value\n\n if p_value < 0.01:\n spvalue = 'p < 0.01'\n else:\n spvalue = 'p=' + str(round(p_value, 2))\n\n if self.show_equation:\n label = label + ' (y=' + \"%.1e\" % slope_print + 'x+' + \"%.1e\" % intercept_print + ', r=' + str(round(r_value, 2)) + ', ' + spvalue + ')'\n else:\n label = label + ' (r=' + str(round(r_value, 2)) + ', ' + spvalue + ')'\n\n self.labels.append(label)\n\n if std is not None:\n self._plot_std_bars(ax, x, std)\n\n p = ax.plot(x.date, y, label=label, **kwargs)[0]\n self.lines.append(p)\n if self.regress:\n ax.plot(x.date, x.time * slope + intercept, '--', color=p.get_color()) # plot regression line\n\n if self.show_ylabel:\n ax.set_ylabel(x._get_unit(), size=self.ticksize)\n if self.show_xlabel:\n ax.set_xlabel('time', size=self.ticksize)\n\n if self.title is not None:\n ax.set_title(self.title, size=self.ticksize)\n\n if (vmin is not None) and (vmax is not None):\n ax.set_ylim(vmin, vmax)\n\n if set_ytickcolor:\n for tl in ax.get_yticklabels():\n tl.set_color(p.get_color())", "def plot_data(self):\n if hasattr(self,'data'):\n plt.scatter(*self.data.T)\n plt.show()\n else:\n raise Exception('No 2d data of the instance has been loaded')", "def __init__(self,options,pos):\n self.options = options\n numobjects = pos.shape[1]\n plt.ion() # turn on interactive plotting mode\n dpi=72.0 # set dpi (I think this is appropriate on mac)\n # fig accepts size in inches\n # so divide desired pixel width, height by dpi to get inches\n w,h=(self.options.width/dpi,self.options.height/dpi)\n fig = plt.figure(1,figsize=(w,h),dpi=dpi)\n fig.clear()\n\n #w = self.options.width/fig.get_dpi() # desired width in inches\n #h = self.options.height/fig.get_dpi() # desired height in inches\n #fig.set_size_inches(w,h,forward=True) # last arg resizes the canvas to match\n\n self.ax = plt.axes()\n self.ax.set_xlim(self.options.xmin,self.options.xmax)\n self.ax.set_ylim(self.options.ymin,self.options.ymax)\n #pyplot.axis('scaled')\n\n # I don't know why axis('scaled') doesn't work here\n # But I think the next two commands are equivalent\n self.ax.set_aspect('equal', adjustable='box', anchor='C')\n self.ax.set_autoscale_on(False)\n\n #self.redraw()\n\n\n #facecolors = [cm.jet(x) for x in np.random.rand(len(vicon_objects))]\n facecolors = [cm.jet(x) for x in np.linspace(0,1,numobjects)]\n if self.options.visualize_switch_xy:\n if self.options.axis==1:\n self.ax.axvline(linewidth=4, c='k')\n else:\n self.ax.axhline(linewidth=4, c='k')\n self.col = plt.scatter(pos[:,1],pos[:,0],c=facecolors,s=3000)\n else:\n if self.options.axis==1:\n self.ax.axhline(linewidth=4, c='k')\n else:\n self.ax.axvline(linewidth=4, c='k')\n self.col = plt.scatter(pos[:,0],pos[:,1],c=facecolors,s=3000)\n\n # scores\n self.tpos = self.ax.text(0.75*self.options.xmax,0.75*self.options.ymin,str(50),\n size=72,color='k',ha='center',va='center')\n self.tneg = self.ax.text(0.75*self.options.xmin,0.75*self.options.ymin,str(50),\n size=72,color='k',ha='center',va='center')\n\n self.canvas = agg.FigureCanvasAgg(fig)\n 
self.canvas.draw()\n self.renderer = self.canvas.get_renderer()\n raw_data = self.renderer.tostring_rgb()\n\n pygame.init()\n \n self.window = pygame.display.set_mode((options.width,options.height), DOUBLEBUF)\n self.screen = pygame.display.get_surface()\n\n self.set_caption(\"Possession: Waiting for Vicon\")\n \n size = self.canvas.get_width_height()\n \n surf = pygame.image.fromstring(raw_data, size, \"RGB\")\n self.screen.blit(surf, (0,0))\n pygame.display.flip()", "def yscale(value):\n impl.yscale(**locals())", "def Draw_Scale( self ):\r\n self.canvas_scale.delete(ALL)\r\n if(cb.longx != 0):\r\n value = str( round( cb.longx, 3 ) )\r\n self.canvas_scale.create_line( cb.xorigin,5,cb.xorigin + cb.xtotal,5 )\r\n splits = 10.0\r\n increment = cb.xtotal/splits\r\n for i in range(int(splits + 1)):\r\n self.canvas_scale.create_line( int(cb.xorigin+i*increment),1,int(cb.xorigin+i*increment),9 )\r\n if( self.filter_distance > cb.longx ):\r\n self.filter_distance = cb.longx\r\n x = cb.xtotal - self.filter_distance*cb.xtotal/cb.longx + cb.xorigin\r\n top = str(round(self.filter_distance,3))\r\n \r\n while len(top) < 5:\r\n top = top + \"0\"\r\n self.scale_text = self.canvas_scale.create_text( cb.xorigin + cb.xtotal + 10,1,anchor = \"nw\",text = top + \"/\" + value)\r\n self.scale_marker = self.canvas_scale.create_polygon( x,7, x+4,3, x-4,3, fill=self.highlight_color,outline=self.highlight_color )\r\n if( self.filter_line_on ):\r\n if(self.filter_line != 0 ):\r\n self.canvas_one.delete( self.filter_line )\r\n self.filter_line = self.canvas_one.create_line( x,0,x,self.ys, fill=self.highlight_color)", "def scale(X, *, axis=..., with_mean=..., with_std=..., copy=...):\n ...", "def refresh_plot(self):\n self.ax.relim() # recompute the data limits\n self.ax.autoscale_view() # automatic axis scaling\n self.fig.canvas.flush_events()", "def _onToggleScale(self, event):\r\n if self.get_yscale() == 'log':\r\n self.set_yscale('linear')\r\n else:\r\n self.set_yscale('log')\r\n self.subplot.figure.canvas.draw_idle()", "def yscale(self, value='linear'):\r\n for ax in self._subaxes:\r\n ax.set_yscale(value)\r\n self.figure.canvas.draw()", "def testExplicitDataScaling(self):\n orig_scale = util.ScaleData\n util.ScaleData = self.FakeScale\n try:\n self.AddToChart(self.chart, [2, 3, 5, 7, 11])\n self.chart.left.min = -7\n self.chart.left.max = 49\n # This causes scaling to happen & calls FakeScale.\n self.chart.display.Url(0, 0)\n self.assertEqual(-7, self.min)\n self.assertEqual(49, self.max)\n finally:\n util.ScaleData = orig_scale", "def plot_spectrum(self, ax, data, stat=None, label_axes=True):\n if len(data.shape) == 1:\n data = np.reshape(data, (1, len(data)))\n\n x_pow = np.abs(data)\n if stat == None:\n if self.scale_select.currentIndex() == 0:\n ax.plot(x_pow, label='linear')\n elif self.scale_select.currentIndex() == 1:\n ax.plot(10*np.log10(x_pow), label='decibels')\n elif self.scale_select.currentIndex() == 2:\n ax.plot(np.var(x_pow, axis=0), label='variance')\n elif self.scale_select.currentIndex() == 3:\n ax.plot(skew(x_pow, axis=0), label='skew')\n elif self.scale_select.currentIndex() == 4:\n ax.plot(kurtosis(x_pow, axis=0), label='kurtosis')\n else:\n \n if self.scale_select.currentIndex() == 1:\n x_pow = 10*np.log10(x_pow)\n if stat == 'median' or stat == 'med':\n ax.plot(np.median(x_pow, axis=0), label='median')\n if stat == 'min':\n ax.plot(np.min(x_pow, axis=0), label='min')\n if stat == 'max':\n ax.plot(np.max(x_pow, axis=0), label='max')\n \n plt.minorticks_on()\n if label_axes:\n 
self.updateFreqAxis(ax, n_ticks=10)\n plt.xlabel(\"Frequency\")\n plt.ylabel(\"Amplitude\")\n plt.legend()", "def set_data(self, data, xaxis=None, yaxis=None, clim=None, cmap=None,\n vmin=None, under=None, vmax=None, over=None):\n assert isinstance(data, np.ndarray) and data.ndim <= 3\n data = np.atleast_2d(data)\n # Get the x-axis and y-axis :\n xaxis = np.arange(data.shape[1]) if xaxis is None else np.array(xaxis)\n yaxis = np.arange(data.shape[0]) if yaxis is None else np.array(yaxis)\n assert (len(xaxis) == data.shape[1]) and (len(yaxis) == data.shape[0])\n self._dim = (xaxis.min(), xaxis.max(), yaxis.min(), yaxis.max())\n # Convert data to color (if data.ndim == 2)\n if data.ndim == 2: # infer color from data\n clim = (data.min(), data.max()) if clim is None else clim\n # Limit the number of points :\n if any([k > self._n_limit for k in data.shape]):\n dsf_x = max(1, int(np.round(data.shape[0] / self._n_limit)))\n dsf_y = max(1, int(np.round(data.shape[1] / self._n_limit)))\n logger.debug(\"Image size reduced along the x and y-axis with \"\n \"a down-sampling factor of %s\" % ([dsf_x, dsf_y]))\n data = data[::dsf_x, ::dsf_y]\n xaxis, yaxis = xaxis[::dsf_x], yaxis[::dsf_y]\n # Set properties :\n self._update_cbar_args(cmap, clim, vmin, vmax, under, over)\n # Get colormap :\n cmap = self._get_glsl_colormap(limits=(data.min(), data.max()))\n self._image.cmap = cmap\n self._image.clim = 'auto'\n else: # data is already a compatible color\n assert data.shape[-1] in [3, 4]\n # Set color to the image :\n self._image.set_data(vispy_array(data))\n fact_x = (self._dim[1] - self._dim[0]) / len(xaxis)\n fact_y = (self._dim[3] - self._dim[2]) / len(yaxis)\n sc = (fact_x, fact_y, 1.)\n tr = (self._dim[0], self._dim[2], 0.)\n self._image.transform.scale = sc\n self._image.transform.translate = tr", "def plot_data(x: np.ndarray, y: np.ndarray) -> None:\n\n _, ax = plt.subplots()\n scatter = ax.scatter(x[:, 0], x[:, 1], c=y, s=40, cmap=plt.cm.RdYlBu)\n legend1 = ax.legend(*scatter.legend_elements(),\n loc=\"lower right\", title=\"Classes\")\n ax.add_artist(legend1)\n plt.xlim((min(x[:, 0]) - 0.1, max(x[:, 0]) + 0.1))\n plt.ylim((min(x[:, 1]) - 0.1, max(x[:, 1]) + 0.1))", "def plot(self, data_frame):\n self.axes.plot(data_frame, 'o-')\n self.axes.set_ylim(0.0, 200.0)\n self.fig.autofmt_xdate()\n self.draw()", "def plot_psf_scale_map(self, vmin=-0.5, vmax=0.5):\n\n scale_map = self.get_psf_scale_map()\n\n pyplot.title(\"PSF $\\sigma_1$ scale map\")\n pyplot.semilogx()\n\n pyplot.xlabel('Energy, TeV')\n pyplot.ylabel('Off-center angle, deg')\n pyplot.pcolormesh(scale_map['E_edges'], scale_map['Theta_edges'], scale_map['sigma_1'].transpose(),\n cmap='seismic', vmin=vmin, vmax=vmax)\n pyplot.colorbar()", "def plot_scatter(data):\n minimum = data[data.columns[0]]\n distance = data[data.columns[1]]\n\n # Forms the scatterplot\n plt.scatter(minimum, distance)\n\n # Adds a title and axis names\n plt.title('Minimum vs Total distance', fontweight='bold', fontsize='large')\n plt.xlabel('Minimun Bound', fontsize='large')\n plt.gca().invert_xaxis()\n plt.ylabel('Total Distance', fontsize='large')\n plt.grid(True)\n\n # Actually shows the scatterplot\n plt.show()", "def __init__(self,scale):\n self.scale = scale", "def renderScale(self, plot, painter, axisId, startDist, endDist,\r\n baseDist, rect):\r\n if not plot.axisEnabled(axisId):\r\n return\r\n scaleWidget = plot.axisWidget(axisId)\r\n if scaleWidget.isColorBarEnabled() and scaleWidget.colorBarWidth() > 0:\r\n scaleWidget.drawColorBar(painter, 
scaleWidget.colorBarRect(rect))\r\n baseDist += scaleWidget.colorBarWidth() + scaleWidget.spacing()\r\n painter.save()\r\n if axisId == QwtPlot.yLeft:\r\n x = rect.right() - 1.0 - baseDist\r\n y = rect.y() + startDist\r\n w = rect.height() - startDist - endDist\r\n align = QwtScaleDraw.LeftScale\r\n elif axisId == QwtPlot.yRight:\r\n x = rect.left() + baseDist\r\n y = rect.y() + startDist\r\n w = rect.height() - startDist - endDist\r\n align = QwtScaleDraw.RightScale\r\n elif axisId == QwtPlot.xTop:\r\n x = rect.left() + startDist\r\n y = rect.bottom() - 1.0 - baseDist\r\n w = rect.width() - startDist - endDist\r\n align = QwtScaleDraw.TopScale\r\n elif axisId == QwtPlot.xBottom:\r\n x = rect.left() + startDist\r\n y = rect.top() + baseDist\r\n w = rect.width() - startDist - endDist\r\n align = QwtScaleDraw.BottomScale\r\n \r\n scaleWidget.drawTitle(painter, align, rect)\r\n painter.setFont(scaleWidget.font())\r\n sd = scaleWidget.scaleDraw()\r\n sdPos = sd.pos()\r\n sdLength = sd.length()\r\n sd.move(x, y)\r\n sd.setLength(w)\r\n palette = scaleWidget.palette()\r\n palette.setCurrentColorGroup(QPalette.Active)\r\n sd.draw(painter, palette)\r\n sd.move(sdPos)\r\n sd.setLength(sdLength)\r\n painter.restore()", "def rescale_plot(self, fig, ax):\n\n # Two dimensions (plane curve)\n if self.ndim == 2:\n\n # Set the aspect ratio of the data\n ax.set_aspect(1.0)\n\n # Adjust pad\n plt.tight_layout(pad=5.0, w_pad=None, h_pad=None)\n\n # Three dimensions (space curve)\n if self.ndim == 3:\n\n # Set axes aspect ratio\n ax.autoscale(enable=True)\n x_min, x_max = ax.get_xlim()\n y_min, y_max = ax.get_ylim()\n z_min, z_max = ax.get_zlim()\n x_mid = (x_min + x_max) / 2\n y_mid = (y_min + y_max) / 2\n z_mid = (z_min + z_max) / 2\n L = np.max((x_max - x_min, y_max - y_min, z_max - z_min)) / 2\n ax.set_xlim3d(x_mid - 1.0 * L, x_mid + 1.0 * L)\n ax.set_ylim3d(y_mid - 1.0 * L, y_mid + 1.0 * L)\n ax.set_zlim3d(z_mid - 1.0 * L, z_mid + 1.0 * L)\n\n # Adjust pad\n plt.tight_layout(pad=5.0, w_pad=None, h_pad=None)", "def proteinScatterPlot(yDataDict, xData, xMin=0, xMax=None, yMin=-0.1, yMax=10,\r\n title=None, xLabel=None, yLabel=None, colors=None, \r\n figSize=(10,10), markerSize=10, legend=False, alpha=1.0, marker='o',\r\n linestyle=None, xTicks=None, legendLoc='upper left', legendCols=2, axes=None): \r\n if xMax is None:\r\n xMax = max(xData)\r\n if colors is None:\r\n colors = [pylab.cm.jet(float(i)/float(len(yDataDict))) for i in range(len(yDataDict))]\r\n if axes is None:\r\n scat = pylab.figure(figsize=figSize)\r\n scatAx = scat.add_subplot(111)\r\n else:\r\n scatAx=axes\r\n for i,p in enumerate(qMS.sort_nicely(yDataDict.keys())):\r\n \r\n if not (linestyle is None):\r\n scatAx.plot(xData, yDataDict[p], c=colors[i], linestyle=linestyle, label=p, marker=marker, markersize=markerSize, alpha=alpha)\r\n else:\r\n scatAx.plot(xData, yDataDict[p], c=colors[i], markersize=markerSize, marker=marker, label=p, alpha=alpha)\r\n scatAx.set_title(title, multialignment='center')\r\n scatAx.set_xlabel(xLabel)\r\n scatAx.set_ylabel(yLabel)\r\n scatAx.set_xlim([xMin,xMax])\r\n scatAx.set_ylim([yMin,yMax])\r\n if xTicks is None:\r\n scatAx.set_xticks([0,xMax/4,xMax/4*2,xMax/4*3,xMax])\r\n else:\r\n scatAx.set_xticks(xTicks)\r\n scatAx.set_yticks([0,yMax/4,yMax/4*2,yMax/4*3,yMax])\r\n if legend:\r\n pylab.legend(loc=legendLoc, ncol=legendCols, scatterpoints=1)\r\n scatAx.yaxis.tick_left()\r\n scatAx.xaxis.tick_bottom()\r\n pylab.tight_layout()\r\n \r\n return scatAx", "def updatePlot(self,*args):\n # set x limits\n 
timeDisplayOptions = {'10 minutes':10,'1 hour':60,'6 hours':6*60,'24 hours':24*60,'All':0}\n try:\n lastDatetime = mpl.dates.num2date(self.stage60K.get_xdata()[-1])\n firstDatetime = mpl.dates.num2date(self.stage60K.get_xdata()[0])\n except IndexError: # no data yet\n now = datetime.datetime.utcnow().toordinal()\n firstDatetime = mpl.dates.num2date(now)\n lastDatetime = firstDatetime\n xMin = lastDatetime-datetime.timedelta(minutes=timeDisplayOptions[self.wScale.get()])\n xMin = max([ firstDatetime, xMin ])\n if self.wScale.get() == 'All':\n xMin = firstDatetime\n xMinIndex = numpy.searchsorted( self.stage60K.get_xdata(), mpl.dates.date2num(xMin) )\n # rescale axes, with the x being scaled by the slider\n if self.toolbar._active == 'HOME' or self.toolbar._active == None:\n ymin,ymax = 10000000, -10000000\n lineAndVar = { self.stage60K: self.t60K,\n self.stage03K: self.t3K,\n self.stageGGG: self.tGGG,\n self.stageFAA: self.tFAA }\n if len(self.stage60K.get_xdata()) > 1:\n for line in lineAndVar.keys():\n if lineAndVar[line].get() == 0:\n line.set_visible(False)\n else:\n line.set_visible(True)\n ydata = line.get_ydata()[xMinIndex:-1]\n try:\n ymin = min(ymin, numpy.nanmin(ydata))\n ymax = max(ymax, numpy.nanmax(ydata))\n except ValueError as e:\n pass\n self.ax.set_xlim(xMin,lastDatetime)\n self.ax.set_ylim(ymin - (ymax-ymin)/10, ymax + (ymax-ymin)/10)\n hfmt = mpl.dates.DateFormatter('%H:%M:%S', tz=tz.tzlocal())\n self.ax.xaxis.set_major_formatter(hfmt)\n self.fig.autofmt_xdate()\n self.fig.tight_layout()\n #draw\n self.canvas.draw()", "def scale(self, x, y, z) -> None:\n ...", "def rescale(self):\n # forecast on real data, don't need this anymore\n pass", "def plot(self,datarange=None,nx=100,ny=100,clf=True,cb=True,data='auto',\n log=False,**kwargs):\n from operator import isMappingType\n from .utils import mpl_context\n\n with mpl_context(clf=clf) as plt:\n if data == 'auto':\n data = self.data or None\n\n maxmind = None\n if data: #TODO:correct coord conv\n xd,yd = data[0][0],data[0][1]\n dataval = data[1]\n if datarange is None:\n datarange = (np.min(xd),np.max(xd),np.min(yd),np.max(yd))\n maxmind = (np.max(dataval),np.min(dataval))\n elif datarange is None:\n if self.rangehint is not None:\n datarange = self.rangehint\n else:\n raise ValueError(\"Can't choose limits for plotting without data or a range hint\")\n\n\n grid = np.mgrid[datarange[0]:datarange[1]:1j*nx,datarange[2]:datarange[3]:1j*ny]\n res = self(grid)\n\n if log:\n if isinstance(log,basestring) and 'mag' in log:\n lognomag = log.replace('mag','')\n zpt = float(lognomag) if lognomag.strip() != '' else 0\n logfunc = lambda x:zpt-2.5*np.log10(x)\n elif log == '10':\n logfunc = np.log10\n else:\n logfunc = np.log\n res = logfunc(res)\n if data:\n dataval = logfunc(dataval)\n if maxmind is not None:\n maxmind = logfunc(maxmind)\n\n if maxmind:\n norm = plt.normalize(min(np.min(res),maxmind[1]),max(np.max(res),maxmind[0]))\n else:\n norm = plt.normalize(np.min(res),np.max(res))\n\n if clf:\n plt.clf()\n\n kwargs.setdefault('aspect','auto')\n #plt.imshow(res[::-1].T,norm=norm,extent=datarange,origin='lower',**kwargs)\n plt.imshow(res.T,norm=norm,extent=datarange,origin='lower',**kwargs)\n\n if cb:\n if isMappingType(cb):\n plt.colorbar(**cb)\n else:\n plt.colorbar()\n\n if data:\n if isMappingType(data):\n kwscat = dict(data)\n else:\n kwscat = {}\n kwscat.setdefault('norm',norm)\n kwscat.setdefault('c',dataval)\n plt.scatter(xd,yd,**kwscat)\n\n plt.xlim(datarange[0],datarange[1])\n plt.ylim(datarange[2],datarange[3])", "def 
__scale(data, max_value_list, min_value_list, scale_value_list, process_cols_list):\n features = np.array(data.features, dtype=float)\n for i in process_cols_list:\n value = features[i]\n if value > max_value_list[i]:\n value = max_value_list[i]\n elif value < min_value_list[i]:\n value = min_value_list[i]\n\n features[i] = (value - min_value_list[i]) / scale_value_list[i]\n _data = copy.deepcopy(data)\n _data.features = features\n return _data", "def plot_data():\n \n [X_train, X_dev, X_test, Y_train, Y_dev, Y_test, numOutputNodes] = load_data('regression') \n \n traindev = np.concatenate((Y_train, Y_dev), 1)\n traindevtest = np.concatenate((traindev, Y_test), 1)\n tdt = traindevtest.reshape(traindevtest.shape[1],)\n\n Y_train = Y_train.reshape(Y_train.shape[1],)\n Y_dev = Y_dev.reshape(Y_dev.shape[1],)\n Y_test = Y_test.reshape(Y_test.shape[1],)\n\n sigma = np.round(np.std(tdt), 3)\n mu = np.round(np.mean(tdt), 3)\n\n # plots histogram of all data together, indicating values of mean and standard deviation\n plt.figure(1)\n plt.hist(tdt)\n plt.title(\"{} data points, mu = {}, sigma = {}\".format(tdt.size, mu, sigma))\n plt.xlabel(\"average Fe coordination number\")\n plt.ylabel(\"frequency\")\n plt.show()\n\n # plots histogram where the training, cross-validation, and test sets have separate bars\n plt.figure(2)\n plt.hist([Y_train, Y_dev, Y_test], label = ['training', 'cross-validation', 'test'], density = True)\n plt.xlabel(\"average Fe coordination number\")\n plt.ylabel(\"frequency\")\n plt.legend()\n plt.show()\n\n # below is graphing for the charge data, as opposed to the averaged spectrum data\n [X_train1, X_dev1, X_test1, _, _, _, Y_train1, Y_dev1, Y_test1, numOutputNodes1] = load_data('multi_task')\n traindev1 = np.concatenate((Y_train1, Y_dev1), 1)\n traindevtest1 = np.concatenate((traindev1, Y_test1), 1)\n tdt1 = traindevtest1.reshape(traindevtest1.shape[1],)\n\n Y_train1 = Y_train1.reshape(Y_train1.shape[1],)\n Y_dev1 = Y_dev1.reshape(Y_dev1.shape[1],)\n Y_test1 = Y_test1.reshape(Y_test1.shape[1],)\n\n sigma = np.round(np.std(tdt1), 3)\n mu = np.round(np.mean(tdt1), 3)\n\n # plots histogram of all data together, indicating values of mean and standard deviation\n plt.figure(3)\n plt.hist(tdt1)\n plt.title(\"{} data points, mu = {}, sigma = {}\".format(tdt1.size, mu, sigma))\n plt.xlabel(\"charge\")\n plt.ylabel(\"frequency\")\n plt.show()\n\n # plots histogram where the training, cross-validation, and test sets have separate bars\n plt.figure(4)\n plt.hist([Y_train1, Y_dev1, Y_test1], label = ['training', 'cross-validation', 'test'], density = True)\n plt.xlabel(\"charge\")\n plt.ylabel(\"frequency\")\n plt.legend()\n plt.show()\n\n return None", "def scale(self, scale):\n self.coords = self.coords * scale\n return self", "def ProfilePlot(t,y,z,scale=86400, axis=0,color=[0.5,0.5,0.5]):\r\n from matplotlib import collections\r\n from matplotlib.ticker import Formatter\r\n\r\n class MyFormatter(Formatter):\r\n def __init__(self, dates, fmt='%b %d %Y'):\r\n self.fmt = fmt\r\n self.dates = dates\r\n\r\n def __call__(self, x, pos=0):\r\n 'Return the label for time x s'\r\n return datetime.strftime(datetime(1990,1,1)+timedelta(seconds=x),self.fmt)\r\n\r\n tsec = othertime.SecondsSince(t)\r\n formatter = MyFormatter(tsec)\r\n \r\n y = np.swapaxes(y,0,axis)\r\n \r\n lines=[]\r\n line2 =[]\r\n for ii, tt in enumerate(tsec):\r\n #xplot = set_scale(y[:,ii],tt)\r\n xplot = tt + y[:,ii]*scale\r\n lines.append(np.array((xplot,z)).T)\r\n line2.append(np.array([[tt,tt],[z[0],z[-1]]]).T)\r\n 
\r\n \r\n LC1 = collections.LineCollection(lines,colors=color,linewidths=1.5)\r\n LC2 = collections.LineCollection(line2,colors='k',linestyles='dashed') # Zero axis\r\n \r\n ax=plt.gca()\r\n ax.add_collection(LC1)\r\n ax.add_collection(LC2)\r\n ax.set_ylim((z.min(),z.max()))\r\n ax.xaxis.set_major_formatter(formatter)\r\n ax.set_xlim((tsec[0],tsec[-1]))\r\n plt.xticks(rotation=17) \r\n \r\n return ax", "def make_plot(x,y):", "def clickAutoscale(self, event):\n self.axes.autoscale_view()", "def set_scales(self):\r\n self.canvas.update()\r\n self.dxmin = self.dmargin\r\n self.dymin = self.dmargin\r\n self.dxmax = self.canvas.winfo_width() - self.dmargin - 1\r\n self.dymax = self.canvas.winfo_height() - self.dmargin - 1\r\n\r\n # Flip the Y coordinates to invert the result.\r\n if self.y_is_flipped:\r\n self.dymin, self.dymax = self.dymax, self.dymin\r\n\r\n self.xscale = (self.dxmax - self.dxmin) / (self.wxmax - self.wxmin)\r\n self.yscale = (self.dymax - self.dymin) / (self.wymax - self.wymin)\r\n\r\n # Calculate 1 pixel in world coordinates.\r\n self.xpix = 1 / self.xscale\r\n self.ypix = 1 / self.yscale", "def plotTimeDelta(data, type_plot, device):\n mean = data.mean()\n std = data.std()\n max_data = data.max()\n min_data = data.min()\n max_indx = np.argmax(data) # max value index\n min_indx = np.argmin(data) # min value index\n x = np.arange(min_data, max_data, 0.1)\n y = normfun(x, mean, std)\n res_quantile = quantileValues(data, device)\n if type_plot == 0:\n plt.plot(x, y, color='blue')\n annot_max_min(x, y)\n # plt.hist(data.dropna(), bins=500, rwidth=0.9, normed=True)\n plt.title('Time Delta distribution')\n plt.xlabel('Time Delta')\n plt.ylabel('Probability')\n sns.distplot(tmp.deltaSeconds.dropna(),\n kde=True, rug=True, rug_kws={\"color\": \"k\"},\n kde_kws={\"color\": \"red\", \"lw\": 3, \"label\": \"KDE\"},\n hist_kws={\"histtype\": \"step\", \"lw\": 3, \"alpha\": 1,\n \"color\": \"g\"},\n bins=500)\n # ax.set(xlabel='Vibration Intensity', ylabel='Probability')\n elif type_plot == 1: # plot the max and min point\n plt.plot(data)\n plt.plot(max_indx, data[max_indx], 'ks')\n show_max = '['+str(max_indx)+' '+str(data[max_indx])+']'\n plt.annotate(show_max,\n xytext=(max_indx, data[max_indx]),\n xy=(max_indx, data[max_indx]))\n plt.plot(min_indx, data[min_indx], 'gs')\n show_min = '['+str(min_indx)+' '+str(data[min_indx])+']'\n plt.annotate(show_min,\n xytext=(min_indx, data[min_indx]),\n xy=(min_indx, data[min_indx]))\n plt.title('Time Delta')\n plt.xlabel('Index')\n plt.ylabel('Vibration Intensity Value')\n elif type_plot == 2: # boxplot\n boxplot(data.dropna())\n return res_quantile", "def plot(self, *args, **kwargs):\n pass", "def rescale_data(self):\n\n # Dividing every array of simulated data vectors by the mean of that array.\n '''# Didnt work\n for key in self.data.keys():\n self.data[key] /= np.mean(self.data[key])\n '''\n\n self.rescaled = True\n\n # Mean normalization\n \"\"\" didnt work\n for key in self.data.keys():\n self.data[key] -= np.mean(self.data[key])\n self.data[key] /= (np.max(self.data[key]) - np.min(self.data[key]))\n \"\"\"\n\n # Median normalization\n \"\"\" didnt work, still dividing by large number \n for key in self.data.keys():\n self.data[key] -= np.median(self.data[key])\n self.data[key] /= (np.max(self.data[key]) - np.min(self.data[key]))\n \"\"\"\n\n # Divide by median\n \"\"\" didnt work\n for key in self.data.keys():\n self.data[key] -= np.median(self.data[key])\n self.data[key] /= (np.median(self.data[key]))\n \"\"\"\n\n # Take logarithm of 
data\n \"\"\" didnt work\n for key in self.data.keys():\n self.data[key] = np.log10(self.data[key])\n \"\"\"\n\n # Scale by length of vector\n \"\"\"\n for key in self.data.keys():\n self.data[key] /= np.linalg.norm(self.Cl_noiseless)\n \"\"\"\n\n \n # Scale by negative of the natural logarithm \n for key in self.data.keys():\n self.data[key] = -1 * np.log(self.data[key]) \n \n \"\"\"\n # Scale by subtracting the mean and dividing by std\n std = np.nanstd(self.data['data'])\n mean = np.nanmean(self.data['data'])\n for key in self.data.keys():\n # self.data[key] -= np.log(self.Cl_noiseless) # -1* # scale this same way\n # self.data[key] -= self.Cl_noiseless # -1* # scale this same way\n self.data[key] -= mean \n self.data[key] /= std\n \"\"\"", "def plot_plasma(self):\n x = self.geom.x\n fig, axes = plt.subplots(1, 2, figsize=(8, 3),\n constrained_layout=True)\n # plot densities\n ax = axes[0]\n ax.plot(x, self.ne, 'b-')\n ax.plot(x, self.ni, 'r-')\n ax.legend(['E', 'Ion'])\n ax.set_xlabel('Position (m)')\n ax.set_ylabel('Density (m^-3)')\n # plot temperature\n ax = axes[1]\n ax.plot(x, self.Te, 'b-')\n ax.plot(x, self.Ti, 'r-')\n ax.legend(['Te', 'Ti'])\n ax.set_xlabel('Position (m)')\n ax.set_ylabel('Temperature (eV)')\n plt.show()", "def scale(self, X_train, X_test):\n\n #X_train, X_test, y_train, y_test = self.split_X_y_sets()\n self.scaler.fit(X_train)\n X_train_sc = self.scaler.transform(X_train)\n X_test_sc = self.scaler.transform(X_test)\n\n return X_train_sc, X_test_sc #, y_train, y_test", "def _force_rescale(self, setpoint_x, setpoint_y):", "def draw_plot(self, data_x_axis, data_y_axis, label_x_axis, label_y_axis):\n\n # Flipped tells us whether to invert the current x, y axis\n self.figure.clear()\n\n # Fix for plot having cutoff text or labels\n self.figure.tight_layout()\n self.figure.subplots_adjust(left=0.1, right=0.9, bottom=0.3, top=0.9)\n\n self.figure.suptitle(self.plot_title)\n\n ax = self.figure.add_subplot(111)\n\n # Add another argument fontsize = 10 to change the fontsize of the labels\n ax.set_xlabel(label_x_axis)\n ax.set_ylabel(label_y_axis)\n\n ax.xaxis.set_major_locator(plt.MaxNLocator(10))\n ax.yaxis.set_major_locator(plt.MaxNLocator(10))\n\n if self.plotType == 1:\n ax.scatter(data_x_axis, data_y_axis)\n\n elif self.plotType == 2:\n # SMOOTH CURVE CURRENTLY WORKS ONLY WITH INTEGRAL VALUES\n # Smoothen the curve points\n try:\n T = data_x_axis\n power = data_y_axis\n\n xnew = np.linspace(T.min(), T.max(),\n 300) # 300 represents number of points to make between T.min and T.max\n\n spl = make_interp_spline(T, power, k=3) # BSpline object\n power_smooth = spl(xnew)\n ax.scatter(data_x_axis, data_y_axis)\n ax.plot(xnew, power_smooth, marker='o')\n except:\n # Switch to normal plot if the data is not purely numeric in which case a smooth curve is not possible\n ax.plot(data_x_axis, data_y_axis, marker='o')\n\n else:\n ax.plot(data_x_axis, data_y_axis)\n\n self.canvas.draw()\n # Enable the option as plot is now drawn\n self.action_save_plot_png.setEnabled(True)\n self.action_toolbar_save_plot_png.setEnabled(True)", "def plot(self, data, background, scale=(5, 99)):\n # find the minimum and maximum value of plotting\n vmin = np.percentile(data, scale[0])\n vmax = np.percentile(data, scale[1])\n\n cax1 = self.ax1.imshow(data, cmap='gray', vmin=vmin, vmax=vmax,\n origin='lower')\n cax2 = self.ax2.imshow(background, cmap='viridis',\n origin='lower')\n cs = self.ax2.contour(background, colors='r', linewidths=0.5)\n self.ax2.clabel(cs, inline=1, fontsize=7, 
use_clabeltext=True)\n self.colorbar(cax1, cax=self.ax1c)\n self.colorbar(cax2, cax=self.ax2c)\n for ax in [self.ax1, self.ax2]:\n ax.set_xlabel('X (pixel)')\n ax.set_ylabel('Y (pixel)')\n ax.xaxis.set_major_locator(tck.MultipleLocator(500))\n ax.xaxis.set_minor_locator(tck.MultipleLocator(100))\n ax.yaxis.set_major_locator(tck.MultipleLocator(500))\n ax.yaxis.set_minor_locator(tck.MultipleLocator(100))", "def plot(self):\n\t\tself.plotOfSpect().plot()", "def plot(self):\n y = self.projection\n mpl.scatter(y[:, 0], y[:, 1], c=self.data_class)\n mpl.show()", "def plot_aeff_scale_map(self, vmin=-0.5, vmax=0.5):\n\n scale_map = self.get_aeff_scale_map()\n\n pyplot.title(\"Collection area scale map\")\n pyplot.semilogx()\n\n pyplot.xlabel('Energy, TeV')\n pyplot.ylabel('Off-center angle, deg')\n pyplot.pcolormesh(scale_map['E_edges'], scale_map['Theta_edges'], scale_map['Map'].transpose(),\n cmap='seismic', vmin=vmin, vmax=vmax)\n pyplot.colorbar()", "def compute(self):\n # get the parameter values from the widgets\n loc = float(str(self.loctext.text()))\n scale = float(str(self.scaletext.text()))\n # check the values\n # compute\n self.data[2] = self.data[1] + np.random.normal(loc, scale, self.npt)\n self.data[3] = self.data[2] - self.data[1] - self.dataSpan * 0.1\n # store the values\n self.loc = loc\n self.scale = scale\n self.updatePlot()", "def scaling(self):\n \n if self.colindex == self.rowsize: # last chart in row\n self.colindex = 0\n self.rowindex += 1 \n xorigin = self.indent + (self.colindex * self.xscale) \n yorigin = self.rowindex * self.yscale\n xscale = self.xscale # to fulfil % formatting below\n yscale = self.yscale \n self.colindex += 1\n\n res = \"origin(%(xorigin)s%%, %(yorigin)s%%), scale(%(xscale)s%%, %(yscale)s%%)\" % locals()\n return res", "def _adjust_scale(self, value):\n if self._min_val <= value <= self._max_val:\n self._scale_var.set(value)\n self.update_label_text()", "def plot(self):\n fig, ax = plt.subplots()\n\n T=self.storage.T\n #print self.storage.S\n #print T\n for statename in self.v:\n i=self.rv[statename]\n s=self.storage.S[:, i]\n #print s\n ax.plot(T, s, label=statename)\n\n legend = ax.legend(loc='upper right', shadow=True)\n\n frame = legend.get_frame()\n frame.set_facecolor('0.90')\n\n # Set the fontsize\n for label in legend.get_texts():\n label.set_fontsize('large')\n\n for label in legend.get_lines():\n label.set_linewidth(1.5) # the legend line width\n plt.show()", "def scale_data_point(self, data_point):\n \n data_point_scaled = pd.Series(self.scaler.transform(data_point[self.feature_names].to_numpy().reshape(1, -1)).ravel())\n data_point_scaled.name = data_point.name\n data_point_scaled.index = self.feature_names\n \n # Set any values > 1 to 1. 
This is only used in visualization.\n data_point_scaled = data_point_scaled.where(data_point_scaled <= 1.0, 1.0)\n #data_point_scaled.values = data_point_scaled.values.apply(> 1.0 else 1.0 for y in x])\n\n return data_point_scaled", "def epics_data_plot(data):\n if isinstance(data, (xr.DataArray, xr.Dataset)):\n data = data.to_dataframe()", "def plot_linear(self, **kwargs):\n import matplotlib.pyplot as plt\n self._plot_pair(ylabel='Reflectivity', **kwargs)\n plt.yscale('linear')", "def scale_data(self, train_data):\n\n # Fit on training data only.\n # scaler = StandardScaler().fit(train_data[self.feature_names])\n scaler = QuantileTransformer().fit(train_data[self.feature_names])\n self.scaler = scaler\n scaled_train_data = scaler.transform(train_data[self.feature_names])\n\n scaled_train_data_df = pd.DataFrame(data=scaled_train_data, columns=self.feature_names)\n scaled_train_data_df.index = train_data.index\n scaled_train_data_df[self.outcome_name] = train_data[self.outcome_name]\n\n return scaled_train_data_df", "def _stats_plot(self, element, y, data=None):\n data, x, y = self._process_args(data, None, y)\n\n opts = {'plot': dict(self._plot_opts), 'norm': self._norm_opts,\n 'style': self._style_opts}\n\n ylim = self._plot_opts.get('ylim', (None, None))\n if not isinstance(y, (list, tuple)):\n ranges = {y: ylim}\n return (element(data, self.by, y).redim.range(**ranges).relabel(**self._relabel).opts(**opts))\n\n labelled = ['y' if self.invert else 'x'] if self.group_label != 'Group' else []\n if self.value_label != 'value':\n labelled.append('x' if self.invert else 'y')\n\n if 'xlabel' in self._plot_opts and 'x' not in labelled:\n labelled.append('x')\n if 'ylabel' in self._plot_opts and 'y' not in labelled:\n labelled.append('y')\n\n opts['plot']['labelled'] = labelled\n\n kdims = [self.group_label]\n data = data[list(y)]\n if check_library(data, 'dask'):\n from dask.dataframe import melt\n else:\n melt = pd.melt\n df = melt(data, var_name=self.group_label, value_name=self.value_label)\n ranges = {self.value_label: ylim}\n return (element(df, kdims, self.value_label).redim(**self._redim)\n .redim.range(**ranges).relabel(**self._relabel).opts(**opts))", "def _plot_rawdata(self):\n fig, ax = plt.subplots(1, 1)\n ax.imshow(self.data, origin='top')\n ax.set_title('Gauss-Legendre Quadrature Grid')\n ax.set_xlabel('longitude index')\n ax.set_ylabel('latitude index')\n fig.tight_layout(pad=0.5)\n return fig,ax", "def xscale(value):\n impl.xscale(**locals())", "def view(self, lo_en: Quantity = Quantity(0.0, \"keV\"), hi_en: Quantity = Quantity(30.0, \"keV\"),\n figsize: Tuple = (8, 6)):\n if lo_en > hi_en:\n raise ValueError(\"hi_en cannot be greater than lo_en\")\n else:\n lo_en = lo_en.to(\"keV\").value\n hi_en = hi_en.to(\"keV\").value\n\n if len(self._plot_data.keys()) != 0:\n # Create figure object\n plt.figure(figsize=figsize)\n\n # Set the plot up to look nice and professional.\n ax = plt.gca()\n ax.minorticks_on()\n ax.tick_params(axis='both', direction='in', which='both', top=True, right=True)\n\n # Set the title with all relevant information about the spectrum object in it\n plt.title(\"{n} - {o}{i} Spectrum\".format(n=self.src_name, o=self.obs_id, i=self.instrument.upper()))\n for mod_ind, mod in enumerate(self._plot_data):\n x = self._plot_data[mod][\"x\"]\n # If the defaults are left, just update them to the min and max of the dataset\n # to avoid unsightly gaps at the sides of the plot\n if lo_en == 0.:\n lo_en = x.min()\n if hi_en == 30.0:\n hi_en = x.max()\n\n # Cut the x 
dataset to just the energy range we want\n plot_x = x[(x > lo_en) & (x < hi_en)]\n\n if mod_ind == 0:\n # Read out the data just for line length reasons\n # Make the cuts based on energy values supplied to the view method\n plot_y = self._plot_data[mod][\"y\"][(x > lo_en) & (x < hi_en)]\n plot_xerr = self._plot_data[mod][\"x_err\"][(x > lo_en) & (x < hi_en)]\n plot_yerr = self._plot_data[mod][\"y_err\"][(x > lo_en) & (x < hi_en)]\n plot_mod = self._plot_data[mod][\"model\"][(x > lo_en) & (x < hi_en)]\n\n plt.errorbar(plot_x, plot_y, xerr=plot_xerr, yerr=plot_yerr, fmt=\"k+\", label=\"data\", zorder=1)\n else:\n # Don't want to re-plot data points as they should be identical, so if there is another model\n # only it will be plotted\n plot_mod = self._plot_data[mod][\"model\"][(x > lo_en) & (x < hi_en)]\n\n # The model line is put on\n plt.plot(plot_x, plot_mod, label=mod, linewidth=1.5)\n\n # Generate the legend for the data and model(s)\n plt.legend(loc=\"best\")\n\n # Ensure axis is limited to the chosen energy range\n plt.xlim(lo_en, hi_en)\n\n plt.xlabel(\"Energy [keV]\")\n plt.ylabel(\"Normalised Counts s$^{-1}$ keV$^{-1}$\")\n\n ax.set_xscale(\"log\")\n ax.xaxis.set_major_formatter(ScalarFormatter())\n ax.xaxis.set_minor_formatter(FuncFormatter(lambda inp, _: '{:g}'.format(inp)))\n ax.xaxis.set_major_formatter(FuncFormatter(lambda inp, _: '{:g}'.format(inp)))\n\n plt.tight_layout()\n # Display the spectrum\n plt.show()\n\n # Wipe the figure\n plt.close(\"all\")\n\n else:\n warnings.warn(\"There are no XSPEC fits associated with this Spectrum, you can't view it.\")", "def plot_values(self, plot_widget, data, x_range, y_range):\r\n\r\n self.widget = plot_widget\r\n self.data = data\r\n self.x_range = x_range\r\n self.y_range = y_range\r\n\r\n self.widget.setXRange(0, self.x_range)\r\n self.widget.setYRange(0, self.y_range)\r\n self.widget.showGrid(x=True, y=True)\r\n self.widget.addLegend()\r\n # self.widget.setLabel('left', 'Value', units='y')\r\n self.widget.setLabel('bottom', 'Frames')\r\n self.widget.clear()\r\n\r\n for item in self.data.items():\r\n line = self.widget.plot(np.insert(item[1], 0, item[1][0]), pen=self.get_color(item[0]),\r\n symbolPen=self.get_color(item[0]), symbol='o', symbolSize=1, name=item[0])\r\n self.marker(self.widget)", "def distribution_plot(data):\r\n ready_data = sorted((data))\r\n fit = stats.norm.pdf(ready_data, np.mean(ready_data), np.std(ready_data))\r\n plt.plot(ready_data, fit, '-o')\r\n plt.ylabel(\"Prob\")\r\n plt.xlabel(\"Prices\")\r\n plt.title(\"Distribution of prices (Under 50 days) Demand Function\")\r\n plt.show()", "def normalize_plot(self):\n\n kwargs = dict(stretch = self.stretch,\n vmin = self.vmin_button.get_value(),\n vmax = self.vmax_button.get_value())\n norm = aplpy.normalize.APLpyNormalize(**kwargs)\n self.parent.aplpy_plot.image.set_norm(norm)\n self.parent.aplpy_plot.refresh()", "def fit(self, data):\n self.column_min_value, self.column_max_value = self._get_min_max_value(data)\n self.scale_column_idx = self._get_scale_column_idx(data)\n self.header = self._get_header(data)\n\n self.column_range = []\n for i in range(len(self.column_max_value)):\n scale = self.column_max_value[i] - self.column_min_value[i]\n if scale < 0:\n raise ValueError(\"scale value should large than 0\")\n elif np.abs(scale - 0) < 1e-6:\n scale = 1\n self.column_range.append(scale)\n\n f = functools.partial(MinMaxScale.__scale, max_value_list=self.column_max_value,\n min_value_list=self.column_min_value, scale_value_list=self.column_range,\n 
process_cols_list=self.scale_column_idx)\n fit_data = data.mapValues(f)\n\n return fit_data", "def plot_data(x_plot, X_train, X_test, y_train, y_test, low, high):\n s = 15\n plt.plot(x_plot, ground_truth(x_plot), alpha=0.5, label='ground truth')\n plt.scatter(X_train, y_train, s=s, alpha=0.2)\n plt.scatter(X_test, y_test, s=s, alpha=0.2, color='red')\n plt.xlim((low, high))\n plt.ylabel('y')\n plt.xlabel('x')\n plt.legend(loc='upper left')\n plt.show()", "def plot(self):\n plot_spectrum(self.data, self.fig, self.ax_e, self.ax_s, title = \"Solar spectrum\")", "def plot_visualization(path_results, x_data, y_data, variant_mode, nb_classes, signal_test, args):\n\n\t#path_tsne = path_results + \"/Visualization/train/\" + str(args.step) + \"_2d.csv\"\n\t#data_frame = pd.read_csv(path_tsne)\n\t\n\tpath_maping = path_results + \"/Maping/\" + str(args.subject).split(\".txt\")[0] + \"/\"\n\tfilename = path_maping + \"maping_\" + str(args.step) + \"_\" + str(args.subject).split(\".txt\")[0] + \"_stick\" + str(args.stick) + \".png\"\n\n\tprint(\"path_save maping\", path_maping)\n\n\tif not os.path.exists(path_maping):\n\t\tos.makedirs(path_maping)\n\n\t#print(\"path_tsne\", path_tsne)\n\n\tlabel_maping = np.array([10])\n\n\tx_data = np.concatenate((x_data,signal_test),axis=0)\n\ty_data = np.concatenate((y_data,label_maping),axis=0)\n\n\tprint(\"x_data concatenate\",x_data.shape)\n\tprint(\"y_data concatenate\",y_data.shape)\n\n\tdata_frame = tsne_2d(x_data, y_data)\n\n\t\n\t\n\tgroups = data_frame.groupby('label')\n\n\tcluster_names, cluster_colors = get_target_names_dr(nb_classes, args.mode, args, variant_mode)\n\n\tfig = plt.figure(figsize=(20, 10))\n\tax = fig.add_subplot(111)\n\tax.margins(0.05) # Optional, just adds 5% padding to the autoscaling\n\tfor name, group in groups:\n\t\t\n\t\tif cluster_names[name] == str(args.subject):\n\t\t\tax.scatter(group.x, group.y, marker='D', s=150, edgecolors = 'face',label=cluster_names[name], color=cluster_colors[name])\n\t\telse:\n\t\t\tax.scatter(group.x, group.y, marker='o', label=cluster_names[name], color=cluster_colors[name])\n\n\tax.legend(numpoints=1) #show legend with only 1 point\n\tplt.savefig(filename) #save the plot", "def plot_scenario_distribution(self):\n x = self.arms\n\n y = self.df.groupby('price').mean().Converted[x]\n y_sex_0 = self.df[self.df.Sex == 0].groupby('price').mean().Converted[x]\n y_sex_1 = self.df[self.df.Sex == 1].groupby('price').mean().Converted[x]\n y_age_0 = self.df[self.df.Under_30 == 0].groupby('price').mean().Converted[x]\n y_age_1 = self.df[self.df.Under_30 == 1].groupby('price').mean().Converted[x]\n\n fig, ax_list = plt.subplots(2,1, figsize=(12, 9))\n\n for ax in ax_list:\n ax.grid(alpha=0.3, linestyle='--')\n\n ax.set_ylim(bottom=0, top=0.6)\n ax.set_xlim(left=50, right=104)\n\n ax.set_xlabel(\"Price\", fontsize=14)\n ax.set_ylabel(\"Conversion Rate\", fontsize=14)\n\n ax.set_xticks(self.arms)\n ax.set_xticklabels(self.arms.astype(np.int64), fontsize=12, alpha=0.7)\n ax.set_yticks(np.linspace(0, 0.7, 8))\n ax.set_yticklabels([str((i * 100).astype(np.int64)) + \"%\" for i in np.linspace(0, 0.7, 8)], fontsize=12, alpha=0.7)\n\n ax.spines['right'].set_alpha(0)\n ax.spines['left'].set_alpha(0.3)\n ax.spines['top'].set_alpha(0)\n ax.spines['bottom'].set_alpha(0.3)\n\n ax_list[0].plot(x, y, label='Global')\n ax_list[0].plot(x, y_sex_0, label='Male', color='moccasin')\n ax_list[0].plot(x, y_sex_1, label='Female', color='darkorange')\n\n ax_list[1].plot(x, y, label='Global')\n ax_list[1].plot(x, y_age_0, label='Under 30', 
color='red')\n ax_list[1].plot(x, y_age_1, label='Over 30', color='darkred')\n\n ax_list[0].legend()\n ax_list[1].legend()\n\n fig.suptitle(\"Conversion Rate\", fontsize=22)\n\n fig.show()\n\n plt.savefig('chapter5_pricing.png')", "def contingency(self, scale, distrib=True, dataname=''):\n print 'Generating the plot ...'\n\n cont = np.zeros((scale, scale))\n minLat, maxLat, minLon, maxLon = self.city[1]\n normLat = scale / (maxLat - minLat)\n normLon = scale / (maxLon - minLon)\n\n # syn = (index, rel index, class)\n for i in range(self.dataset.shape[0]):\n posy = int(((self.dataset[i][0] - minLat) * normLat))\n posx = int(((self.dataset[i][1] - minLon) * normLon))\n # print posx,posy,data[i][0],data[i][1], normLat, normLon\n try:\n if distrib:\n cont[scale - posy - 1, posx - 1] += 1\n else:\n cont[scale - posy - 1, posx - 1] = 1\n except IndexError:\n print self.dataset[i][0], self.dataset[i][1]\n if distrib:\n cont = cont / np.max(cont)\n\n fig = plt.figure()\n\n ax = fig.add_subplot(111)\n plt.title('Density ')\n\n plt.imshow(cont, interpolation='bicubic', cmap=cm.gist_yarg)\n vmax = np.max(cont)\n # vmin=np.min(cont)\n\n if distrib:\n plt.colorbar(ticks=np.round(np.linspace(0, 1, 10), 2),\n orientation='vertical')\n nfile = self.application + '-' + dataname\n\n fig.savefig(homepath + 'Results/' + self.city[2] + '-' + nfile + '.pdf', orientation='landscape', format='pdf')\n\n #plt.show()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='red')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='gray')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def testDefaultDataScaling(self):\n orig_scale = util.ScaleData\n util.ScaleData = self.FakeScale\n try:\n self.AddToChart(self.chart, [2, 3, 5, 7, 11])\n self.chart.auto_scale.buffer = 0\n # This causes scaling to happen & calls FakeScale.\n self.chart.display.Url(0, 0)\n self.assertEqual(2, self.min)\n self.assertEqual(11, self.max)\n finally:\n util.ScaleData = orig_scale", "def 
transform(self, data):\n #scaled_transform = data + self.sc_factor * (data* (1-self.sigma) - self.mu) / self.sigma\n # scaling = 1+self.sc_factor*(self.sigma-1)\n # scaling = tf.clip_by_value(scaling, 1.0e-8, 1.0e8)\n\n scaled_transform = (data-self.mu)/(tf.maximum(tf.sqrt(self.var)*self.sc_factor, 1e-2))\n return scaled_transform", "def _setscales(self, ndata, largestscale, notes, scaling):\n if scaling==\"log\":\n if notes<=0: notes=1\n # adjust nscale so smallest scale is 1\n noctave=self._log2(2.*ndata/largestscale)\n self.nscale=notes*noctave\n self.scales=numpy.zeros(self.nscale, float)\n for j in range(self.nscale):\n self.scales[j]=2.0**(float(j)/notes)\n elif scaling==\"linear\":\n nmax=ndata/largestscale/2\n self.scales=numpy.arange(float(2), float(nmax))\n self.nscale=len(self.scales)\n else: raise ValueError, \"scaling must be linear or log\"\n return", "def update_graph(self, data_list):\n #log.debug(\"render graph\")\n x_axis = range(len(data_list))\n\n mcd = self.main_curve_dialog\n mcd.curve.set_data(x_axis, data_list)\n\n if self.auto_scale:\n mcd.get_plot().do_autoscale()\n else:\n mcd.get_plot().replot()", "def plot(self, show=True):\n xs, ys = zip(*[(float(ix)/self.sample_rate, val)\n for ix, val in enumerate(self.samples)])\n plt.plot(xs, ys)\n if show:\n plt.show()", "def plot_data(self, plot_input=True, plot_fitted=True,plotfile=None, show=None):\n if not self.fitted:\n raise RuntimeError(\"Please run fit() before attempting to plot the results\")\n\n fitted_data = self.data_summary(printout=False)\n fitted_mean = fitted_data['mean'].to_numpy().reshape((self.npoints,self.ndim))\n print(fitted_mean.shape)\n fitted_sigma = fitted_data['sd'].to_numpy().reshape((self.npoints,self.ndim))\n if self.ndim==np.int(2) and isinstance(self.ndim, int):\n blue, _, red, *_ = sns.color_palette()\n f, ax = plt.subplots(1, 1, figsize=(5, 4))#, gridspec_kw=dict(width_ratios=[4, 3]))\n\n sns.scatterplot(x=self.data[:,0], y=self.data[:,1])\n if plot_input:\n ax.errorbar(x=self.data[:,0], y=self.data[:,1],\n xerr=self.sigma[:,0], yerr=self.sigma[:,1],fmt='o',label='input data')\n \n if plot_fitted:\n ax.errorbar(x=fitted_mean[:,0], y=fitted_mean[:,1],\n xerr=fitted_sigma[:,0], yerr=fitted_sigma[:,1],fmt='o',label='inferred data')\n \n mu_post = self.trace.posterior[\"mu\"].mean(axis=(0, 1)).data\n \n sigma_post = self.trace.posterior[\"cov\"].mean(axis=(0, 1)).data\n \n var_post, U_post = np.linalg.eig(sigma_post)\n angle_post = 180.0 / np.pi * np.arccos(np.abs(U_post[0, 0]))\n\n e_post = Ellipse(\n mu_post,\n 2 * np.sqrt(5.991 * var_post[0]),\n 2 * np.sqrt(5.991 * var_post[1]),\n angle=angle_post,\n )\n e_post.set_alpha(0.5)\n e_post.set_facecolor(blue)\n e_post.set_zorder(10)\n ax.add_artist(e_post)\n rect_post = plt.Rectangle((0, 0), 1, 1, fc=blue, alpha=0.5)\n ax.legend(\n [rect_post],\n [\"Estimated 95% density region\"],\n loc=2,\n )\n #plt.show()\n\n elif self.ndim > 2 and isinstance(int, self.ndim) and np.isfinite(self.ndim):\n #raise NotImplementedError(\"This routine doesn't support plotting correlations in more than 2 dimensions yet!\")\n rows = self.ndim - 1\n cols = self.ndim - 1\n fig = plt.figure()\n gs = fig.add_gridSpec(rows, cols,left=0.1, right=0.9, bottom=0.1, top=0.9,\n wspace=0.05, hspace=0.05)\n for i in range(self.ndim - 1):\n for j in range(i+1,self.ndim - 1):\n ax = fig.add_subplot(gs[i,j])\n #plot the data points\n sns.scatterplot(self.data[:,i], self.data[:,j], ax=ax)\n if plot_input:\n ax.errorbar(x=self.data[:,i], y=self.data[:,j],\n xerr=self.sigma[:,i], 
yerr=self.sigma[:,j])\n \n if plot_fitted:\n ax.errorbar(x=fitted_mean[:,i], y=fitted_mean[:,j],\n xerr=fitted_sigma[:,i], yerr=fitted_sigma[:,j])\n \n mu_post = self.trace.posterior[\"mu\"].mean(axis=(i, j)).data\n \n sigma_post = self.trace.posterior[\"cov\"].mean(axis=(i, j)).data\n \n var_post, U_post = np.linalg.eig(sigma_post)\n angle_post = 180.0 / np.pi * np.arccos(np.abs(U_post[0, 0]))\n \n e_post = Ellipse(\n mu_post,\n 2 * np.sqrt(5.991 * var_post[0]),\n 2 * np.sqrt(5.991 * var_post[1]),\n angle=angle_post,\n )\n e_post.set_alpha(0.5)\n e_post.set_facecolor(blue)\n e_post.set_zorder(10)\n ax.add_artist(e_post)\n \n else:\n raise ValueError(\"Ndim is either less than 2 or is not an integer!\")\n \n if isinstance(plotfile, str):\n plt.save(plotfile)\n elif not show:\n raise TypeError(\"plotfile must be a string\")\n if show:\n plt.show()\n elif plotfile is not None:\n plt.close()", "def setAxisScaling(scalingtype='linear', axes='XYZ'):\n scalingdict = {'linear':'LIN', 'log':'LOG'} \n dislin.axsscl(scalingdict[scalingtype],axes)" ]
[ "0.68279326", "0.66121054", "0.64640146", "0.6433218", "0.64242774", "0.6404947", "0.6313454", "0.6287877", "0.61888176", "0.618611", "0.61710227", "0.6164797", "0.6157777", "0.6145128", "0.61247987", "0.61205786", "0.61193025", "0.6103496", "0.60717255", "0.60140777", "0.5998499", "0.5992837", "0.59788674", "0.59724146", "0.59705085", "0.5940334", "0.5885013", "0.5879962", "0.5837348", "0.58363396", "0.58226234", "0.5821972", "0.5819802", "0.58172184", "0.5799422", "0.5778377", "0.57743734", "0.57672817", "0.5762457", "0.5760777", "0.5747662", "0.5746428", "0.5739202", "0.57345897", "0.5713519", "0.5709218", "0.5702632", "0.5701501", "0.57014143", "0.56990635", "0.56848186", "0.5683958", "0.5680177", "0.56738615", "0.5658526", "0.5656272", "0.5653768", "0.56519014", "0.5643613", "0.5637286", "0.56327057", "0.5628998", "0.5613885", "0.5613603", "0.5612859", "0.5609793", "0.56034803", "0.5594542", "0.55926704", "0.55910516", "0.5576562", "0.5576447", "0.5573627", "0.55679953", "0.5566772", "0.5556879", "0.5548829", "0.5545551", "0.55439246", "0.5541079", "0.55400926", "0.5537872", "0.55351883", "0.5533982", "0.55311394", "0.5526042", "0.5525495", "0.55239654", "0.55221516", "0.55192524", "0.55192524", "0.55192524", "0.55192524", "0.55192524", "0.5518974", "0.55146515", "0.55135256", "0.5512963", "0.55089873", "0.55053097", "0.5502941" ]
0.0
-1
linearly scale the values of an array in the range [0, 1]
def scale01(arr):
    walk_arr_01 = numpy.interp(arr, (numpy.amin(arr), numpy.amax(arr)), (-1, +1)) # linear scaling
    return walk_arr_01 # return the scaled array
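A minimal sketch of the strictly-[0, 1] variant of this linear scaling, assuming NumPy is imported as np; the helper name rescale01 and the constant-input fallback value are illustrative assumptions, not part of the record above:

import numpy as np

def rescale01(arr):
    # Shift so the minimum maps to 0, then divide by the range so the maximum maps to 1.
    arr = np.asarray(arr, dtype=float)
    lo, hi = arr.min(), arr.max()
    if hi == lo:
        # Constant input: range is zero, so return a mid-range value to avoid division by zero.
        return np.full_like(arr, 0.5)
    return (arr - lo) / (hi - lo)

# Example: rescale01([2.0, 4.0, 6.0]) -> array([0. , 0.5, 1. ])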
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scale0to1(img):\r\n\r\n min = np.min(img)\r\n max = np.max(img)\r\n\r\n if min == max:\r\n img.fill(0.5)\r\n else:\r\n img = (img-min) / (max-min)\r\n\r\n return img.astype(np.float32)", "def scale0to1(img):\r\n\r\n img = img.astype(np.float32)\r\n\r\n min = np.min(img)\r\n max = np.max(img)\r\n\r\n if np.absolute(min-max) < 1.e-6:\r\n img.fill(0.5)\r\n else:\r\n img = (img-min) / (max-min)\r\n\r\n return img.astype(np.float32)", "def scale0to1(img):\r\n\r\n img = img.astype(np.float32)\r\n\r\n if img.shape[-1] != 1:\r\n img = np.sqrt(np.sum(img**2, axis=-1, keepdims=True))\r\n min = np.min(img)\r\n max = np.max(img)\r\n\r\n if np.absolute(min-max) < 1.e-6:\r\n img.fill(0.5)\r\n else:\r\n img = (img - min)/(max - min)\r\n\r\n return img.astype(np.float32)", "def scale_1d(x):\n return (min(x), max(x), len(x))", "def normalize_1d(x, scale=(0, 1, 1024)):\n new_min = scale[0]\n new_max = scale[1]\n new_len = scale[2]\n (min_x, max_x, old_size) = scale_1d(x)\n x_norm = (x - min_x) / (max_x - min_x)\n old_baseline = np.linspace(0, 1, old_size)\n new_baseline = np.linspace(0, 1, new_len)\n if len(old_baseline) <= 1:\n old_baseline = np.array([0, 1])\n x_norm = np.array([1, 0])\n x_interp = interp.interp1d(old_baseline, x_norm)\n x_resized = (x_interp(new_baseline) * (new_max - new_min)) + new_min\n return x_resized", "def scale(input):\n return (input - np.min(input)) / ((np.max(input) - np.min(input)))", "def normalize(array):\n high = array.max()\n low = array.min()\n rng = high - low\n array[:] = 1.0 - ((high - array) / rng)", "def scale(x):\n min_x, max_x = numpy.min(x), numpy.max(x)\n if min_x != max_x:\n x = (x-min_x)/(max_x-min_x)\n else:\n # all the numbers are the same in x\n x = numpy.asarray([1/len(x) for i in range(len(x)) ])\n return x.tolist()", "def lin_scale( val, x1, y1, x2, y2 ):\r\n x_range = (x2 - x1)\r\n new_val = 0\r\n if x_range is 0:\r\n new_val = y1\r\n else:\r\n y_range = ( y2 - y1 )\r\n new_val = ( ( ( val - x1 ) * y_range ) / x_range ) + y1\r\n\r\n return new_val", "def linearscale(input, boundfrom, boundto, extrema=None):\n\n\tminfrom,maxfrom = boundfrom\n\tminto,maxto = boundto\n\n\t### default from bounds are min,max of the input\n\tif minfrom is None:\n\t\tif extrema:\n\t\t\tminfrom = extrema[0]\n\t\telse:\n\t\t\tminfrom = arraystats.min(input)\n\tif maxfrom is None:\n\t\tif extrema:\n\t\t\tmaxfrom = extrema[1]\n\t\telse:\n\t\t\tmaxfrom = arraystats.max(input)\n\n\trangefrom = maxfrom - minfrom\n\tif rangefrom == 0:\n\t\t# if min==max, do simple thresholding\n\t\toutput = numpy.where(input>maxfrom, maxto, minto)\n\telse:\n\t\trangeto = maxto - minto\n\t\tscale = float(rangeto) / rangefrom\n\t\toffset = minfrom * scale\n\t\toutput = input * scale - offset\n\n\treturn output", "def scale(inp: np.ndarray, new_min: float = 0., new_max: float = 1.,\n axis: int = -1) -> np.ndarray:\n xmax = inp.max(axis=axis, keepdims=True)\n xmin = inp.min(axis=axis, keepdims=True)\n a = (inp-xmin) / (xmax - xmin)\n y = a * (new_max - new_min) + new_min\n return y", "def scaleValues(values):\n\n values = values - values.min()\n return values/values.max()", "def hist_normalize_linear(data, new_min, new_max):\n data_min = np.ma.min(data)\n data_max = np.ma.max(data)\n scaled = (data - data_min) * ((new_max - new_min) / (data_max - data_min))\n scaled.mask = data.mask\n return scaled", "def scale(a, tmin=0.0, tmax=1.0):\n return np.interp(a, (a.min(), a.max()), (tmin, tmax))", "def normalize(x):\n a = 0\n b = 1\n scale_min = 0\n scale_max = 255\n return a + ( ( (x - scale_min)*(b - 
a) )/( scale_max - scale_min ) )", "def scale_it(val):\n return scale(val, 0, 1, bpm_range[0], bpm_range[1])", "def _min_max_scale(arr, new_range=(0, 255)):\n # get array's current min and max\n mn = arr.min()\n mx = arr.max()\n\n # check if scaling needs to be done to be in new_range\n if mn < new_range[0] or mx > new_range[1]:\n # perform min-max scaling\n scaled = (new_range[1] - new_range[0]) * (arr - mn) / (mx - mn) + new_range[0]\n else:\n # return array if already in range\n scaled = arr\n\n return scaled", "def normalize_0d(x, old_scale=(0, 1, 1024), new_scale=(0, 1, 1024)):\n old_delta = old_scale[1] - old_scale[0]\n new_delta = new_scale[1] - new_scale[0]\n old_min = old_scale[0]\n new_min = new_scale[0]\n return (x - old_min) * (new_delta / old_delta) + new_min", "def normalize(arr):\n arr = arr.astype('float')\n # Do not touch the alpha channel\n for i in range(1):\n minval = arr[...,i].min()\n maxval = arr[...,i].max()\n if minval != maxval:\n arr[...,i] -= minval\n arr[...,i] *= (255.0/(maxval-minval))\n return arr", "def normalize(arr):\n m = np.min(arr)\n arr = arr - m\n M = np.max(arr)\n arr = arr / M\n return arr", "def scale(x, feature_range=(-1,1)):\r\n x = x * 2 - 1\r\n return x", "def normalise(x):\n return (x - jnp.min(x)) / (jnp.max(x) - jnp.min(x))", "def scale(self, value):\r\n return (float(value)-float(self.minimum))/float(self.maximum-self.minimum)*2.0 - 1.0", "def rescale(x):\n if x.min() != 0:\n raise ValueError('input should have min zero.')\n\n x /= x.max() # max 1\n x *= 2 # max 2\n x -= 1 # range -1, 1\n\n if x.min() != -1 and x.max() != 1:\n raise Exception\n\n return x", "def normalize_data(data, min=0, max=1):\r\n import numpy as np\r\n assert isinstance(data, np.ndarray)\r\n\r\n max_value = np.max(data)\r\n min_value = np.min(data)\r\n\r\n scaled = np.interp(data, [min_value, max_value], [min, max])\r\n # convert to float64\r\n scaled = scaled.astype(np.float64)\r\n\r\n return scaled", "def _scale_array(arr, clip=True):\n if clip:\n scaled = np.clip(arr, 0, 255)\n else:\n scale_range = (max([arr.min(), 0]), min([arr.max(), 255]))\n scaled = _min_max_scale(arr, new_range=scale_range)\n\n return scaled", "def rescale(A, d1, d2):\n \n A[0, 1] = A[0, 1] * (d2 / d1)\n A[1, 0] = A[1, 0] * (d1 / d2)\n \n return A", "def scale_mag_1(x):\n return np.array([np.true_divide(ui, mag(x)) for ui in x])", "def scaleto01(val,check=True):\n if type(val) is not list and type(val) is not np.ndarray:\n val = [val]\n if type(val) is list:\n val = np.array(val)\n assert type(val) is np.ndarray\n assert not check or np.all((val==-1) + (val==1))\n return (val+1)/2.0", "def scale(x, minimum, maximum):\n return (x - minimum) / (maximum - minimum)", "def normalize(self,arr):\n arr = arr/(arr.max()/255.0)\n return arr", "def simple_scaling(input_data):\n\n # Insert debugging assertions\n assert type(input_data) is np.ndarray, \"The 'input_data' must be numpy array.\"\n\n # Get the minimum values of the input numpy array along the axis \n Max = np.max(input_data, axis = 0)\n\n # Simple sclaing \n scaled_input_data = input_data / (Max + sys.float_info.min)\n\n # Return scaled input data\n return scaled_input_data", "def scale(self, value):\n\t\toldscale = self.oldmax - self.oldmin\n\t\tnewscale = self.newmax - self.newmin\n\t\treturn (newscale * (value - self.oldmin) / oldscale) + self.newmin", "def _scale(x, axis=None):\n x = _remove_baseline(x, axis=axis)\n x /= np.std(x, ddof=1, axis=axis, keepdims=True)\n return x", "def _scale(x):\n scaleFactor = 1\n _ret = 
int(x/scaleFactor)\n return _ret", "def rescale(tx):\n mins = np.amin(tx, axis=0)\n maxs = np.amax(tx, axis=0)\n txscale = (tx - mins) / (maxs - mins)\n return txscale", "def normalize(x):\n # TODO: Implement Function\n data_max = np.max(x)\n data_min = np.min(x)\n x = (x - data_min) / (data_max - data_min)\n return x", "def scale(self, points, inplace=True):\n points = np.array(points).astype(float)\n if inplace==False:\n points = points.copy()\n # if len(points.shape) == 1:\n # points = points[None,:]\n # if len(points.shape) != 2:\n # logger.error(\"cannot scale array of dimensions\".format(len(points.shape)))\n points -= self.origin\n points /= self.scale_factor\n return points", "def l1_normalize(x: np.ndarray) -> np.ndarray: # pylint: disable=invalid-name\n return x / x.sum()", "def scale(x_range=1, y_range=1):\r\n x = rand_val(x_range)\r\n y = rand_val(y_range)\r\n return np.array(((x, 0, 0),\r\n (0, y, 0),\r\n (0, 0, 1)), dtype=np.float)", "def scale(self, value):\n return (float(value) - float(self.minimum)) / \\\n float(self.maximum - self.minimum) * 2.0 - 1.0", "def rescale_arr(arr, amin, amax):\r\n\r\n # old bounds\r\n m = arr.min()\r\n M = arr.max()\r\n # scale/offset\r\n s = float(amax - amin) / (M - m)\r\n d = amin - s * m\r\n\r\n # Apply clip before returning to cut off possible overflows outside the\r\n # intended range due to roundoff error, so that we can absolutely guarantee\r\n # that on output, there are no values > amax or < amin.\r\n return np.clip(s * arr + d, amin, amax)", "def scale(c, scalar):\n return [c[0]*scalar, c[1]*scalar]", "def scale(arrayin,Amin,Amax,mask=None):\r\n if (mask==None) and (arrayin.max() - arrayin.min())!=0.0 :\r\n Bmax = arrayin.max()\r\n Bmin = arrayin.min()\r\n elif (arrayin.max() - arrayin.min())!=0.0 :\r\n ny = arrayin.shape[0]\r\n nx = arrayin.shape[1]\r\n Bmax = arrayin.min()\r\n Bmin = arrayin.max()\r\n for i in range(ny):\r\n for j in range(ny):\r\n if mask[i,j] > 0.5e0 :\r\n if arrayin[i,j] < Bmin :\r\n Bmin = arrayin[i,j]\r\n if arrayin[i,j] > Bmax :\r\n Bmax = arrayin[i,j]\r\n else :\r\n print \"andrew.bagOfns.scale : warning (arrayin.max() - arrayin.min())=0.0 \"\r\n return np.copy(arrayin)\r\n\r\n arrayout = (arrayin - Bmin)*(Amax - Amin) / (Bmax - Bmin) + Amin\r\n return arrayout", "def normalize(array):\n array_min, array_max = array.min(), array.max()\n return ((array - array_min)/(array_max - array_min))", "def _scale(self, normalize, mat):\n mat = mat.astype(float)\n if normalize:\n mat = sklearn_norm(mat,\n feature_range=(0, 1),\n axis=0,\n copy=True)\n else:\n return mat\n return mat", "def scale_to_start(x):\n x = (x + eps) / (x[0] + eps)\n return x", "def scale(inp, ab):\n\n return inp * ab[0] + ab[1]", "def _normalize_(x: np.array) -> np.array:\n if x.max() != 0:\n x = x / x.max()\n return np.clip(x, 0, 1)# ensure that no values are >1\n else:\n raise ZeroDivisionError('Image Normalization')", "def melscale(f):\n return 1125.0 * np.log(1 + f / 700.0)", "def normalize(a, new_max=1.0):\n a = (a - a.min())\n a = a/a.max()\n a *= new_max\n return a", "def transform(self, X):\n X = np.asarray(X, dtype=np.float64)\n X *= self.scale_\n X += self.min_\n return X", "def normalize(array):\n\treturn array/np.max(array)", "def db2lin(x, scale=10):\n return 10**(x/scale)", "def vec_scale (x, alpha):\n return [x_i*alpha for x_i in x]", "def scale_img(img):\r\n # Scale values of img between 0 and 255.\r\n img -= np.amin(img)\r\n img /= np.amax(img)\r\n img *= 255\r\n return img", "def _normalize(array):\n\treturn (array - 
np.min(array))/(np.max(array)-np.min(array))", "def normalize(vals):\n min_val = torch.min(vals)\n max_val = torch.max(vals)\n return (vals - min_val) / (max_val - min_val)", "def apply_scale( vertices, scale=1.0 ):\n checkVerticesValidity( vertices )\n if type(scale) != float:\n raise ValueError\n \n for i in range(len(vertices)):\n v = vertices[i]\n tmpv = [v[0]*scale, v[1]*scale, v[2]*scale]\n vertices[i] = tmpv", "def scale_zoom(x, start, end):\n length = len(x)\n start_index = int(np.round(length * start))\n end_index = int(np.round(length * end))\n if start_index >= end_index:\n if start_index <= 3:\n start_index = 0\n end_index = 3\n else:\n start_index = end_index - 3\n return normalize_1d(x[start_index:end_index])", "def normalize(self, arr):\r\n\r\n\t\t#Set the cap for arr at self.value_max and self.value_max\r\n\t\t#this prevents outliers of breaking the previously predicted p_func\r\n\t\tarr_capped = arr * (arr <= self.value_max) + self.value_max * (arr > self.value_max)\t#cap to value_max\r\n\t\tarr_capped = arr_capped * (arr_capped >= self.value_min) + self.value_min * (arr_capped < self.value_min)\t#cap to value_min\r\n\r\n\t\t#Normalize array\r\n\t\tnorm_factor = self.get_norm_factor(arr_capped)\r\n\t\tnormalized = arr * norm_factor\r\n\r\n\t\treturn(normalized)", "def scale(x, p=2, inplace=False):\n return x / np.linalg.norm(x, ord=p)", "def normalise(values):\n max_value = max(values)\n min_value = min(values)\n factor = 32767.0 / max(max_value, abs(min_value))\n return (int(v * factor) for v in values)", "def scale(inp, ab):\n\n return inp * ab[0] + ab[1]\n # pass", "def scaled(values, output_min, output_max, input_min=0, input_max=1):\n values = _normalize(values)\n if input_min >= input_max:\n raise ValueError('input_min must be smaller than input_max')\n input_size = input_max - input_min\n output_size = output_max - output_min\n for v in values:\n yield (((v - input_min) / input_size) * output_size) + output_min", "def unit_scale(x, eps=1e-8):\n\tx = x.copy()\n\tx -= x.min()\n\tx *= 1.0 / (x.max() + eps)\n\treturn x", "def scale(data, factor):\n\n if np.ndim(data) != 2: # only process one IV dataset at a time\n raise IndexError('Incorrect data format')\n\n if np.size(data, 0) < np.size(data, 1):\n data = data.T # make sure data is in columns\n\n # match data types for float multiplication/division\n new_data = data.copy().astype(float)\n\n new_data[:, 1] *= factor\n\n return new_data", "def get_scale_from_linear_transform(A):\n _, _, S = decompose_rws(A)\n return abs(S.a), abs(S.e)", "def scale(self,n,d=1):\r\n\t\t\r\n\t\t# scale all terms\r\n\t\ts = [i.scale(n,d) for i in self]\r\n\t\t\r\n\t\treturn Li(s)", "def scale(self):", "def scale_uniform(self, s: float):\n self.vertices = [v * s for v in self.vertices]\n return self", "def scale(val, src, dst):\n return ((val - src[0]) / (src[1] - src[0])) * (dst[1] - dst[0]) + dst[0]", "def normalize(img):\r\n min = img.min()\r\n max = img.max()\r\n x = 2.0 * (img - min) / (max - min) - 1.0\r\n return x", "def rescale(num, old_min, old_max, new_min, new_max):\n old_range = old_max - old_min\n new_range = new_max - new_min\n new_val = new_min + (((num - old_min) * new_range)/old_range)\n\n return new_val", "def scaleClipl(x):\n x = 0 if x < 0 else x\n x = 1 if x > 1 else x\n return int(round(x*255.))", "def get_linear_value(self, scaled_value):\r\n result = (scaled_value - self.minvalue) / (self.maxvalue - self.minvalue)\r\n if self.logarithmic_factor != 1.0:\r\n result *= exp(self.logarithmic_factor * 4.0) - 1\r\n result = 
log(result + 1.0) / (4.0 * self.logarithmic_factor)\r\n return result", "def autoscale(self, A):\n self.vmin = ma.min(A)\n self.vmax = ma.max(A)", "def normalize(data, vmin=0, vmax=1):\n data = np.array(data, dtype=np.float64)\n return (vmin + (data - data.min()) * (vmax - vmin) / (data.max() - data.min())).tolist()", "def normalize(A: np.array) -> np.array:\n for i in range(A.shape[1]):\n A[:, i] = (A[:, i] - np.min(A[:, i])) / (np.max(A[:, i] - np.min(A[:, i])))\n return A", "def scale(val, src, dst):\r\n return ((val - src[0]) / (src[1]-src[0])) * (dst[1]-dst[0]) + dst[0]", "def normalise_0_1(arraylike):\n array_min = np.min(arraylike)\n array_max = np.max(arraylike)\n normalised = (arraylike - array_min) / (array_max - array_min)\n # convert to float\n normalised = np.array(normalised).astype(float)\n return normalised, array_min, array_max", "def scale(x: pd.Series, a: int = 1) -> pd.Series:\n # todo check this implementation is right\n assert isinstance(x.index, pd.MultiIndex)\n return x.groupby(level=0).apply(lambda e: a * e / e.abs().sum())", "def scaling():\n \n for i in range(cfg.nfea):\n dm = 0\n var = 0\n for j in range(cfg.ntrain):\n dm += cfg.a[j,i]\n dm = dm/cfg.ntrain\n \n for j in range(cfg.ntrain):\n var += (cfg.a[j,i]-dm)**2\n\n var = var/cfg.ntrain\n var = np.sqrt(var)\n \n if var >= 10**(-5):\n cfg.clin[i] = 1.0/var \n cfg.dlin[i] = -dm/var \n \n else: \n if np.abs(dm)<=1.0:\n cfg.clin[i] = 1.0\n cfg.dlin[i] = 0.0 \n else: \n cfg.clin[i] = 1.0/dm\n cfg.dlin[i] = 0.0 \n \n for j in range(cfg.ntrain):\n cfg.a_scaled[j,i] = cfg.clin[i]*cfg.a[j,i] + cfg.dlin[i]\n \n return", "def _normalize(a: np.ndarray, u: float=0, s: float=1) -> np.ndarray:\n a_norm = (a - np.mean(a)) / (np.std(a) + STABILITY)\n a_rescaled = a_norm * s + u\n\n return a_rescaled", "def normalize_range(array, floor=0, ceil=1):\n scaler = MinMaxScaler(feature_range=(floor, ceil), copy=True)\n return scaler.fit_transform(array)", "def rescale_toa(arr, dtype=np.float32):\n # First look at raw value dists along bands\n\n arr_trans = np.subtract(arr, arr.min(axis=(1, 2))[:, np.newaxis, np.newaxis])\n arr_rs = np.divide(arr_trans, arr_trans.max(axis=(1, 2))[:, np.newaxis, np.newaxis])\n if dtype == np.uint8:\n arr_rs = np.array(arr_rs*255, dtype=np.uint8)\n return arr_rs", "def scale_value(value, ip_range, domain=(0,1)):\n x1, x2 = domain\n y1, y2 = ip_range\n\n assert(y1 <= value <= y2)\n\n m = (x2 - x1)/(y2 - y1)\n b = y1 - m * x1\n return m * value - b", "def scale(val, src, dst):\n return ((val - src[0]) / (src[1]-src[0])) * (dst[1]-dst[0]) + dst[0]", "def scale(val, src, dst):\n return ((val - src[0]) / (src[1]-src[0])) * (dst[1]-dst[0]) + dst[0]", "def normScale( x, y ):\n if x == 0 and y == 0:\n return 0\n else:\n return 1.0 / pow( x*x + y*y, 0.5 )", "def scale(v: InputTensor) -> t.Tensor:\n v = util.to_tensor(v, dtype=t.float32)\n assert len(v.shape) == 1\n return t.diag(t.cat([v, v.new_ones([1])], dim=0))", "def _rescale(x, xlim, ylim):\n m = (ylim[1] - ylim[0]) / (xlim[1] - xlim[0])\n c = ylim[1] - m * xlim[1]\n y = m * x + c\n return y", "def log_scale(self, value: float):\n assert value > 1\n self.__log_scale = value\n self.logarithmic = self.logarithmic", "def scaleto11(val,check=True):\n if type(val) is not list and type(val) is not np.ndarray:\n val = [val]\n if type(val) is list:\n val = np.array(val)\n assert type(val) is np.ndarray\n assert not check or np.all((val==0) + (val==1))\n return val*2-1", "def set_scales(l_min, l_max, N_scales):\r\n t1=1\r\n t2=2\r\n s_min = t1 / l_max\r\n s_max = 
t2 / l_min\r\n # Scales should be decreasing ... higher j should give larger s\r\n s = np.exp(np.linspace(np.log(s_max), np.log(s_min), N_scales));\r\n\r\n return s", "def scale(X, *, axis=..., with_mean=..., with_std=..., copy=...):\n ...", "def linear(minVal, maxVal, newMin, newMax, value):\n coef = ((float(value) - float(minVal)) * 100) / (float(maxVal) - float(minVal))\n newVal = float(newMin) + ((coef * (float(newMax) - float(newMin))) / 100)\n return newVal", "def scalenans(X) :\n\tXscale = (X - np.nanmean(X)) / np.nanstd(X)\n\treturn Xscale", "def scale(self,factor):\n for x in range(len(self.coord)):\n self.coord[x] = np.array([y*factor for y in self.coord[x]])\n return self", "def normalize(x):\n return (x - math_ops.reduce_min(x)) / (math_ops.reduce_max(x) - math_ops.reduce_min(x))" ]
[ "0.7274082", "0.71151817", "0.7070224", "0.70607144", "0.695351", "0.6909782", "0.6863304", "0.683024", "0.6815376", "0.68095505", "0.6759205", "0.6644985", "0.6604848", "0.65765154", "0.65633816", "0.65503216", "0.6484777", "0.6470204", "0.644784", "0.64431757", "0.64138615", "0.6412299", "0.63703316", "0.6368305", "0.635279", "0.63487786", "0.63365394", "0.63241345", "0.6312529", "0.62925816", "0.6270397", "0.6266954", "0.6264509", "0.62500376", "0.6249947", "0.62447846", "0.6241726", "0.6234611", "0.62227666", "0.6180602", "0.61788595", "0.6173485", "0.6169719", "0.6167993", "0.6158148", "0.6158062", "0.61487454", "0.6131403", "0.611953", "0.61177063", "0.6116741", "0.6101827", "0.6099496", "0.60966784", "0.6077089", "0.60758835", "0.60648346", "0.6055476", "0.6051298", "0.60324603", "0.6030318", "0.6029164", "0.60280526", "0.6024427", "0.60067695", "0.5998509", "0.5995074", "0.5985069", "0.5975842", "0.5975013", "0.5965923", "0.5963612", "0.5950549", "0.5950496", "0.59492475", "0.59462225", "0.5942523", "0.59367305", "0.5930805", "0.5929865", "0.5923911", "0.59142786", "0.5902135", "0.59006417", "0.5896553", "0.5894566", "0.58925635", "0.5891534", "0.5891534", "0.5884551", "0.5883143", "0.58830166", "0.5880381", "0.58752066", "0.5875017", "0.5874347", "0.58740777", "0.5867836", "0.5865118", "0.5852829" ]
0.7821479
0
This is a doc string
def exercise_7():
    print('=' * 30)
    print('Running exercise_7()')

    #### YOUR CODE HERE ####
    numpy.random.seed(7)  # set the numpy random seed to 7

    # This determines how many times we "throw" the
    # 2 six-sided dice in an experiment
    num_dice_throws = 10000  # don't edit this!

    # This determines how many trials in each experiment
    # ... that is, how many times we'll throw our two
    # 6-sided dice num_dice_throws times
    num_trials = 10  # don't edit this!

    # Yes, you can have functions inside of functions!
    def run_experiment():
        trial_outcomes = list()
        for trial in range(num_trials):
            #### YOUR CODE HERE ####
            doubles = 0  # number of occurrences of doubles in one trial
            for throws in range(num_dice_throws):
                throw_0 = 1  # number of throws in one trial
                outcome = numpy.random.randint(1, 7, size=2)  # generate two throws
                if outcome[0] == outcome[1]:
                    doubles = doubles + 1  # count the number of doubles

            # In the following, make it so that probability_estimate is an estimate
            # of the probability of throwing 'doubles' with two fair six-sided dice
            # (i.e., the probability that the dice end up with the same values)
            # based on throwing the two dice num_dice_throws times.
            probability_estimate = doubles / num_dice_throws

            # Save the probability estimate for each trial (you don't need to change
            # this next line)
            trial_outcomes.append(probability_estimate)
            trial = trial + 1
        return trial_outcomes

    experiment_outcomes_1 = run_experiment()
    print(f'experiment_outcomes_1: {experiment_outcomes_1}')

    print(f'do it again!')
    experiment_outcomes_2 = run_experiment()
    print(f'experiment_outcomes_2: {experiment_outcomes_2}')

    print('Now reset the seed')
    #### YOUR CODE HERE ####
    numpy.random.seed(7)  # reset the numpy random seed back to 7
    experiment_outcomes_3 = run_experiment()
    print(f'experiment_outcomes_3: {experiment_outcomes_3}')

    print("DONE exercise_7()")
    return experiment_outcomes_1, experiment_outcomes_2, experiment_outcomes_3
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def DocString():\n return", "def doc_string():\n pass # pass does nothing", "def get_doc_string(self) -> str:\n r = \"Undocumented\"\n if self.doc is not None: r = self.doc\n return r", "def raw_doc(self):\n try:\n return str(self.definition.docstr)\n except AttributeError:\n return ''", "def main_docstring():", "def __doc__(self, ???):", "def testSummaryDOCstr(self):\n pass", "def test_doc_representation(self):\n doc_str = \"# This is a comment\\n* One\\nText text\"\n doc = parser.parse(doc_str)\n\n self.assertEqual(str(doc), doc_str)\n\n doc_str = \"Text\\nMore text\\n\\n\\nSome empty lines and text\\n* HL\"\n doc = parser.parse(doc_str)\n\n self.assertEqual(str(doc), doc_str)\n\n doc_str = \"- List one\\n + Slist one\\n + Slist two\"\n doc = parser.parse(doc_str)\n\n self.assertEqual(str(doc), doc_str)", "def doc(self,str):\n d = str.replace('\\n',' ')\n d = d.replace('\\t',' ')\n while d.find(' ') > -1: d = d.replace(' ',' ')\n while d[0] in '\"\\'\\t ': d = d[1:]\n while d[-1] in '\"\\'\\t ': d = d[:-1]\n self.docstr = d", "def __call__(self, doc):\n return doc", "def getdoc(object):\r\n try:\r\n doc = object.__doc__\r\n except AttributeError:\r\n return None\r\n if not isinstance(doc, types.StringTypes):\r\n return None\r\n return cleandoc(doc)", "def documento():\r\n\tpass", "def dummy(doc):\r\n return doc", "def getDoc(self):\r\n return self.__doc__", "def doc(self):\n return \"\\n\".join(self.docLines)", "def docs():", "def test_doc():\n pass", "def fini_doc(self):\n raise NotImplementedError()", "def test__get_doc():\n docstring = util._get_doc(\"midgard\")\n assert isinstance(docstring, str) and len(docstring) > 0", "def func_doc():", "def init_doc(self):\n raise NotImplementedError()", "def docstrings(param1, param2):\n return \"example string\"", "def docstring_hack():\n pass", "def __call__(self, doc: Doc) -> Doc:\n return doc", "def __call__(self, doc: Doc) -> Doc:\n return doc", "def document(self):\n ...", "def test_doc_string():\n # Create fake profiles library by named tuples\n faker_db = session10.create_fake_library_by_namedtuple(10)\n\n assert len(faker_db.__doc__) > 0 , \"Doc string is missing\"", "def __buildDocumentClassDocString():\n\n # build a dictionary of tags and their descriptions, seems a little over\n # the top, but keeps all the information in one place\n tagsStrings = {\n \"comment\" : \"Define the comment string\",\n \"define\" : \"Define the symbol name for #define's\",\n \"info\" : \"Information string, to end up in the 'info' output\",\n \"instance\" : \"Instance name\",\n \"matlabRoot\" : \"Name of variable used by the matlab output\",\n \"members\" : \"List of symbols, which are going to be children of this symbol\",\n \"name\" : \"Name of this symbol\",\n \"size\" : \"Size of this symbol, i.e. 
indicate it is an array\",\n \"subtype\" : \"Define the actual type of general symbol\",\n \"symbol\" : \"Define a symbol, either a top level entity a child in a members\",\n \"test\" : \"Define the preprocessor test\",\n \"text\" : \"Text to put into a banner symbol\",\n \"title\" : \"Set the overall document title\",\n \"value\" : \"Define a value for this symbol\",\n \"valuesRequired\" : \"Does the enumeration allow automatic value assignment in entries\",\n }\n # build the list of classes\n classes = dict(filter(lambda (k,v): type(v) == types.ClassType, globals().iteritems()))\n (tagsUsed, optionsUsed) = buildKeys(classes)\n\n # build the string we are going to add to the document class\n s = \"Document class that represents the XML document and contains the data.\\n\\n\"\n s += \"Available tags:\\n\"\n\n for tag in tagsStrings:\n try:\n used = \" Required by : %s\\n\" % (\", \".join(tagsUsed[tag]))\n except KeyError:\n used = \"\"\n try:\n opts = \" Optional for: %s\\n\" % (\", \".join(optionsUsed[tag]))\n except KeyError:\n opts = \"\"\n s += \" %s\\n %s\\n %s\\n\\n%s%s\\n\" % (tag, \"-\"*len(tag), tagsStrings[tag], used, opts)\n\n return s", "def pythondoc(self, irc, msg, args, num, req):\n self.googleq('http://docs.python.org/library/', req, num, irc)", "def __repr__(self) -> str:\n return f\"<Doc[{self.desc}]>\"", "def documentation():\n return auto.html()", "def lispdoc(self, irc, msg, args, num, req):\n self.googleq('http://lispdoc.com/', req, num, irc)", "def test_docstring(self):\n self.assertIsNotNone(Review.__doc__)\n self.assertIsNotNone(Review.text.__doc__)", "def empty_fn_docstr_pass():\n pass", "def doc(self):\n doc = self.get('doc')\n if doc:\n from .config import defaults\n return defaults.types.doc(doc)", "def short_doc(obj):\n if obj.__doc__:\n lines = obj.__doc__.strip(' \\n').splitlines()\n if lines:\n return lines[0]\n return None", "def test_doc(cls, type_str):\n do_doc_test(cls, type_str)", "def empty_fn_docstr():", "def getdoc(obj):\n try:\n doc = obj.__doc__\n except AttributeError:\n return None\n if not isinstance(doc, str):\n return None\n return inspect.cleandoc(doc)", "def consistent_documentation():\n\n return 3", "def cppdoc(self, irc, msg, args, num, req):\n self.googleq('www.cplusplus.com/reference/', req, num, irc)", "def magic_pdoc(self, parameter_s=''):\n self._inspect('pdoc',parameter_s)", "def describe_docstring(doc_string, indentation=None):\n text = escape_triple_quotes(doc_string)\n text = u'\"\"\"\\n' + text + '\\n\"\"\"\\n'\n\n if indentation:\n text = indent(text, indentation)\n return text", "def rawDoc(self):\n return self.namespace[\"__doc__\"]", "def doc(obj):\n return Documentation.fromObject(obj).first", "def test_docstring(self):\n self.assertIsNotNone(Review.__doc__)", "def doc(self):\n try:\n return self.definition.doc\n except AttributeError:\n return self.raw_doc", "def doc(docid):\n\t\n\tdata = {'id':docid,\n\t\t\t'text':\"Some made up text for classification\"}\n\treturn _send(data)", "def get_test_doc(self, name, doc_string=None):\n # PARSE OUT THE DOXYGEN SETTINGS FROM THE STRING\n doc_string = doc_string if doc_string is not None else self._testMethodDoc\n doc_list = [x.strip().replace('\\t', '') for x in doc_string.split('\\n') if len(x.strip().replace('\\t', '')) > 0]\n # CREATE DOC DICT OBJECT\n doc_dict = dict(name=name, title=doc_list[0], bug='', tags=[])\n for val in doc_list[1:]:\n if '@test' in val:\n doc_dict['test'] = val[5:]\n elif '@arg' in val:\n doc_dict['tags'] = [x.lower() for x in val[4:].split()]\n elif 
'@attention' in val:\n doc_dict['priority'] = val[10:]\n elif '@author' in val:\n doc_dict['author'] = val[7:]\n elif '@date' in val:\n doc_dict['date'] = val[5:]\n elif '@bug' in val:\n doc_dict['bug'] = val[4:].strip()\n elif '@version' in val:\n doc_dict['version'] = int(val[8:].replace(\".\", \"\"))\n elif '@deprecated' in val:\n doc_dict['deprecated'] = True\n elif '@note' in val:\n env_result = re.findall('ENV=(\\w+)', val.upper())\n if len(env_result) > 0:\n doc_dict['datacenters'] = env_result\n env_not = re.findall('ENV!=(\\w+)', val.upper())\n if len(env_not) > 0:\n doc_dict['no_environment'] = env_not\n priority = re.findall('priority=(\\d)', val.lower())\n if len(priority) > 0:\n doc_dict['priority'] = priority[0]\n else:\n doc_dict['note'] = val[5:]\n # CHECK IF TITLE AND NOTE NOT IN DOC_DICT\n if 'title' not in doc_dict:\n doc_dict['Title'] = 'TEST DOCUMENTATION DOES NOT INCLUDE THE TEST TITLE'\n if 'test' not in doc_dict:\n doc_dict['Test'] = 'TEST DOCUMENTATION DOES NOT INCLUDE TEST DESCRIPTION'\n self.doc_dict = doc_dict", "def triple_quote_docs():\n return", "def get_documentation(path=\"\"):\n return \"\"\"<HTML><head><title>Python Minidoc for \"\"\"+path+\"\"\"</title></head>\n <body>\n \"\"\"+get_documentation_body(path)+\"\"\"\n </body></html>\"\"\"", "def beehive_make_doc(self):\n run_data = {\n u'tags':[u'doc'],\n u'local_package_path':self.local_package_path\n } \n self.ansible_playbook(u'docs', run_data, \n playbook=self.beehive_doc_playbook)", "def _add_doc(func, doc):\r\n func.__doc__ = doc", "def _add_doc(func, doc):\r\n func.__doc__ = doc", "def _add_doc(func, doc):\r\n func.__doc__ = doc", "def test_docstring(self):\n self.assertIsNotNone(Base.__doc__)", "def documentation(self) -> str:\n return pulumi.get(self, \"documentation\")", "def get_docstring(self, s, base=None):\n if base is not None:\n self.params[base] = s\n return s", "def __init__ (self, docstring, name, isInternal):\n\n # Take out excess leading blank lines.\n docstring = re.sub('/\\*\\*(\\s+\\*)+', r'/** \\n *', docstring)\n\n self.docstring = docstring\n self.name = name\n self.isInternal = isInternal", "def name(self):\n return self.__doc__.split('\\n')[0]", "def inherits_doc():\n pass", "def get_doc(filename: str) -> str:\n\n # Create the header.\n doc = \"# `\" + filename.split(\"/\")[-1] + \"`\\n\\n\"\n\n lines: List[str] = Path(filename).read_text().split(\"\\n\")\n\n for i in range(len(lines)):\n # Create a class description.\n if lines[i].startswith(\"class\"):\n # Skip private classes.\n match = re.search(\"class _(.*):\", lines[i])\n if match is not None:\n continue\n # Add the name of the class\n class_name = re.search(\"class (.*):\", lines[i]).group(1)\n doc += f\"## `{class_name}`\\n\\n\"\n # Add an example.\n class_example = f\"`from tdw.{filename[:-3].replace('/', '.')} import \" + re.sub(r\"(.*)\\((.*)\\)\", r'\\1',\n class_name) + \"`\"\n doc += class_example + \"\\n\\n\"\n doc += PyDocGen.get_class_description(lines, i)\n # Parse an enum.\n if re.search(r\"class (.*)\\(Enum\\):\", lines[i]) is not None:\n doc += \"\\n\\n\" + PyDocGen.get_enum_values(lines, i)\n doc += \"\\n\\n***\\n\\n\"\n # Create a function description.\n elif lines[i].strip().startswith(\"def\"):\n # Skip private functions.\n match = re.search(\"def _(.*)\", lines[i])\n if match is not None and \"__init__\" not in lines[i]:\n continue\n # Append the function description.\n doc += PyDocGen.get_function_documentation(lines, i) + \"\\n\\n***\\n\\n\"\n\n # Move the \"main class\" to the top of the document.\n 
main_class_name = ''.join(x.capitalize() or '_' for x in filename[:-3].split('_'))\n main_class = re.search(\"(## `\" + main_class_name + \"`((.|\\n)*))\", doc)\n if main_class is not None:\n main_class = main_class.group(1)\n doc_header = re.search(\"(.*)\\n\\n\", doc).group(0)\n doc_temp = doc.replace(main_class, \"\").replace(doc_header, \"\")\n doc = doc_header + main_class + doc_temp\n\n return doc", "def docType():\n return (u'<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n'\n u'<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 '\n u'Transitional//EN\" '\n u'\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\\n')", "def doDocStrings(parentNode, srcNode):\n def makeDocElement(name, content):\n node = libxml2.newNode(name)\n node.addChild(libxml2.newText(content))\n return node\n \n autodoc = getAttr(srcNode, \"python_autodoc\")\n docstr = getAttr(srcNode, \"feature_docstring\")\n if autodoc:\n parentNode.addChild(makeDocElement(\"autodoc\", autodoc))\n if docstr:\n parentNode.addChild(makeDocElement(\"docstring\", docstr))", "def get_documentation(self, *args, **dargs):\n pass", "def get_main_help(self):\r\n return __doc__.strip()", "def phpdoc(self, irc, msg, args, num, req):\n self.googleq('http://php.net/manual/en/', req, num, irc)", "def test_class_docstrings(self):\n self.assertGreater(len(self.storage.__doc__), 1)", "def _add_doc(func, doc):\n func.__doc__ = doc", "def _parse_docstring(doc):\n _cache_key = doc\n try:\n return _parse_docstring_cache[_cache_key]\n except KeyError:\n pass\n\n if doc is None:\n return _Doc('', '', {})\n\n # Convert Google- or Numpy-style docstrings to RST.\n # (Should do nothing if not in either style.)\n doc = str(GoogleDocstring(doc))\n doc = str(NumpyDocstring(doc))\n\n tree = publish_doctree(doc)\n\n class Visitor(NodeVisitor):\n optional = [\n 'document', 'docinfo',\n 'field_list', 'field_body',\n 'literal', 'problematic']\n\n def __init__(self, document):\n NodeVisitor.__init__(self, document)\n self.paragraphs = []\n self.start_lines = []\n self.params = defaultdict(dict)\n self._current_paragraph = None\n self._indent_iterator_stack = []\n self._indent_stack = []\n\n def _do_nothing(self, node):\n pass\n\n def visit_paragraph(self, node):\n self.start_lines.append(node.line)\n self._current_paragraph = []\n\n def depart_paragraph(self, node):\n text = ''.join(self._current_paragraph)\n text = ''.join(self._indent_stack) + text\n self._indent_stack = [\n ' ' * len(item) for item in self._indent_stack]\n text = text.replace('\\n', '\\n' + ''.join(self._indent_stack))\n self.paragraphs.append(text)\n self._current_paragraph = None\n\n def visit_Text(self, node):\n self._current_paragraph.append(node)\n\n depart_Text = _do_nothing\n\n def visit_emphasis(self, node):\n self._current_paragraph.append('\\033[3m') # *foo*: italic\n\n def visit_strong(self, node):\n self._current_paragraph.append('\\033[1m') # **foo**: bold\n\n def visit_title_reference(self, node):\n self._current_paragraph.append('\\033[4m') # `foo`: underlined\n\n def _depart_markup(self, node):\n self._current_paragraph.append('\\033[0m')\n\n depart_emphasis = depart_strong = depart_title_reference = \\\n _depart_markup\n\n def visit_literal_block(self, node):\n text, = node\n self.start_lines.append(node.line)\n self.paragraphs.append(re.sub('^|\\n', r'\\g<0> ', text)) # indent\n raise SkipNode\n\n def visit_bullet_list(self, node):\n self._indent_iterator_stack.append(\n (node['bullet'] + ' ' for _ in range(len(node))))\n\n def depart_bullet_list(self, node):\n 
self._indent_iterator_stack.pop()\n\n def visit_enumerated_list(self, node):\n enumtype = node['enumtype']\n fmt = {('(', ')'): 'parens',\n ('', ')'): 'rparen',\n ('', '.'): 'period'}[node['prefix'], node['suffix']]\n try:\n start = node['start']\n except KeyError:\n start = 1\n else:\n start = {\n 'arabic': int,\n 'loweralpha': lambda s: ord(s) - ord('a') + 1,\n 'upperalpha': lambda s: ord(s) - ord('A') + 1,\n 'lowerroman': lambda s: roman.fromRoman(s.upper()),\n 'upperroman': lambda s: roman.fromRoman(s),\n }[enumtype](start)\n enumerators = [Body(None).make_enumerator(i, enumtype, fmt)[0]\n for i in range(start, start + len(node))]\n width = max(map(len, enumerators))\n enumerators = [enum.ljust(width) for enum in enumerators]\n self._indent_iterator_stack.append(iter(enumerators))\n\n def depart_enumerated_list(self, node):\n self._indent_iterator_stack.pop()\n\n def visit_list_item(self, node):\n self._indent_stack.append(next(self._indent_iterator_stack[-1]))\n\n def depart_list_item(self, node):\n self._indent_stack.pop()\n\n def visit_field(self, node):\n field_name_node, field_body_node = node\n field_name, = field_name_node\n parts = field_name.split()\n if len(parts) == 2:\n doctype, name = parts\n elif len(parts) == 3:\n doctype, type_, name = parts\n if doctype not in _PARAM_TYPES:\n raise SkipNode\n if 'type' in self.params[name]:\n raise ValueError('type defined twice for {}'.format(name))\n self.params[name]['type'] = type_\n else:\n raise SkipNode\n if doctype in _PARAM_TYPES:\n doctype = 'param'\n if doctype in _TYPE_NAMES:\n doctype = 'type'\n if doctype in self.params[name]:\n raise ValueError('{} defined twice for {}'.format(doctype, name))\n visitor = Visitor(self.document)\n field_body_node.walkabout(visitor)\n self.params[name][doctype] = ''.join(visitor.paragraphs)\n raise SkipNode\n\n def visit_comment(self, node):\n raise SkipNode\n\n def visit_system_message(self, node):\n raise SkipNode\n\n visitor = Visitor(tree)\n tree.walkabout(visitor)\n\n tuples = {name: _Param(values.get('param'), values.get('type'))\n for name, values in visitor.params.items()}\n if visitor.paragraphs:\n text = []\n for start, paragraph, next_start in zip(\n visitor.start_lines,\n visitor.paragraphs,\n visitor.start_lines[1:] + [0]):\n text.append(paragraph)\n # We insert a space before each newline to prevent argparse\n # from stripping consecutive newlines down to just two\n # (http://bugs.python.org/issue31330).\n text.append(' \\n' * (next_start - start - paragraph.count('\\n')))\n parsed = _Doc('', ''.join(text), tuples)\n else:\n parsed = _Doc('', '', tuples)\n _parse_docstring_cache[_cache_key] = parsed\n return parsed", "def has_doc() -> None:", "def __init__(self, doc_string: ast3.Expr, doc_type: Type) -> None:\n self._doc = doc_string\n self.value = doc_string.value.s.strip() # type: ignore\n desc = Tags.DESC.regex.search(self.value)\n ret = Tags.RETURN.regex.search(self.value)\n cc = Tags.CC.regex.search(self.value) # pylint: disable=invalid-name\n author = Tags.AUTHOR.regex.search(self.value)\n todo = Tags.TODO.regex.search(self.value)\n\n self.no_lint = bool(Tags.NO_LINT.regex.search(self.value))\n self.no_doc = bool(Tags.NO_DOC.regex.search(self.value))\n self.todo = todo[1] if todo else \"\"\n\n self.desc = desc[1] if desc else \"\"\n self.args = {\n x: y\n for x, y in Tags.ARG.regex.findall(self.value)\n if x not in DEFAULT_ARG_IGNORE\n }\n self.links = Tags.LINK.regex.findall(self.value)\n self.ret = ret[1] if ret else \"\"\n self.author = author[1] if author else \"\"\n 
self.cc = int(cc[1] if cc else -1) # pylint: disable=invalid-name\n\n self.notes = Tags.NOTE.regex.findall(self.value)\n self.warnings = Tags.WARN.regex.findall(self.value)", "def docstring(self, docstring): # type: (str) -> None\n self._tmp_docstring = inspect.cleandoc(docstring)", "def documentation():\n return render_template('help.html')", "def test_docstring(self):\n self.assertTrue(len(BaseModel.__doc__) > 1)\n self.assertTrue(len(BaseModel.__init__.__doc__) > 1)\n self.assertTrue(len(BaseModel.__str__.__doc__) > 1)\n self.assertTrue(len(BaseModel.save.__doc__) > 1)\n self.assertTrue(len(BaseModel.to_dict.__doc__) > 1)", "def get_doc(self) -> Documentation:\n r : Documentation = [self.get_doc_string()]\n r_src = \"\"\n if hasattr(self,\"_path\"): r_src += \"locally at '%s'\" % (str(self._path))\n if self.url is not None: r_src += \" remote url(orig) '%s'\" % (self.url)\n r_src += \" remote url(parsed) '%s'\" % (self.git_url.as_string())\n if self.branch is not None: r_src += \" branch '%s'\" % (self.branch)\n r.append(r_src)\n r_stages = []\n for (sn,s) in self.stages.items():\n r_stages.append(sn)\n pass\n r_stages.sort()\n if len(r_stages)>0:\n r.append(\"Stages: %s\"%(\" \".join(r_stages)))\n pass\n return r", "def _get_doc(self, name):\r\n doc = \"No documentation for %s\" % name\r\n\r\n engine = self._engine\r\n if not engine:\r\n msg = \"Session is not open\"\r\n raise Oct2PyError(msg)\r\n doc = engine.eval('help(\"%s\")' % name, silent=True)\r\n\r\n if \"syntax error:\" in doc.lower():\r\n raise Oct2PyError(doc)\r\n\r\n if \"error:\" in doc.lower():\r\n doc = engine.eval('type(\"%s\")' % name, silent=True)\r\n doc = \"\\n\".join(doc.splitlines()[:3])\r\n\r\n default = self.feval.__doc__\r\n default = \" \" + default[default.find(\"func_args:\") :] # type:ignore\r\n default = \"\\n\".join([line[8:] for line in default.splitlines()])\r\n\r\n doc = \"\\n\".join(doc.splitlines())\r\n doc = \"\\n\" + doc + \"\\n\\nParameters\\n----------\\n\" + default\r\n doc += \"\\n**kwargs - Deprecated keyword arguments\\n\\n\"\r\n doc += \"Notes\\n-----\\n\"\r\n doc += \"Keyword arguments to dynamic functions are deprecated.\\n\"\r\n doc += \"The `plot_*` kwargs will be ignored, but the rest will\\n\"\r\n doc += \"used as key - value pairs as in version 3.x.\\n\"\r\n doc += \"Use `set_plot_settings()` for plot settings, and use\\n\"\r\n doc += \"`func_args` directly for key - value pairs.\"\r\n return doc", "def test_doc_file(self):\n expected = '\\nAmenity Class from Models Module\\n'\n actual = models.amenity.__doc__\n self.assertEqual(expected, actual)", "def test(self):\n self.note(\"Test Note\", \"\"\" This is a note.\nsecond line\"\"\", \"date\")", "def get_doc(cls_or_func):\n try:\n return cls_or_func.__doc__.split(\"\\n\")[0].strip()\n except (AttributeError, IndexError):\n return None", "def get_description(self):\n return re.sub('\\n\\W+',' ', self.__doc__)", "def parse_docs(docs):\n if not docs:\n return __name__, \"<no documentation>\"\n docs = docs.strip().split('\\n')\n for i, line in enumerate(docs):\n docs[i] = line.strip()\n return docs[0], ' '.join(docs[1:]) if len(docs[1:]) else \"<no documentation>\"", "def shortDescription(self):\n doc = self._testMethodDoc\n\n if doc is not None:\n doc = doc.split('\\n\\n', 1)[0]\n doc = self.ws_re.sub(' ', doc).strip()\n\n return doc", "def shortDescription(self):\n doc = self._testMethodDoc\n\n if doc is not None:\n doc = doc.split('\\n\\n', 1)[0]\n doc = self.ws_re.sub(' ', doc).strip()\n\n return doc", "def syntax_text():", "def 
test_doc2(self):\n assert Review.__doc__ is not None", "def update_docstring(instance):\n try:\n docstring = instance.api_map['doc']\n except (KeyError, TypeError):\n docstring = 'No docstring provided.'\n\n instance.__class__.__doc__ = docstring\n instance.__class__.__call__.__signature__ = construct_signature(instance)\n\n return docstring", "def description(self):\n return (self.__doc__ or \"\").strip()", "def input_file_docx(str_write, str_answer):\r\n paragraph = dti.add_paragraph(str_write)\r\n paragraph_format = paragraph.paragraph_format\r\n paragraph_format.space_after = Pt(1.0)\r\n\r\n paragraph = dti1.add_paragraph(str_answer)\r\n paragraph_format = paragraph.paragraph_format\r\n paragraph_format.space_after = Pt(1.0)", "def get_doc(self) -> str:\n if self.soup is not None:\n root = self.soup.contents[0]\n body = self.get_paragraph(root.find(\"abstract\", recursive=False))\n body += self.get_paragraph(root.find(\"discussion\", recursive=False))\n return body\n\n return self.doc", "def guess(cls, docstring):", "def text(self) -> str:", "def test_docstrings(self):\n self.assertEqual(len(Rectangle.__doc__) > 0, True)\n self.assertTrue(hasattr(Rectangle, \"__init__\"))\n self.assertTrue(Rectangle.__init__.__doc__)\n self.assertTrue(hasattr(Rectangle, \"width\"))\n self.assertTrue(Rectangle.width.__doc__)\n self.assertTrue(hasattr(Rectangle, \"height\"))\n self.assertTrue(Rectangle.height.__doc__)\n self.assertTrue(hasattr(Rectangle, \"x\"))\n self.assertTrue(Rectangle.x.__doc__)\n self.assertTrue(hasattr(Rectangle, \"y\"))\n self.assertTrue(Rectangle.y.__doc__)\n self.assertTrue(hasattr(Rectangle, \"area\"))\n self.assertTrue(Rectangle.area.__doc__)\n self.assertTrue(hasattr(Rectangle, \"display\"))\n self.assertTrue(Rectangle.display.__doc__)\n self.assertTrue(hasattr(Rectangle, \"__str__\"))\n self.assertTrue(Rectangle.__str__.__doc__)\n self.assertTrue(hasattr(Rectangle, \"update\"))\n self.assertTrue(Rectangle.update.__doc__)\n self.assertTrue(hasattr(Rectangle, \"to_dictionary\"))\n self.assertTrue(Rectangle.to_dictionary.__doc__)", "def test_docstring(self):\n self.assertIsNotNone(Place.__doc__)", "def public_fn_with_googley_docstring(self, name, another, state=None):\n return 0", "def test_docstring(self):\n self.assertTrue(len(City.__doc__) > 1)\n self.assertTrue(len(City.__init__.__doc__) > 1)\n self.assertTrue(len(City.__str__.__doc__) > 1)\n self.assertTrue(len(City.save.__doc__) > 1)\n self.assertTrue(len(City.to_dict.__doc__) > 1)", "def doc_sub(*sub):\n def dec(obj):\n obj.__doc__ = obj.__doc__.format(*sub)\n return obj\n return dec", "def doc(update: Update, context: CallbackContext):\n language_code = update.effective_user.language_code\n args = context.args\n if_admin = database.get_user_attr('admin', user_id=update.effective_user.id)\n if len(args) > 2:\n text = get_text('quantity_error_doc_text', language_code).text()\n else:\n if len(args) == 0:\n text = get_text('doc_text', language_code).text({'command': consts.ALL, 'admin': if_admin})\n else:\n if args[0] not in consts.DOC_COMMANDS:\n text = get_text('wrong_command_error_doc_text', language_code).text()\n else:\n text = get_text('doc_text', language_code).text({'command': args[0], 'admin': if_admin})\n if not if_admin and args[0] == 'admin':\n text += get_text('doc_unavailable_text', language_code).text()\n cf.send_message(\n context=context,\n chat_id=update.effective_chat.id,\n text=text,\n )", "def documentation_only():\n pass", "def doc(caesar, input):\n name = input.group(1)\n name = name.lower()\n\n if 
caesar.doc.has_key(name): \n caesar.reply(caesar.doc[name][0])\n if caesar.doc[name][1]: \n caesar.say('e.g. ' + caesar.doc[name][1])", "def savedoc():\r\n document.save('QSDoc_{0}_{1}_{2}_{3}.docx'.format(args.server, year, month, day))" ]
[ "0.8623083", "0.8082154", "0.74954015", "0.7324442", "0.7286685", "0.72765845", "0.72732335", "0.71542895", "0.71460176", "0.7124491", "0.70944595", "0.70730394", "0.7053277", "0.70136875", "0.6961591", "0.696057", "0.69478667", "0.6903653", "0.69020563", "0.6898957", "0.6898941", "0.68979573", "0.6855355", "0.68271476", "0.68271476", "0.67869145", "0.67492497", "0.6719158", "0.6686487", "0.6671741", "0.66692626", "0.66464734", "0.66371596", "0.6627773", "0.6626095", "0.66245264", "0.66149414", "0.6577798", "0.6528557", "0.65067947", "0.649335", "0.6474563", "0.64562136", "0.6440753", "0.6430745", "0.64260274", "0.6406084", "0.63914245", "0.6384251", "0.63463575", "0.6335461", "0.6335055", "0.6331765", "0.6331765", "0.6331765", "0.6324966", "0.631142", "0.6306112", "0.6284417", "0.62823194", "0.62773734", "0.6271403", "0.62598306", "0.6248969", "0.62455404", "0.623843", "0.62263364", "0.6224899", "0.6218198", "0.6214493", "0.620757", "0.6192332", "0.61721325", "0.61632824", "0.61608875", "0.61603737", "0.6159948", "0.61525667", "0.61217046", "0.6116812", "0.6078722", "0.6063606", "0.60604286", "0.60604286", "0.60502553", "0.60415053", "0.60354996", "0.6035146", "0.60278505", "0.60234624", "0.600299", "0.5999775", "0.59984374", "0.59900844", "0.59877753", "0.59859943", "0.5985901", "0.5981783", "0.5979809", "0.59788585", "0.5968372" ]
0.0
-1
Random vectors and matrices, and some linear algebra operations
def exercise_8(): print("=" * 30) print("Running exercise_8()") #### YOUR CODE HERE #### numpy.random.seed(seed= 7) # set the numpy random seed to 7 #### YOUR CODE HERE #### # Set x to a 2-d array of random number of shape (3, 1) x = numpy.random.rand(3, 1) print(f'x:\n{x}') #### YOUR CODE HERE #### # Set 7 to a 2-d array of random number of shape (3, 1) y = numpy.random.rand(3,1) print(f'y:\n{y}') #### YOUR CODE HERE #### # Calclate the sum of x and y v1 = x + y print(f'v1:\n{v1}') #### YOUR CODE HERE #### # Calclate the sum of x and y v2 = numpy.multiply(x, y) print(f'v2:\n{v2}') #### YOUR CODE HERE #### # Transpose x xT = numpy.transpose(x) print(f'xT: {xT}') #### YOUR CODE HERE #### # Calculate the dot product of x and y v3 = numpy.dot(xT, y) print(f'v3: {v3}') #### YOUR CODE HERE #### # Set A to a 2-d array of random numbers of shape (3, 3) A = numpy.random.rand(3,3) print(f'A:\n{A}') #### YOUR CODE HERE #### # Compute the dot product of x-transpose with A v4 = numpy.dot(xT, A) print(f'v4: {v4}') #### YOUR CODE HERE #### # Compute the dot product of x-transpose with A and the product with y v5 = numpy.dot(v4, y) print(f'v5: {v5}') #### YOUR CODE HERE #### # Compute the inverse of A v6 = numpy.linalg.inv(A) print(f'v6:\n{v6}') #### YOUR CODE HERE #### # Compute the dot product of A with its inverse. # Should be near identity (save for some numerical error) v7 = numpy.dot(v6, A) print(f'v7:\n{v7}') return x, y, v1, v2, xT, v3, A, v4, v5, v6, v7
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_norm_vector():\n random_state = np.random.RandomState(0)\n for n in range(1, 6):\n v = pr.random_vector(random_state, n)\n u = pr.norm_vector(v)\n assert_almost_equal(np.linalg.norm(u), 1)", "def random_matrix():\n # Initialize random angles\n theta1 = np.random.rand() * 360\n theta2 = np.random.rand() * 360\n theta3 = np.random.rand() * 360\n\n glMatrixMode(GL_MODELVIEW)\n glPushMatrix()\n glLoadIdentity()\n glPushMatrix()\n glRotatef(theta1, 1.0, 0.0, 0.0)\n glRotatef(theta2, 0.0, 1.0, 0.0)\n glRotatef(theta3, 0.0, 0.0, 1.0)\n matrix = glGetDoublev(GL_MODELVIEW_MATRIX)\n glPopMatrix()\n glPopMatrix()\n return matrix", "def exercise10():\n#Setting the random seed to 5 and generating 2 arrays to do a series of operations on them\n np.random.seed(seed=5)\n a = np.random.rand(3,1)\n b = np.random.rand(3,1)\n print(\"a= \",a)\n print(\"b= \", b)\n print(\"a+b= \",a+b)\n print(\"Hadamard product= \", np.multiply(a , b))\n print(\"dot-product= \",np.dot(a.T, b))\n\n#10c:Setting the random seed to 2 and generating a matrix to do a series of operations on them\n np.random.seed(seed=2)\n arrX = np.random.rand(3,3)\n X= np.matrix(arrX)\n print(\"X= \",X)\n print(\"aT X= \",np.dot(a.T, X))\n print(\"aT X b= \",np.dot(np.dot(a.T, X), b))\n print(\"X inverse= \", X.I)", "def rand_unit_vect_3D():\n xyz = np.random.normal(size=3)\n mag = sum(i**2 for i in xyz) ** .5\n return xyz / mag", "def set_random_vector(self):\n self.vector = vu.create_dense_random_vector(dimension)", "def sample_utility(n, model, alpha, beta, bmax):\n A, b = matrix(0.0, (n,n)), matrix(ra.uniform(0,bmax,(n,1)))\n \n if model == 1: A = matrix(ra.uniform(0,beta,(n,n)))\n \n if model == 2:\n for i in range(n):\n for j in range(n/2):\n A[i, int(np.mod(i+j+1,n))] = beta**(j+1)\n A[i, int(np.mod(i-(j+1),n))] = beta**(j+1)\n \n if model == 3: A = 0.5*matrix(ra.binomial(1,beta,(n,n)))\n \n for i in range(n): A[i,i] = 1.0\n \n return Utility((alpha*A,b), 'sqrt')", "def test_from_two_vectors(self):\r\n for _ in range(20):\r\n v0 = np.random.randn(3)\r\n v1 = np.random.randn(3)\r\n v0 /= np.linalg.norm(v0)\r\n v1 /= np.linalg.norm(v1)\r\n\r\n q = from_two_vectors(v0, v1)\r\n R = to_rotation(q)\r\n\r\n zero_vec = R @ v0 - v1\r\n self.assertAlmostEqual(np.linalg.norm(zero_vec), 0.0)\r\n\r\n q_inv = from_two_vectors(v1, v0)\r\n R_inv = to_rotation(q_inv)\r\n zero_matrix = R @ R_inv - np.identity(3)\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)", "def randomSO3():\n\tu1 = random.random()\n\tu2 = random.random()\n\tu3 = random.random()\n\tR = array([[cos(2*pi*u1), sin(2*pi*u1), 0], [-sin(2*pi*u1), cos(2*pi*u1), 0], [0, 0, 1]])\n\tv = array([cos(2*pi*u2)*sqrt(u3), sin(2*pi*u2)*sqrt(u3), sqrt(1-u3)])\n\tH = identity(3)-2*v*transpose([v])\n\t#print \"v\", v\n\t#print \"vvT\", v*transpose([v])\n\t#print \"H\", H\n\t#print linalg.det(R), linalg.det(H)\n\t#print H, v * transpose([v])\n\treturn - dot(H, R)", "def test_random_sphere_vector():\n\ttest_vector = o_gen_instance.generate_random_sphere_vector()\n\tassert isinstance(test_vector, np.ndarray)\n\tassert test_vector.shape == (3,)\n\tfor component in test_vector:\n\t\tassert component != 0.\n\tassert np.isclose(np.linalg.norm(test_vector), 1.0)", "def get_random_vec():\n return [random.gauss(GENERATING_MEAN, GENERATING_DEVIATION) for _ in range(VECTOR_SIZE)]", "def random_vec(self, rand):\n return array([rand.uniform(*c) for c in self.constraints])", "def _make_random_matrix(self, n_components, n_features):", "def random_vector_in_unit_ball():\n x = 
np.random.normal(loc=0.0, scale=1.0, size=(numSamples, self.dim))\n z = np.random.exponential(scale=1.0, size=(numSamples,))\n d = (np.sum(np.square(x), axis=1) + z) ** 0.5\n d = d[:, np.newaxis]\n return x / d", "def randvec(self, X):\n raise NotImplementedError", "def test_vector_projection_on_zero_vector():\n random_state = np.random.RandomState(23)\n for _ in range(5):\n a = pr.random_vector(random_state, 3)\n a_on_b = pr.vector_projection(a, np.zeros(3))\n assert_array_almost_equal(a_on_b, np.zeros(3))", "def random_vector():\n\n import numpy as np\n\n zeta = np.random.rand(2) # Two uniformly sampled random numbers in range (0,1)\n c = 2.0*zeta[0] - 1.0 # Random cos(theta) uniformly sampled in range (-1,+1)\n if c >= 1.0: # Guard against very small chance of roundoff error\n s = 0.0 # Set sin(theta) to zero\n else:\n s = np.sqrt(1.0-c**2) # Calculate sin(theta) from cos(theta), always positive\n\n phi = zeta[1] * 2.0*np.pi # Random angle uniformly sampled in range (0,2*pi)\n\n return np.array ( ( s*np.cos(phi), s*np.sin(phi), c ), dtype=np.float_ ) # Random unit vector", "def test_normal(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n m = Module()\r\n m.random = RandomStreams(utt.fetch_seed())\r\n m.fn = Method([], m.random.normal((2,2), -1, 2))\r\n\r\n made = m.make()\r\n made.random.initialize()\r\n fn_val0 = made.fn()\r\n fn_val1 = made.fn()\r\n\r\n rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)\r\n rng = numpy.random.RandomState(int(rng_seed)) #int() is for 32bit\r\n numpy_val0 = rng.normal(-1, 2, size=(2,2))\r\n numpy_val1 = rng.normal(-1, 2, size=(2,2))\r\n\r\n assert numpy.allclose(fn_val0, numpy_val0)\r\n assert numpy.allclose(fn_val1, numpy_val1)", "def get_random_vectors(nr_of_vectors, strategy):\n\n if strategy == 'orthonormal':\n array = ortho_group.rvs(nr_of_vectors)\n elif strategy == 'random_normal':\n array = np.zeros((nr_of_vectors, nr_of_vectors))\n for i in range(nr_of_vectors):\n array[i] = np.random.normal(0, 1, nr_of_vectors)\n else:\n raise Exception('Wrong random vector generation strategy')\n\n return array", "def univariate_dlm_simulation(F,G,W,v,initial_state,n,T):\n \n ZEROS = np.zeros(n)\n \n emissions = np.zeros([T,1])\n state = np.zeros([T,n])\n \n state[0] = initial_state\n emissions[0] = F.dot(initial_state) + np.random.normal(loc = 0.0,scale = v)\n \n for t in range(T):\n state[t] = G.dot(state[t-1]) + np.random.multivariate_normal(ZEROS,W)\n emissions[t] = F.dot(state[t]) + np.random.normal(0.0, v)\n \n return state,emissions", "def gen_vector(size):\n solution = []\n for i in range(size):\n rand_num = uniform(-size, size)\n solution.append(rand_num)\n return np.array(solution)", "def vrand(self,v):\n vtemp = np.random.randn(v.size)\n return vtemp.reshape(v.shape)", "def test_tensor_math_ops(free_alg):\n\n dr = free_alg\n p = dr.names\n r = p.R\n v = p.v\n w = Vec('w')\n x = IndexedBase('x')\n i, j, k = p.R_dumms[:3]\n a = sympify('a')\n\n v1 = dr.sum((i, r), x[i] * v[i])\n w1 = dr.sum((i, r), x[i] * w[i])\n assert v1.n_terms == 1\n assert w1.n_terms == 1\n\n v1_neg = -v1\n assert v1_neg == dr.sum((i, r), -x[i] * v[i])\n\n v1_1 = v1 + 2\n assert v1_1.n_terms == 2\n assert v1_1 == 2 + v1\n\n w1_1 = w1 + a\n assert w1_1.n_terms == 2\n assert w1_1 == a + w1\n\n prod = v1_1 * w1_1\n # Test scalar multiplication here as well.\n expected = (\n 2 * a + a * v1 + 2 * w1 +\n dr.sum((i, r), (j, r), x[i] * x[j] * v[i] * w[j])\n )\n assert prod.simplify() == expected.simplify()\n\n # Test the commutator 
operation.\n comm_v1v1 = v1 | v1\n assert comm_v1v1.simplify() == 0\n # Here the tensor subtraction can also be tested.\n comm_v1w1 = v1 | w1\n expected = (\n dr.sum((i, r), (j, r), x[i] * x[j] * v[i] * w[j]) -\n dr.sum((i, r), (j, r), x[j] * x[i] * w[i] * v[j])\n )\n assert comm_v1w1.simplify() == expected.simplify()\n\n alpha = symbols('alpha')\n assert alpha not in v1.free_vars\n tensor = v1 / alpha\n assert tensor.n_terms == 1\n terms = tensor.local_terms\n assert len(terms) == 1\n term = terms[0]\n assert term.sums == ((i, r),)\n assert term.amp == x[i] / alpha\n assert term.vecs == (v[i],)\n assert alpha in tensor.free_vars", "def getLinearizedMatrices(model_type: ModelType, operating_point, Vf_op, Vb_op):\n\n p_op, e_op, lamb_op, dp_op, de_op, dlamb_op = operating_point\n\n # Vf_op, Vb_op = compute_feed_forward_flatness(e_and_derivatives, lambda_and_derivatives)\n Vs_op = Vf_op + Vb_op\n Vd_op = Vf_op - Vb_op\n\n if model_type == ModelType.EASY:\n A = np.array([[0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 0],\n [-L3 * Vs_op * sin(p_op) / Je_static, -L2 * sin(e_op) / Je_static, 0, 0, 0, 0],\n [L4 * Vs_op * cos(p_op) * cos(e_op) / Jl_static, 0, 0, 0, 0, 0]])\n elif model_type == ModelType.FRICTION:\n A = np.array([[0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, -mc.d_p / Jp_static, 0, 0],\n [-L3 * Vs_op * sin(p_op) / Je_static, -L2 * sin(e_op) / Je_static, 0, 0, -mc.d_e / Je_static, 0],\n [L4 * Vs_op * cos(p_op) * cos(e_op) / Jl_static, -L4 * Vs_op * sin(p_op) * sin(e_op) / Jl_static, 0, 0, 0, -mc.d_l / Jl_static]])\n elif model_type == ModelType.CENTRIPETAL:\n A = np.array([[0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1],\n [-(de_op ** 2 - dlamb_op ** 2 * cos(e_op) ** 2) * sin(p_op) ** 2 + (de_op ** 2 - dlamb_op ** 2 * cos(e_op) ** 2) * cos(p_op) ** 2, 2 * dlamb_op ** 2 * sin(p_op) * sin(e_op) * cos(p_op) * cos(e_op), 0, -mc.d_p / Jp_static, 2 * de_op * sin(p_op) * cos(p_op), -2 * dlamb_op * sin(p_op) * cos(p_op) * cos(e_op) ** 2],\n [-L3 * Vs_op * sin(p_op) / Je_static, dlamb_op ** 2 * sin(e_op) ** 2 - dlamb_op ** 2 * cos(e_op) ** 2 - L2 * sin(e_op) / Je_static, 0, 0, -mc.d_e / Je_static, -2 * dlamb_op * sin(e_op) * cos(e_op)],\n [L4 * Vs_op * cos(p_op) * cos(e_op) / Jl_static, -L4 * Vs_op * sin(p_op) * sin(e_op) / Jl_static, 0, 0, 0, -mc.d_l / Jl_static]])\n\n B = np.array([[0, 0],\n [0, 0],\n [0, 0],\n [L1 / Jp_static, -L1 / Jp_static],\n [L3 / Je_static * cos(p_op), L3 / Je_static * cos(p_op)],\n [L4 * sin(p_op) * cos(e_op) / Jl_static, L4 * sin(p_op) * cos(e_op) / Jl_static]])\n\n return A, B, Vf_op, Vb_op", "def coef_random(an,bn,random_trun_start=0,random_start=1,random_end= 32, halfwidth0=1,pow=-1):\n\n an=np.asarray(an)\n bn=np.asarray(bn)\n half=halfcube(random_start,random_end,halfwidth0,pow)\n an_random=half*np.random.uniform(-1,1,(random_end-random_start,))\n bn_random=half*np.random.uniform(-1,1,(random_end-random_start,))\n\n an_random=np.append(np.zeros(random_trun_start-0),an_random)\n bn_random=np.append(np.zeros(random_trun_start-0),bn_random)\n\n if an.shape[0]>an_random.shape[0]:\n an_random.resize(an.shape)\n bn_random.resize(bn.shape)\n else:\n an.resize(an_random.shape)\n bn.resize(bn_random.shape)\n an_random=an+an_random\n bn_random=bn+bn_random\n\n return an_random,bn_random", "def generate_x(number_dimensions, T_train, T_test, mu, feature_model):\n number_training_obeservations = T_train.shape[0]\n number_testing_obeservations = T_test.shape[0]\n\n X_train = 
np.zeros((number_training_obeservations,number_dimensions))\n X_test = np.zeros((number_testing_obeservations,number_dimensions))\n\n mixture_indicator_train = generate_mixture_indicator(number_training_obeservations)\n mixture_indicator_test = generate_mixture_indicator(number_testing_obeservations)\n\n G = np.random.normal(0,1,(number_dimensions,number_dimensions))\n q, r = np.linalg.qr(G)\n\n mu1 = mu*np.ones(number_dimensions)\n mu2 = -mu*np.ones(number_dimensions)\n\n if feature_model == \"A\":\n eigenvalues1 = np.random.uniform(0,1,(number_dimensions,1))\n eigenvalues1 = np.sort(eigenvalues1, axis = 0)/np.sum(eigenvalues1)\n lambda1 = np.identity(number_dimensions)\n np.fill_diagonal(lambda1,eigenvalues1)\n cov1 = q@lambda1@q.T\n\n for i in range(number_training_obeservations):\n if T_train[i] == 0:\n X_train[i,:] = np.random.multivariate_normal(mu1,cov1,1)\n else:\n X_train[i,:] = np.random.multivariate_normal(mu2,cov1,1)\n \n for i in range(number_testing_obeservations):\n if T_test[i] == 0:\n X_test[i,:] = np.random.multivariate_normal(mu1,cov1,1)\n else:\n X_test[i,:] = np.random.multivariate_normal(mu2,cov1,1)\n\n\n elif feature_model == \"B\":\n eigenvalues1 = np.random.uniform(0,1,(number_dimensions,1))\n eigenvalues1 = np.sort(eigenvalues1, axis = 0)/np.sum(eigenvalues1)\n lambda1 = np.identity(number_dimensions)\n np.fill_diagonal(lambda1,eigenvalues1)\n cov1 = q@lambda1@q.T\n\n eigenvalues2 = np.random.uniform(0,1,(number_dimensions,1))\n eigenvalues2 = np.sort(eigenvalues2, axis = 0)[::-1]/np.sum(eigenvalues2)\n lambda2 = np.identity(number_dimensions)\n np.fill_diagonal(lambda2,eigenvalues2)\n cov2 = q@lambda2@q.T\n\n\n for i in range(number_training_obeservations):\n if T_train[i] == 0:\n if mixture_indicator_train[i] == 0:\n X_train[i,:] = np.random.multivariate_normal(mu1,cov1,1)\n else:\n X_train[i,:] = np.random.multivariate_normal(mu1,cov2,1)\n else:\n if mixture_indicator_train[i] == 0:\n X_train[i,:] = np.random.multivariate_normal(mu2,cov1,1)\n else:\n X_train[i,:] = np.random.multivariate_normal(mu2,cov2,1)\n \n for i in range(number_testing_obeservations):\n if T_test[i] == 0:\n if mixture_indicator_test[i] == 0:\n X_test[i,:] = np.random.multivariate_normal(mu1,cov1,1)\n else:\n X_test[i,:] = np.random.multivariate_normal(mu1,cov2,1)\n else:\n if mixture_indicator_test[i] == 0:\n X_test[i,:] = np.random.multivariate_normal(mu2,cov1,1)\n else:\n X_test[i,:] = np.random.multivariate_normal(mu2,cov2,1)\n\n train_mean = np.mean(X_train, axis = 0)\n train_std = np.std(X_train, axis = 0)\n X_train = (X_train - train_mean)/train_std\n X_test = (X_test - train_mean)/train_std\n \n return X_train, X_test", "def random():\r\n return R.NextDouble()", "def generate_random_MT_Lune_samp():\n # 1. Get randomly varied Lune parameters (gamma and delta):\n # Define U rotation matrix (See Tape and Tape 2012/2013):\n U_rot_matrix = (1./np.sqrt(6))*np.vstack(([np.sqrt(3.),0.,-np.sqrt(3.)],[-1.,2.,-1.], [np.sqrt(2.),np.sqrt(2.),np.sqrt(2.)]))\n # Get a random sample 3-vector on a 3-unit sphere to use to calculate random delta and gamma Lune angles:\n delta = np.random.uniform(-np.pi/2., np.pi/2.) # theta, but shifted to range between -pi/2 and pi/2 (See Tape and Tape 2012/2013)\n beta = (np.pi/2.) - delta # Beta is simply phase shift of delta (See Tape and Tape 2012/2013)\n gamma = np.random.uniform(-np.pi/6., np.pi/6.) 
# phi, but shifted to range between -pi/6 and pi/6 (See Tape and Tape 2012/2013)\n # Get eigenvalues from delta,gamma,beta:\n lune_space_uvw_vec = np.vstack(([np.cos(gamma)*np.sin(beta)], [np.sin(gamma)*np.sin(beta)], [np.cos(beta)]))\n lambda_vec = np.dot(np.transpose(U_rot_matrix), lune_space_uvw_vec) # (See Tape and Tape 2012, eq. 20)\n Lune_space_MT = np.vstack(([lambda_vec[0],0.,0.],[0.,lambda_vec[1],0.], [0.,0.,lambda_vec[2]])) # MT with principle axes in u,v,w Lune space\n # 2. Get theta and phi angles to rotate Lune_space_MT by to randomly rotate into x,y,z space:\n # Get a random sample 3-vector on a 3-unit sphere to use to calculate random theta and phi rotation angles:\n a_unnormalised = np.array([np.random.normal(loc=0.0, scale=1.0), np.random.normal(loc=0.0, scale=1.0), np.random.normal(loc=0.0, scale=1.0)], dtype=float) # Generate 3 indepdendent normal deviates\n a_normalised = a_unnormalised/(np.sum(a_unnormalised**2)**-0.5) # Normallise sample onto unit 3-sphere - As in Muller (1959)\n # And normallise so that vector magnitude = 1:\n a_normalised = a_normalised/((np.sum(a_normalised**2))**0.5)\n x = a_normalised[0]\n y = a_normalised[1]\n z = a_normalised[2]\n theta = np.arctan2(np.sqrt((x**2)+(y**2)),z)\n phi = np.arctan2(y,x)\n # 3. Rotate Lune_space_MT from u,v,w coords to x,y,z coords:\n random_MT = rot_mt_by_theta_phi(Lune_space_MT, theta, phi)\n random_six_MT = get_six_MT_from_full_MT_array(random_MT)\n # And normallise so that moment tensor magnitude = 1:\n random_six_MT_normalised = random_six_MT/((np.sum(random_six_MT**2))**0.5)\n # And set to correct dimensions (so matrix multiplication in forward model works correctly):\n random_six_MT_normalised = np.reshape(random_six_MT_normalised, (6, 1))\n return random_six_MT_normalised", "def random_rotation_matrix():\n\n x = np.random.uniform(size=3)\n theta = x[0]*2*math.pi\n phi = x[1]*2*math.pi\n z = x[2]*2\n\n r = math.sqrt(z)\n vx = math.sin(phi)*r\n vy = math.cos(phi)*r\n vz = math.sqrt(2.0-z)\n\n st = math.sin(theta)\n ct = math.cos(theta)\n\n sx = vx*ct-vy*st\n sy = vx*st+vy*ct\n\n return np.array([[vx*sx-ct, vx*sy-st, vx*vz],\n [vy*sx+st, vy*sy-ct, vy*vz],\n [vz*sx,vz*sy,1.0-z]])", "def random_matrix(rows, cols):\n return np.random.randn(rows, cols)", "def generate_random_matrix(dim):\n\n A = np.complex128(np.random.random([dim, dim]))\n A_adjoint = A.conj().T\n\n P = A @ A_adjoint\n P += np.identity(len(P))\n\n P_inverse = np.linalg.inv(P)\n\n return P_inverse", "def features(x):\n # We need to contract last axis of x with first of W - do this with\n # tensordot. 
The result has shape:\n # (?, ?, num_random_features)\n return jnp.sqrt(2 / num_random_features) * jnp.cos(\n jnp.sqrt(2 / gamma) * jnp.tensordot(x, w, axes=1) + b)", "def test_normal(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n\r\n random = RandomStreams(utt.fetch_seed())\r\n fn = function([], random.normal((2,2), -1, 2))\r\n fn_val0 = fn()\r\n fn_val1 = fn()\r\n\r\n rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)\r\n rng = numpy.random.RandomState(int(rng_seed)) #int() is for 32bit\r\n numpy_val0 = rng.normal(-1, 2, size=(2,2))\r\n numpy_val1 = rng.normal(-1, 2, size=(2,2))\r\n\r\n assert numpy.allclose(fn_val0, numpy_val0)\r\n assert numpy.allclose(fn_val1, numpy_val1)", "def compute(self, node, input_vals):\r\n #assert len(input_vals) == 2\r\n matA = np.less_equal(np.random.random(tuple(input_vals[0])) , input_vals[1])\r\n return matA\r\n #assert False\r", "def test_uniform(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n m = Module()\r\n m.random = RandomStreams(utt.fetch_seed())\r\n m.fn = Method([], m.random.uniform((2,2), -1, 1))\r\n\r\n made = m.make()\r\n made.random.initialize()\r\n fn_val0 = made.fn()\r\n fn_val1 = made.fn()\r\n print fn_val0\r\n print fn_val1\r\n\r\n rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)\r\n rng = numpy.random.RandomState(int(rng_seed)) #int() is for 32bit\r\n\r\n numpy_val0 = rng.uniform(-1, 1, size=(2,2))\r\n numpy_val1 = rng.uniform(-1, 1, size=(2,2))\r\n print numpy_val0\r\n print numpy_val1\r\n\r\n assert numpy.allclose(fn_val0, numpy_val0)\r\n assert numpy.allclose(fn_val1, numpy_val1)", "def matrix_normal(M,U,V):\n return numpy.random.multivariate_normal(M.ravel(), np.kron(V, U)).reshape(M.shape)", "def generate_data(v1, v2, l1, l2):\n alpha = random.random()\n vector = v1 + alpha * abs(v1 - v2)\n label = l1 + alpha * abs(l1 - l2)\n\n return vector, label", "def _nnls_solver(A, B, warm_start=None):\n # catch singular matrix error, reset result\n # (this should not happen often)\n try:\n result = nnlsm_blockpivot(A.T, B.T, init=warm_start)[0].T\n except np.linalg.linalg.LinAlgError:\n result = np.random.rand(B.shape[0], A.shape[0])\n\n # prevent all parameters going to zero\n for r in range(result.shape[1]):\n if np.allclose(result[:,r], 0):\n result[:,r] = np.random.rand(result.shape[0])\n\n return result", "def exo1():\n randu = randn(N/ 2, N/ 2, 2); % a random vector field\n b = 2\n for i in 1: 4:\n LLs_u = Li{i}(LiS{i}(randu))\n % relative error should be very small\n norm(abs(LLs_u(: ) - b*randu(: )))/ norm(randu(: ))", "def generate_matrix(size) -> np.ndarray:\n np.random.seed(1)\n return np.random.rand(size, size) - 0.5", "def test_d_3():\n rs = 20\n d = 3\n np.random.seed(rs)\n number_rotations = 3\n\n theta_1 = np.random.uniform(0, 2 * math.pi)\n rotation_1 = np.identity(d)\n pos_1 = np.random.randint(0, d - 1)\n pos_2 = np.random.randint(pos_1 + 1, d)\n rotation_1[pos_1, pos_1] = math.cos(theta_1)\n rotation_1[pos_1, pos_2] = - math.sin(theta_1)\n rotation_1[pos_2, pos_1] = math.sin(theta_1)\n rotation_1[pos_2, pos_2] = math.cos(theta_1)\n\n theta_2 = np.random.uniform(0, 2 * math.pi)\n rotation_2 = np.identity(d)\n pos_3 = np.random.randint(0, d - 1)\n pos_4 = np.random.randint(pos_3 + 1, d)\n rotation_2[pos_3, pos_3] = math.cos(theta_2)\n rotation_2[pos_3, pos_4] = - math.sin(theta_2)\n rotation_2[pos_4, pos_3] = math.sin(theta_2)\n rotation_2[pos_4, pos_4] = math.cos(theta_2)\n\n theta_3 = np.random.uniform(0, 2 
* math.pi)\n rotation_3 = np.identity(d)\n pos_5 = np.random.randint(0, d - 1)\n pos_6 = np.random.randint(pos_5 + 1, d)\n rotation_3[pos_5, pos_5] = math.cos(theta_3)\n rotation_3[pos_5, pos_6] = - math.sin(theta_3)\n rotation_3[pos_6, pos_5] = math.sin(theta_3)\n rotation_3[pos_6, pos_6] = math.cos(theta_3)\n\n final_rotation = rotation_1 @ rotation_2 @ rotation_3\n np.random.seed(rs)\n rotation_function = (mt_obj.calculate_rotation_matrix\n (d, number_rotations))\n assert(np.all(final_rotation == rotation_function))", "def matrix_math_api_examples():\n \n print(dingbat.vec2())\n print(dingbat.vec2(array.array('f', (20, 40))))\n print(dingbat.mat3())\n print(dingbat.perspective_matrix(45, 640/480, 0.001, 100.0))\n print(dingbat.orthographic_matrix(-1.0, 1.0, -1.0, 1.0, 0.001, 100.0))\n print(dingbat.lookat_matrix(-20, -20, 20, 0, 0, 0, 0, 0, 1))\n translate = dingbat.translation_matrix(-10, -20, -30)\n rotate = dingbat.rotation_matrix(45, 0, 1, 0)\n scale = dingbat.scale_matrix(100, 200, 300)\n print(translate)\n print(rotate)\n print(scale)\n print(dingbat.multiply_matrices(translate, rotate, scale))", "def test_suite():\r\n test(add_vectors([1, 1], [1, 1]) == [2, 2])\r\n test(add_vectors([1, 2], [1, 4]) == [2, 6])\r\n test(add_vectors([1, 2, 1], [1, 4, 3]) == [2, 6, 4])\r\n test(scalar_mult(5, [1, 2]) == [5, 10])\r\n test(scalar_mult(3, [1, 0, -1]) == [3, 0, -3])\r\n test(scalar_mult(7, [3, 0, 5, 11, 2]) == [21, 0, 35, 77, 14])\r\n test(dot_product([1, 1], [1, 1]) == 2)\r\n test(dot_product([1, 2], [1, 4]) == 9)\r\n test(dot_product([1, 2, 1], [1, 4, 3]) == 12)\r\n test(cross_product([2,3,4], [5,6,7]) == [-3, 6, -3])", "def generate_solved_instance(m, n):\n # Generate a, U\n a = np.array([random_fp() for _ in range(n)])\n U = random_sparse_matrix(m, n)\n\n # Normalize U to satisfy constraints\n Ua2 = U.dot(a) * U.dot(a)\n for (i, j), val in U.items():\n U[i, j] /= Ua2[i].sqrt()\n\n assert (U.dot(a) * U.dot(a) == 1).all()\n Ud = U.to_dense()\n assert (Ud.dot(a) * Ud.dot(a) == 1).all()\n return U, a", "def experiment4():\n np.random.seed()\n state['result'] = np.random.rand(1)", "def gen_random_matrix(a_size, b_size, AB_d, BA_d, AA_d, BB_d):\n\n def random_gen(n_samples):\n return np.ones(shape=(n_samples,))\n\n AB = sparse.random(a_size, b_size, AB_d, data_rvs=random_gen, format=\"csr\")\n BA = sparse.random(b_size, a_size, BA_d, data_rvs=random_gen, format=\"csr\")\n AA = sparse.random(a_size, a_size, AA_d, data_rvs=random_gen, format=\"csr\")\n BB = sparse.random(b_size, b_size, BB_d, data_rvs=random_gen, format=\"csr\")\n\n return AB, BA, AA, BB", "def generate_AR(N, M, L, non_zero):\n X = np.zeros([N, L+2])\n \n for i in range(N):\n ind = np.random.randint(1,4) # Choose randomly between the 4 AR processes\n for j in range(2,L):\n if ind == 1:\n sig = np.random.uniform(-1,1,(2))\n X[i][j] = sig[0] * X[i][j-1] + sig[1] * X[i][j-2] + np.random.randn(1)\n \n elif ind == 2: \n sig = np.random.uniform(-1,1,(3))\n X[i][j] = sig[0] * X[i][j-1] + sig[1] * X[i][j-2] + sig[2] * X[i][j-3] + np.random.randn(1)\n \n elif ind == 3:\n sig = np.random.uniform(-1,1,(2))\n X[i][j] = sig[0] * X[i][j-1] + sig[1] * X[i][j-2] + np.random.randn(1)\n \n elif ind == 4:\n sig = np.random.uniform(-1,1,(4))\n X[i][j] = sig[0] * X[i][j-1] + sig[1] * X[i][j-2] + sig[2] * X[i][j-3] + sig[3] * X[i][j-4]+ np.random.randn(1)\n \n \" Making zero and non-zero rows \"\n Real_X = np.zeros([N, L+2])\n ind = np.random.random(non_zero)\n for i in range(len(ind)):\n temp = np.random.randint(0,N)\n while temp in ind:\n temp = 
np.random.randint(0,N)\n ind[i] = temp\n \n for j in ind:\n Real_X[int(j)] = X[int(j)]\n \n Real_X = Real_X.T[2:].T \n\n \" Finding A and Y \"\n A_Real = np.random.randn(M,N) # Random mixing matrix\n Y_Real = np.dot(A_Real, Real_X) # Measurement matrix \n return Y_Real, A_Real, Real_X", "def random_three_vector():\n phi = config.random.uniform(0, np.pi * 2)\n costheta = config.random.uniform(-1, 1)\n\n theta = np.arccos(costheta)\n x = np.sin(theta) * np.cos(phi)\n y = np.sin(theta) * np.sin(phi)\n z = np.cos(theta)\n\n return x, y, z", "def __init__(self):\n self.py = random.getstate()\n self.np = np.random.get_state()\n self.torch = torch.get_rng_state()", "def test_B(self):\r\n class B(RModule):\r\n def __init__(self):\r\n super(B, self).__init__()\r\n \r\n self.x = compile.Member(tensor.dvector())\r\n self.r = self.random.uniform(tensor.shape(self.x))\r\n \r\n self.f = compile.Method([self.x], self.r)\r\n class E(RModule):\r\n def __init__(self):\r\n super(E, self).__init__()\r\n self.b = B()\r\n self.f = compile.Method([self.b.x], self.b.r)\r\n\r\n b = E()\r\n m = b.make()\r\n \r\n m.seed(1000)\r\n #print m.f(N.ones(5))\r\n #print m.f(N.ones(5))\r\n #print m.f(N.ones(5))\r\n rvals = [\"0.74802375876 0.872308123517 0.294830748897 0.803123780003 0.6321109955\",\r\n \"0.00168744844365 0.278638315678 0.725436793755 0.7788480779 0.629885140994\",\r\n \"0.545561221664 0.0992011009108 0.847112593242 0.188015424144 0.158046201298\",\r\n \"0.054382248842 0.563459168529 0.192757276954 0.360455221883 0.174805216702\",\r\n \"0.961942907777 0.49657319422 0.0316111492826 0.0915054717012 0.195877184515\"]\r\n\r\n for i in xrange(5):\r\n s = \" \".join([str(n) for n in m.f(N.ones(5))])\r\n print s\r\n assert s == rvals[i]", "def test_matrix_product(self, use_cache):\n\n key = jrandom.PRNGKey(0)\n dim = 50\n max_power = 25\n\n matrix = jrandom.normal(key, (dim, dim)) / 10\n vector = jnp.ones((dim,), dtype=jnp.float32)\n\n if use_cache:\n mpstate = model_utils.CachedMatrixPowerState.precompute(matrix, max_power)\n else:\n mpstate = model_utils.LazyMatrixPowerState(matrix)\n\n for t in range(max_power):\n result = mpstate.matrix_power_multiply(vector, t)\n expected = np.linalg.matrix_power(matrix, t) @ vector\n\n np.testing.assert_array_almost_equal(result, expected, decimal=1)", "def test_multinomial(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n m = Module()\r\n m.random = RandomStreams(utt.fetch_seed())\r\n m.fn = Method([], m.random.multinomial((20,20), 1, [0.1]*10))\r\n\r\n made = m.make()\r\n made.random.initialize()\r\n fn_val0 = made.fn()\r\n fn_val1 = made.fn()\r\n\r\n rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)\r\n rng = numpy.random.RandomState(int(rng_seed)) #int() is for 32bit\r\n numpy_val0 = rng.multinomial(1, [0.1]*10, size=(20,20))\r\n numpy_val1 = rng.multinomial(1, [0.1]*10, size=(20,20))\r\n\r\n assert numpy.all(fn_val0 == numpy_val0)\r\n assert numpy.all(fn_val1 == numpy_val1)", "def generate(self):\n for i in range(4):\n random_first = randomize_first_box()\n self.randomize(random_first)\n for i in range(9):\n random_pos = randomize_position()\n self.randomize(random_pos)\n self.board.solve()", "def _expr_to_vector(\n expr, basis, *, random_fct=lambda: random.randint(-100, 100), numeric\n):\n dim = len(basis)\n assert dim > 0\n # create random values for the coordinates and evaluate\n # both the basis functions and the expression to generate\n # the linear equation to be solved\n A = [] # pylint: 
disable=invalid-name\n b = [] # pylint: disable=invalid-name\n for _ in range(2 * dim):\n if not numeric:\n if sp.Matrix(A).rank() >= len(basis):\n break\n vals = [(k, random_fct()) for k in VEC]\n A.append([b.subs(vals) for b in basis])\n b.append(expr.subs(vals))\n else:\n # this could happen if the random_fct is bad, or the 'basis' is not\n # linearly independent\n if not numeric:\n raise ValueError(\n 'Could not find a sufficient number of linearly independent vectors'\n )\n\n if numeric:\n vec = nl.lstsq(\n np.array(A).astype(complex),\n np.array(b).astype(complex),\n rcond=None if np.__version__ >= '1.14' else -1\n )[0]\n else:\n res = sp.linsolve((sp.Matrix(A), sp.Matrix(b)), sp.symbols('a b c'))\n if len(res) != 1:\n raise ValueError(\n 'Invalid result {res} when trying to match expression {expr} to basis {basis}.'\n .format(res=res, expr=expr, basis=basis)\n )\n vec = next(iter(res))\n vec = tuple(v.nsimplify() for v in vec)\n return vec", "def test_uniform(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n random = RandomStreams(utt.fetch_seed())\r\n fn = function([], random.uniform((2,2), -1, 1))\r\n fn_val0 = fn()\r\n fn_val1 = fn()\r\n\r\n rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)\r\n rng = numpy.random.RandomState(int(rng_seed)) #int() is for 32bit\r\n numpy_val0 = rng.uniform(-1, 1, size=(2,2))\r\n numpy_val1 = rng.uniform(-1, 1, size=(2,2))\r\n\r\n assert numpy.allclose(fn_val0, numpy_val0)\r\n assert numpy.allclose(fn_val1, numpy_val1)", "def _make_random_matrix(self, n_components, n_features):\n #random_state = check_random_state(self.random_state)\n return _gaussian_random_matrix(\n n_components, n_features, random_state=self.random_state\n )", "def test_shuffle_row_elements(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n # On matrices, for each row, the elements of that row should be\r\n # shuffled.\r\n # Note that this differs from numpy.random.shuffle, where all the\r\n # elements of the matrix are shuffled.\r\n mm = Module()\r\n mm.random = RandomStreams(utt.fetch_seed())\r\n m_input = tensor.dmatrix()\r\n mm.f = Method([m_input], mm.random.shuffle_row_elements(m_input))\r\n mmade = mm.make()\r\n mmade.random.initialize()\r\n\r\n # Generate the elements to be shuffled\r\n val_rng = numpy.random.RandomState(utt.fetch_seed()+42)\r\n in_mval = val_rng.uniform(-2, 2, size=(20,5))\r\n fn_mval0 = mmade.f(in_mval)\r\n fn_mval1 = mmade.f(in_mval)\r\n print in_mval[0]\r\n print fn_mval0[0]\r\n print fn_mval1[0]\r\n assert not numpy.all(in_mval == fn_mval0)\r\n assert not numpy.all(in_mval == fn_mval1)\r\n assert not numpy.all(fn_mval0 == fn_mval1)\r\n\r\n rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)\r\n rng = numpy.random.RandomState(int(rng_seed))\r\n numpy_mval0 = in_mval.copy()\r\n numpy_mval1 = in_mval.copy()\r\n for row in numpy_mval0:\r\n rng.shuffle(row)\r\n for row in numpy_mval1:\r\n rng.shuffle(row)\r\n\r\n assert numpy.all(numpy_mval0 == fn_mval0)\r\n assert numpy.all(numpy_mval1 == fn_mval1)\r\n\r\n # On vectors, the behaviour is the same as numpy.random.shuffle,\r\n # except that it does not work in place, but returns a shuffled vector.\r\n vm = Module()\r\n vm.random = RandomStreams(utt.fetch_seed())\r\n v_input = tensor.dvector()\r\n vm.f = Method([v_input], vm.random.shuffle_row_elements(v_input))\r\n vmade = vm.make()\r\n vmade.random.initialize()\r\n\r\n in_vval = val_rng.uniform(-3, 3, size=(12,))\r\n fn_vval = vmade.f(in_vval)\r\n 
numpy_vval = in_vval.copy()\r\n vrng = numpy.random.RandomState(int(rng_seed))\r\n vrng.shuffle(numpy_vval)\r\n print in_vval\r\n print fn_vval\r\n print numpy_vval\r\n assert numpy.all(numpy_vval == fn_vval)\r\n\r\n # Trying to shuffle a vector with function that should shuffle\r\n # matrices, or vice versa, raises a TypeError\r\n self.assertRaises(TypeError, vmade.f, in_mval)\r\n self.assertRaises(TypeError, mmade.f, in_vval)", "def test_vector_class():\n points = 10\n riskfree = .03\n maturity = 30/365\n moneyness = np.linspace(-.04, .04, points)\n premium = np.ones_like(moneyness) * .05\n call = True\n data = {'riskfree': riskfree, 'maturity': maturity,\n 'moneyness': moneyness, 'call': call, 'premium': premium}\n\n sigma = np.ones(points) * .13\n bsm = BSmodel(sigma, data)\n\n print(bsm.premium())\n\n weights = np.ones(points) * .63\n means = np.vstack([np.ones(points) * -.01, np.ones(points) * .09])\n stds = np.vstack([np.ones(points) * .16, np.ones(points) * .05])\n param = np.vstack([weights, means, stds])\n mbs = MBSmodel(param, data)\n\n print(mbs.premium())\n\n param_a, param_p = np.ones(points) * 4.5, np.ones(points) * 2\n param_c = -.05 * np.ones(points)\n gb2 = GB2model([param_a, param_p, param_c], data)\n\n print(gb2.premium())", "def random_operation():\r\n operation = random.choice([\"add\",\"subtract\",\"multiply\",\"divide\"])\r\n return operation", "def generate_random_MT():\n # Generate 6 indepdendent normal deviates:\n six_MT_unnormalised = np.array([np.random.normal(loc=0.0, scale=1.0), np.random.normal(loc=0.0, scale=1.0), np.random.normal(loc=0.0, scale=1.0), np.random.normal(loc=0.0, scale=1.0), np.random.normal(loc=0.0, scale=1.0), np.random.normal(loc=0.0, scale=1.0)], dtype=float)\n # Normallise sample onto unit 6-sphere:\n six_MT_normalised = six_MT_unnormalised/(np.sum(six_MT_unnormalised**2)**-0.5) # As in Muller (1959)\n # And normallise so that moment tensor magnitude = 1:\n six_MT_normalised = six_MT_normalised/((np.sum(six_MT_normalised**2))**0.5)\n # And set to correct dimensions (so matrix multiplication in forward model works correctly):\n six_MT_normalised = np.reshape(six_MT_normalised, (6, 1))\n return six_MT_normalised", "def random(cls):\n return cls(np.random.randn(3)).normalized()", "def init_variables():\n weights = np.random.normal(size=2)\n bias = 0\n return weights, bias", "def _generate_uniform_planes(self):\n return np.random.randn(self.m, self.dim)", "def test_shuffle_row_elements(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n\r\n # On matrices, for each row, the elements of that row should be shuffled.\r\n # Note that this differs from numpy.random.shuffle, where all the elements\r\n # of the matrix are shuffled.\r\n random = RandomStreams(utt.fetch_seed())\r\n m_input = tensor.dmatrix()\r\n f = function([m_input], random.shuffle_row_elements(m_input), updates=random.updates())\r\n\r\n # Generate the elements to be shuffled\r\n val_rng = numpy.random.RandomState(utt.fetch_seed()+42)\r\n in_mval = val_rng.uniform(-2, 2, size=(20,5))\r\n fn_mval0 = f(in_mval)\r\n fn_mval1 = f(in_mval)\r\n print in_mval[0]\r\n print fn_mval0[0]\r\n print fn_mval1[0]\r\n assert not numpy.all(in_mval == fn_mval0)\r\n assert not numpy.all(in_mval == fn_mval1)\r\n assert not numpy.all(fn_mval0 == fn_mval1)\r\n\r\n rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)\r\n rng = numpy.random.RandomState(int(rng_seed))\r\n numpy_mval0 = in_mval.copy()\r\n numpy_mval1 = in_mval.copy()\r\n for row in 
numpy_mval0:\r\n rng.shuffle(row)\r\n for row in numpy_mval1:\r\n rng.shuffle(row)\r\n\r\n assert numpy.all(numpy_mval0 == fn_mval0)\r\n assert numpy.all(numpy_mval1 == fn_mval1)\r\n\r\n # On vectors, the behaviour is the same as numpy.random.shuffle,\r\n # except that it does not work in place, but returns a shuffled vector.\r\n random1 = RandomStreams(utt.fetch_seed())\r\n v_input = tensor.dvector()\r\n f1 = function([v_input], random1.shuffle_row_elements(v_input))\r\n\r\n in_vval = val_rng.uniform(-3, 3, size=(12,))\r\n fn_vval = f1(in_vval)\r\n numpy_vval = in_vval.copy()\r\n vrng = numpy.random.RandomState(int(rng_seed))\r\n vrng.shuffle(numpy_vval)\r\n print in_vval\r\n print fn_vval\r\n print numpy_vval\r\n assert numpy.all(numpy_vval == fn_vval)\r\n\r\n # Trying to shuffle a vector with function that should shuffle\r\n # matrices, or vice versa, raises a TypeError\r\n self.assertRaises(TypeError, f1, in_mval)\r\n self.assertRaises(TypeError, f, in_vval)", "def LinearRandomKernel(self, f1, f2):\n\n\t\trf1 = self.computeRandomFeatures(f1)\n\t\trf2 = self.computeRandomFeatures(f2)\n\n\t\treturn np.squeeze(rf1).dot(np.squeeze(rf2))", "def _generate_random_vector(size):\n return np.random.uniform(-0.1, 0.1, size)", "def test_transform_2d(transform, alpha = 1):\r\n points = 20*[None]\r\n for i in range(20):\r\n x = random.randrange(-40, 41)\r\n y = random.randrange(-40, 41)\r\n points[i] = vec2(x, y)\r\n tr_x = random.randrange(-40, 41)\r\n tr_y = random.randrange(-40, 41)\r\n mapping = [(p, vec2(p.x + tr_x, p.y + tr_y)) for p in points]\r\n print(\"Translation\")\r\n print(\"Input\".ljust(20), \"Translation\".ljust(20), \"Transformation\".ljust(20))\r\n for i in range(20):\r\n x = random.randrange(-40, 41)\r\n y = random.randrange(-40, 41)\r\n v_in = vec2(x, y)\r\n v_translate = vec2(x + tr_x, y + tr_y)\r\n v_transform = transform(v_in, mapping, alpha)\r\n print(str(v_in).ljust(20), str(v_translate.str_repr(4)).ljust(20), str(v_transform.str_repr(4)).ljust(20))\r\n print()\r\n th = 2*math.pi*random.random()\r\n mapping = [(p, vec2(p.x*math.cos(th) - p.y*math.sin(th), p.x*math.sin(th) + p.y*math.cos(th))) for p in points]\r\n print(\"Rotation\")\r\n print(\"Input\".ljust(20), \"Rotation\".ljust(20), \"Transformation\".ljust(20))\r\n for i in range(20):\r\n x = random.randrange(-40, 41)\r\n y = random.randrange(-40, 41)\r\n v_in = vec2(x, y)\r\n v_rotate = vec2(x*math.cos(th) - y*math.sin(th), x*math.sin(th) + y*math.cos(th))\r\n v_transform = transform(v_in, mapping, alpha)\r\n print(str(v_in).ljust(20), str(v_rotate.str_repr(4)).ljust(20), str(v_transform.str_repr(4)).ljust(20))\r\n print()\r\n k = math.exp(2*random.random() - 1)\r\n mapping = [(p, vec2(k*p.x, k*p.y)) for p in points]\r\n print(\"Uniform scaling\")\r\n print(\"Input\".ljust(20), \"Scaling\".ljust(20), \"Transformation\".ljust(20))\r\n for i in range(20):\r\n x = random.randrange(-40, 41)\r\n y = random.randrange(-40, 41)\r\n v_in = vec2(x, y)\r\n v_scale = vec2(k*x, k*y)\r\n v_transform = transform(v_in, mapping, alpha)\r\n print(str(v_in).ljust(20), str(v_scale.str_repr(4)).ljust(20), str(v_transform.str_repr(4)).ljust(20))\r\n print()\r\n k_x = math.exp(2*random.random() - 1)\r\n k_y = 3*random.random() + 1\r\n if (k_x >= k_y + math.exp(-1)): k_y = k_x - k_y\r\n else: k_y = k_x + k_y\r\n mapping = [(p, vec2(k_x*p.x, k_y*p.y)) for p in points]\r\n print(\"Non-uniform scaling\")\r\n print(\"Input\".ljust(20), \"Scaling\".ljust(20), \"Transformation\".ljust(20))\r\n for i in range(20):\r\n x = random.randrange(-40, 
41)\r\n y = random.randrange(-40, 41)\r\n v_in = vec2(x, y)\r\n v_scale = vec2(k_x*x, k_y*y)\r\n v_transform = transform(v_in, mapping, alpha)\r\n print(str(v_in).ljust(20), str(v_scale.str_repr(4)).ljust(20), str(v_transform.str_repr(4)).ljust(20))\r\n print()", "def _init_model(self):\n self.A_inv = np.zeros(shape=(self.numUsers, self.d, self.d))\n self.b = np.zeros(shape=(self.numUsers, self.d))\n self.w = np.zeros(shape=(self.numUsers, self.d))\n for i, mat in enumerate(self.A_inv):\n self.A_inv[i] = np.eye(self.d)", "def test_normal(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n rng_R = random_state_type()\r\n # Use non-default parameters\r\n post_r, out = normal(rng_R, (2, 3), 4.0, 2.0)\r\n\r\n f = compile.function(\r\n [compile.In(rng_R,\r\n value=numpy.random.RandomState(utt.fetch_seed()),\r\n update=post_r, mutable=True)],\r\n [out], accept_inplace=True)\r\n\r\n numpy_rng = numpy.random.RandomState(utt.fetch_seed())\r\n val0 = f()\r\n val1 = f()\r\n numpy_val0 = numpy_rng.normal(4.0, 2.0, size=(2, 3))\r\n numpy_val1 = numpy_rng.normal(4.0, 2.0, size=(2, 3))\r\n print val0\r\n print numpy_val0\r\n print val1\r\n print numpy_val1\r\n self.assertTrue(numpy.allclose(val0, numpy_val0))\r\n self.assertTrue(numpy.allclose(val1, numpy_val1))", "def generate_data(params, sigma):\n rng = random.PRNGKey(0)\n k = len(params) // 2\n a_array = params[:k]\n b_array = params[k:]\n n = 20 * k\n xs = sample_our_uniform(n, 1, rng).reshape((n,))\n ys = onp.zeros(n)\n all_indices = set(onp.arange(n))\n for i in range(k):\n i_idxs = onp.random.choice(list(all_indices), 20, replace=False)\n all_indices = set(all_indices) - set(i_idxs)\n ys[i_idxs] = xs[i_idxs] * a_array[i] + b_array[i] + onp.random.normal(0, sigma, size=(20,))\n return xs, ys", "def initial_vel(T,N,masses,dim):\r\n rand_v = np.random.uniform(-1,1,(N,dim)) #random velocities\r\n sum_v = np.sum(rand_v, axis = 0)/N #Total velocity\r\n rand_v = rand_v - sum_v #Subtract cm-Motion\r\n K = np.sum(np.sum(rand_v**2,axis = 1)*masses)/2 #Kinetic energy\r\n T_act = 2/3*K/sc.k/N #Temperature\r\n R_scale = T/T_act #Scaling factor\r\n v = np.transpose(rand_v*np.sqrt(R_scale)) #rescalte to reach T\r\n return v", "def reflect(real_seqs):\n reflectX = np.random.choice([-1, 1])\n reflectY = np.random.choice([-1, 1])\n reflected = real_seqs * np.array([reflectX, reflectY, 1])\n return reflected", "def simulate(self):\n n_samples = self.n_samples\n n_features = self.n_features\n nb_active_features = self.nb_active_features\n K = self.K\n pi_0 = self.pi_0\n gap = self.gap\n p0 = self.p0\n p1 = self.p1\n r_c = self.r_c\n r_cf = self.r_cf\n rho = self.rho\n\n coeffs = np.zeros(n_features)\n coeffs[0:nb_active_features] = K\n\n features = features_normal_cov_toeplitz(n_samples, n_features, rho)\n\n # Add class relative information on the design matrix \n A = np.random.choice(range(n_samples), size=int((1 - pi_0) * n_samples),\n replace=False)\n A_ = np.delete(range(n_samples), A)\n\n index_plus_gap = nb_active_features + int(\n (n_features - nb_active_features) * r_cf)\n features[A, :index_plus_gap] += gap\n features[A_, :index_plus_gap] -= gap\n\n self.features = features\n xc = features.dot(coeffs)\n\n # Simulation of latent variables\n pi = self.logistic_grad(-xc)\n u = np.random.rand(n_samples)\n Z = (u <= 1 - pi)\n self.Z = Z\n\n # Simulation of true times\n n_samples_class_1 = np.sum(Z)\n n_samples_class_0 = n_samples - n_samples_class_1\n T = np.empty(n_samples)\n pi_0_est = 1 - Z.mean()\n T[Z == 0] = 
np.random.geometric(p0, size=n_samples_class_0)\n\n # Compute p_c to obtain censoring rate r_c\n r_c_ = 1 - r_c\n p0_ = 1 - p0\n p1_ = 1 - p1\n pi_0_ = 1 - pi_0_est\n a = r_c_ * p0_ * p1_\n b = p0 * pi_0_est * p1_ + p1 * pi_0_ * p0_ - r_c_ * (p1_ + p0_)\n c = r_c_ - p0 * pi_0_est - p1 * pi_0_\n res = self.poldeg2_solver(a=a, b=b, c=c)\n if isinstance(res, list):\n if res[0] > 0:\n pc = 1 - res[0]\n else:\n pc = 1 - res[1]\n else:\n pc = 1 - res\n T[Z == 1] = np.random.geometric(p1, size=n_samples_class_1)\n\n # Simulation of the censoring\n C = np.random.geometric(pc, size=n_samples)\n\n # Censoring indicator: 1 if it is a time of failure, 0 if it's \n # censoring.\n delta = (T <= C).astype(int)\n\n # Observed time\n Y = np.minimum(T, C).astype(int)\n if np.sum(Y == 0) > 0:\n Y += 1\n self.delta = delta\n self.Y = Y\n return features, Y, delta", "def generateRandomStandardizedLinkMatrix(size, empty, autoRefLink):\n\n #We start by generating our matrix\n res = np.zeros((size,size),dtype=float);\n\n #If we want to work with a sparse matrix\n #We Generate the index vector (witch vector to populate?)\n emptyVecIndexes = np.random.choice(2,size, p=[EMPTINESS_RATIO,1-EMPTINESS_RATIO])\n\n\n for i in range(size):\n\n ## SPARSE MATRIX ##\n if(empty):\n\n #We generate random vectors for only few columns\n if(emptyVecIndexes[i]==1):\n res[i] = generateProbabilisticVector(size,True)\n\n #We postprocess the non empty columns to ensure certain properties (diag = 0 | sum = (strict) 1 )\n if(res[i].sum()!=0):\n index = np.random.choice(size,1)\n\n while(index==i):\n index = np.random.choice(size,1)\n\n\n if(autoRefLink==False):\n res[i][index]+=res[i][i]\n res[i][i]=0\n\n #float precision sum problem => we ensure normalization of columns\n if(isProbabilisticVector(res[i]) == False):\n diff = 1-res[i].sum()\n res[i][index]+=diff\n\n #for vectors with no link => Same chances to go anywhere\n else:\n #fullfill empty vectors with the same prob\n res[i]= np.full(size,1/size)\n\n ## NORMAL MATRIX ##\n else:\n res[i] = generateProbabilisticVector(size,False)\n\n #Very unlikely but we do it just to be sure\n if res[i].sum()==0:\n\n #fullfill empty vectors with the same prob\n res[i]= np.full(size,1/size)\n\n\n #We postprocess the non empty columns to ensure certain properties (diag = 0 | sum = (strict) 1 )\n else:\n index = np.random.choice(size,1)\n\n while(index==i):\n index = np.random.choice(size,1)\n\n if(autoRefLink==False):\n res[i][index]+=res[i][i]\n res[i][i]=0\n\n #float precision sum problem => we ensure normalization of columns\n if(isProbabilisticVector(res[i]) == False):\n diff = 1-res[i].sum()\n res[i][index]+=diff\n\n #to remove\n #print(np.transpose(res));\n return np.transpose(res)", "def _generate_arnoldi_factorization(jax: types.ModuleType) -> Callable:\n\n @jax.jit\n def modified_gram_schmidt_step_arnoldi(j, vals):\n \"\"\"\n Single step of a modified gram-schmidt orthogonalization.\n Args:\n j: Integer value denoting the vector to be orthogonalized.\n vals: A list of variables:\n `vector`: The current vector to be orthogonalized\n to all previous ones\n `krylov_vectors`: jax.array of collected krylov vectors\n `n`: integer denoting the column-position of the overlap\n <`krylov_vector`|`vector`> within `H`.\n Returns:\n updated vals.\n\n \"\"\"\n vector, krylov_vectors, n, H = vals\n v = krylov_vectors[j, :]\n h = jax.numpy.vdot(v, vector)\n H = jax.ops.index_update(H, jax.ops.index[j, n], h)\n vector = vector - h * jax.numpy.reshape(v, vector.shape)\n return [vector, krylov_vectors, n, 
H]\n\n @functools.partial(jax.jit, static_argnums=(5, 6, 7))\n def _arnoldi_fact(matvec, args, v0, krylov_vectors, H, start, num_krylov_vecs,\n eps):\n \"\"\"\n Compute an m-step arnoldi factorization of `matvec`, with\n m = min(`it`,`num_krylov_vecs`). The factorization will\n do at most `num_krylov_vecs` steps. The returned arrays\n `kv` and `H` will satisfy the Arnoldi recurrence relation\n ```\n matrix @ Vm - Vm @ Hm - fm * em = 0\n ```\n with `matrix` the matrix representation of `matvec` and\n `Vm = jax.numpy.transpose(kv[:it, :])`,\n `Hm = H[:it, :it]`, `fm = np.expand_dims(kv[it, :] * H[it, it - 1]`,1)\n and `em` a cartesian basis vector of shape `(1, kv.shape[1])`\n with `em[0, -1] == 1` and 0 elsewhere.\n\n Note that the caller is responsible for dtype consistency between\n the inputs, i.e. dtypes between all input arrays have to match.\n\n Args:\n matvec: The matrix vector product.\n args: List of arguments to `matvec`.\n v0: Initial state to `matvec`.\n krylov_vectors: An array for storing the krylov vectors. The individual\n vectors are stored as columns.\n The shape of `krylov_vecs` has to be\n (num_krylov_vecs + 1, np.ravel(v0).shape[0]).\n H: Matrix of overlaps. The shape has to be\n (num_krylov_vecs + 1,num_krylov_vecs + 1).\n start: Integer denoting the start position where the first\n produced krylov_vector should be inserted into `krylov_vectors`\n num_krylov_vecs: Number of krylov iterations, should be identical to\n `krylov_vectors.shape[0] + 1`\n eps: Convergence parameter. Iteration is terminated if the norm of a\n krylov-vector falls below `eps`.\n Returns:\n kv: An array of krylov vectors\n H: A matrix of overlaps\n it: The number of performed iterations.\n \"\"\"\n Z = jax.numpy.linalg.norm(v0)\n v = v0 / Z\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[start, :],\n jax.numpy.ravel(v))\n H = jax.lax.cond(\n start > 0, start,\n lambda x: jax.ops.index_update(H, jax.ops.index[x, x - 1], Z), None,\n lambda x: H)\n\n # body of the arnoldi iteration\n def body(vals):\n krylov_vectors, H, matvec, vector, _, threshold, i, maxiter = vals\n Av = matvec(vector, *args)\n initial_vals = [Av, krylov_vectors, i, H]\n Av, krylov_vectors, _, H = jax.lax.fori_loop(\n 0, i + 1, modified_gram_schmidt_step_arnoldi, initial_vals)\n norm = jax.numpy.linalg.norm(Av)\n Av /= norm\n H = jax.ops.index_update(H, jax.ops.index[i + 1, i], norm)\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[i + 1, :],\n jax.numpy.ravel(Av))\n return [krylov_vectors, H, matvec, Av, norm, threshold, i + 1, maxiter]\n\n def cond_fun(vals):\n # Continue loop while iteration < num_krylov_vecs and norm > eps\n _, _, _, _, norm, _, iteration, _ = vals\n counter_done = (iteration >= num_krylov_vecs)\n norm_not_too_small = norm > eps\n continue_iteration = jax.lax.cond(counter_done,\n _, lambda x: False,\n _, lambda x: norm_not_too_small)\n\n return continue_iteration\n initial_norm = v.real.dtype.type(1.0+eps)\n initial_values = [krylov_vectors, H, matvec, v, initial_norm, eps, start,\n num_krylov_vecs]\n final_values = jax.lax.while_loop(cond_fun, body, initial_values)\n kvfinal, Hfinal, _, _, norm, _, it, _ = final_values\n return kvfinal, Hfinal, it, norm < eps\n\n return _arnoldi_fact", "def test_advanced_manipulations(free_alg):\n dr = free_alg\n p = dr.names\n i, j, k = p.i, p.j, p.k\n\n u = IndexedBase('u')\n v = IndexedBase('v')\n f = Vec('f')\n\n tensor = dr.einst(u[i, j] * f[j] + v[i, j] * f[j])\n assert tensor.n_terms == 2\n\n def has_u(term):\n \"\"\"Test 
if a term have u tensor.\"\"\"\n return term.amp.has(u)\n\n expect = dr.sum((j, p.R), u[i, j] * f[j])\n for res in [\n tensor.filter(has_u),\n tensor.bind(lambda x: [x] if has_u(x) else [])\n ]:\n assert res.n_terms == 1\n assert res == expect\n\n def subst_i(term):\n \"\"\"Substitute i index in the terms.\"\"\"\n return Term(term.sums, term.amp.xreplace({i: k}), term.vecs)\n\n expect = dr.sum((j, p.R), u[k, j] * f[j] + v[k, j] * f[j])\n for res in [\n tensor.map(subst_i),\n tensor.bind(lambda x: [subst_i(x)]),\n tensor.map2scalars(lambda x: x.xreplace({i: k}))\n ]:\n assert res.n_terms == 2\n assert res == expect\n\n alpha, beta = symbols('alpha beta')\n assert tensor.bind(\n lambda x: [Term(x.sums, x.amp * i_, x.vecs) for i_ in [alpha, beta]]\n ) == (tensor * alpha + tensor * beta)\n\n assert tensor.map2scalars(\n lambda x: x.xreplace({j: k})\n ) == dr.sum((j, p.R), u[i, k] * f[k] + v[i, k] * f[k])\n\n assert tensor.map2scalars(\n lambda x: x.xreplace({j: k}), skip_vecs=True\n ) == dr.sum((j, p.R), u[i, k] * f[j] + v[i, k] * f[j])", "def get_random_bvfunction(width, num_inputs, num_outputs, num_assignments, seed,\n external_variable_prefix=None, operation_set_index=0, num_rounds=None,\n extra_operations=None):\n assert num_inputs + num_assignments >= num_outputs\n\n import random\n import functools\n from cascada.bitvector.operation import (\n BvAnd, BvOr, BvXor, BvShl, BvLshr, RotateLeft, RotateRight, BvAdd,\n BvSub, BvNeg, BvNot, Concat, BvIdentity, make_partial_operation\n )\n from cascada.bitvector.secondaryop import BvIf, BvMaj\n\n PRNG = random.Random()\n PRNG.seed(seed)\n\n # SimpleReverse contains Concat and Extract\n # implemented as a class to later use issubclass()\n class SimpleReverse(object):\n def __new__(cls, bv):\n return functools.reduce(Concat, [bv[i] for i in range(bv.width)])\n\n if operation_set_index == 0:\n list_ops = (\n BvNot, BvXor, BvAnd, BvOr,\n BvAdd, BvSub, BvNeg,\n BvIdentity,\n BvIf, BvMaj,\n RotateLeft, RotateRight,\n BvShl, BvLshr,\n SimpleReverse\n # Concat, Extract, Ite,\n # BvComp, BvUlt, BvUle, BvUgt, BvUge,\n # BvMul, BvUdiv, BvUrem,\n # PopCount, Reverse, PopCountSum2, PopCountSum3, PopCountDiff, LeadingZeros,\n )\n elif operation_set_index == 1:\n list_ops = (\n BvNot, BvXor, BvAnd, BvOr,\n BvAdd, BvSub, BvNeg,\n BvIdentity,\n BvIf, BvMaj,\n RotateLeft, RotateRight,\n make_partial_operation(BvShl, (None, core.Constant(PRNG.randint(1, width - 1), width))),\n make_partial_operation(BvLshr, (None, core.Constant(PRNG.randint(1, width - 1), width))),\n )\n elif operation_set_index == 2:\n list_ops = (\n BvNot, BvXor, BvAnd, BvOr,\n BvAdd, BvSub,\n BvIdentity,\n RotateLeft, RotateRight,\n BvShl, BvLshr,\n SimpleReverse\n )\n elif operation_set_index == 3:\n list_ops = (\n BvNot, BvXor, BvAnd, BvOr,\n BvAdd, BvSub, BvNeg,\n BvIdentity,\n BvIf, BvMaj,\n RotateLeft, RotateRight,\n BvShl, BvLshr,\n SimpleReverse\n )\n else:\n raise ValueError(\"operation_set_index must be 0, 1, 2, or 3\")\n\n extra_operations_vrepr = None\n if extra_operations is not None:\n list_ops += tuple(extra_operations)\n extra_operations_vrepr = f\"({','.join(op.__name__ for op in extra_operations)},)\"\n\n while True: # outer loop to check RandomBvFunction does not return Constant\n list_of_lambda_assignments = []\n\n class RandomBvFunction(BvFunction):\n input_widths = [width] * num_inputs\n output_widths = [width] * num_outputs\n \n @classmethod\n def vrepr(cls):\n evp = external_variable_prefix.__repr__()\n return f\"get_random_bvfunction({width}, {num_inputs}, {num_outputs}, 
\" \\\n f\"{num_assignments}, {seed}, {evp}, {operation_set_index}, {num_rounds}, \" \\\n f\"{extra_operations_vrepr})\"\n\n if external_variable_prefix is not None:\n RandomBvFunction.round_keys = []\n\n def get_random_var_index():\n if len(list_of_lambda_assignments) >= num_inputs + 2:\n min_index = num_inputs\n else:\n min_index = 0\n return PRNG.randint(min_index, len(list_of_lambda_assignments) + num_inputs - 1)\n\n def get_random_var_indices(num_indices, my_unique_indices):\n indices = []\n while True:\n new_index = get_random_var_index()\n if not my_unique_indices or new_index not in indices:\n indices.append(new_index)\n if len(indices) == num_indices:\n break\n return indices\n\n while True:\n _op = list_ops[PRNG.randint(0, len(list_ops) - 1)]\n\n if _op in [RotateLeft, RotateRight]: # only operations with scalar inputs\n class Op(object): # need a class to store the randomness\n op = _op\n my_index = get_random_var_index()\n offset = PRNG.randint(1, width - 1) # != 0\n # kwargs required for external vars (see below)\n def __new__(cls, args, **kwargs): return cls.op(args[cls.my_index], cls.offset)\n\n elif _op == SimpleReverse:\n class Op(object):\n op = _op\n my_index = get_random_var_index()\n def __new__(cls, args, **kwargs): return cls.op(args[cls.my_index])\n\n elif _op in [BvShl, BvLshr]: # ensure 2nd operand ct\n class Op(object):\n op = _op\n my_index = get_random_var_index()\n offset = core.Constant(PRNG.randint(1, width - 1), width)\n def __new__(cls, args, **kwargs): return cls.op(args[cls.my_index], cls.offset)\n\n else:\n assert issubclass(_op, operation.Operation)\n assert _op.arity[1] == 0\n\n if _op.arity[0] >= 2 and PRNG.randint(0, 3) == 0:\n # 1 in 4 to have a ct or external var (each 50%)\n if external_variable_prefix is None or PRNG.randint(0, 1) == 0:\n extra_arg = core.Constant(PRNG.randint(1, 2**width - 2), width) # != 0, allones\n\n class Op(object):\n op = _op\n indices = get_random_var_indices(_op.arity[0] - 1, False)\n ct = extra_arg\n def __new__(cls, args, **kwargs): return cls.op(*([args[i] for i in cls.indices] + [cls.ct]))\n else:\n evi = PRNG.randint(0, len(RandomBvFunction.round_keys))\n if evi == len(RandomBvFunction.round_keys):\n RandomBvFunction.round_keys.append(core.Variable(f\"{external_variable_prefix}{evi}\", width))\n\n class Op(object):\n op = _op\n indices = get_random_var_indices(_op.arity[0] - 1, False)\n ev_index = evi\n # kwargs[\"round_keys\"] instead of RandomBvFunction.round_keys\n # to provide round_keys from cls in running time (in an argument of Op(\n # (otherwise round_keys cannot be changed)\n def __new__(cls, args, **kwargs): return cls.op(*([args[i] for i in cls.indices] +\n [kwargs[\"round_keys\"][cls.ev_index]]))\n else:\n unique_indices = _op in [BvAnd, BvOr, BvXor, BvSub]\n if unique_indices and len(list_of_lambda_assignments) + num_inputs == 1:\n # avoid using duplicated inputs for these operations\n continue\n\n class Op(object):\n op = _op\n indices = get_random_var_indices(_op.arity[0], unique_indices)\n def __new__(cls, args, **kwargs): return cls.op(*[args[i] for i in cls.indices])\n\n if external_variable_prefix is not None and len(RandomBvFunction.round_keys) == 0:\n # ensure at least 1 external variable\n _op = BvXor\n evi = 0\n RandomBvFunction.round_keys.append(core.Variable(f\"{external_variable_prefix}{evi}\", width))\n\n class Op(object):\n op = _op\n indices = get_random_var_indices(_op.arity[0] - 1, False)\n ev_index = evi\n def __new__(cls, args, **kwargs): return cls.op(*([args[i] for i in 
cls.indices] +\n [kwargs[\"round_keys\"][cls.ev_index]]))\n\n ## debugging\n # print(f\"{len(list_of_lambda_assignments)+1}/{num_assignments} | op: {Op.op}\")\n # if hasattr(Op, \"my_index\"): print(\"\\tmy_index:\", Op.my_index)\n # if hasattr(Op, \"indices\"): print(\"\\tindices:\", Op.indices)\n # if hasattr(Op, \"ct\"): print(\"\\tct:\", Op.ct)\n # if hasattr(Op, \"offset\"): print(\"\\toffset:\", Op.offset)\n # if hasattr(Op, \"ev_index\"): print(\"\\tev_index:\", Op.ev_index)\n\n list_of_lambda_assignments.append(Op)\n if len(list_of_lambda_assignments) == num_assignments:\n break\n\n list_of_lambda_assignments = tuple(list_of_lambda_assignments)\n\n @classmethod\n def eval_method(cls, *args):\n assert isinstance(args, collections.abc.Sequence)\n all_vars = list(args)\n round_keys = getattr(cls, \"round_keys\", None)\n ## debugging\n # print(f\"eval_method({args}):\")\n for index_assign, lambda_assign in enumerate(list_of_lambda_assignments):\n result = lambda_assign(all_vars, round_keys=round_keys)\n\n Op = lambda_assign\n ffo_arg0, ffo_arg1, ffo_arg2 = \"\", \"\", \"\"\n if hasattr(Op, \"my_index\"):\n ffo_arg0 = all_vars[Op.my_index]\n else:\n assert hasattr(Op, \"indices\")\n ffo_arg0 = all_vars[Op.indices[0]]\n if len(Op.indices) >= 2:\n ffo_arg1 = all_vars[Op.indices[1]]\n if len(Op.indices) >= 3:\n ffo_arg2 = all_vars[Op.indices[2]]\n ffo_extra_args = \"\"\n if hasattr(Op, \"ct\"):\n ffo_extra_args = Op.ct\n elif hasattr(Op, \"offset\"):\n ffo_extra_args = Op.offset\n elif hasattr(Op, \"ev_index\"):\n ffo_extra_args = round_keys[Op.ev_index]\n format_string = \"assignment {}/{}: {}({}, {}, {}, extra={}) = {}\"\n ffo = [index_assign, len(list_of_lambda_assignments) - 1,\n Op.op.__name__, ffo_arg0, ffo_arg1, ffo_arg2,\n ffo_extra_args, result]\n cls.log_msg(format_string, ffo)\n\n ## debugging\n # print(f\"\\t{result.width}-width op: {Op.op.__name__}\")\n # if hasattr(Op, \"my_index\"): print(\"\\t\\targ:\", all_vars[Op.my_index])\n # if hasattr(Op, \"indices\"): print(\"\\t\\targs:\", [all_vars[i] for i in Op.indices])\n # if hasattr(Op, \"ct\"): print(\"\\t\\tct:\", Op.ct)\n # if hasattr(Op, \"offset\"): print(\"\\t\\toffset:\", Op.offset)\n # if hasattr(Op, \"ev_index\"): print(\"\\t\\text_arg:\", round_keys[Op.ev_index])\n # print(f\"\\t\\tresult:\", result)\n all_vars.append(result)\n return all_vars[-num_outputs:]\n\n RandomBvFunction.eval = eval_method\n\n # check RandomBvFunction does not return Constant\n try:\n RandomBvFunction(*[core.Variable(f\"x{i}\", width) for i in range(num_inputs)],\n symbolic_inputs=True, simplify=False)\n except ValueError as e:\n if not str(e).startswith(\"if symbolic_inputs, expected no Constant values\"):\n raise e\n else:\n continue\n\n if num_rounds is None:\n return RandomBvFunction\n else:\n _num_rounds = num_rounds\n\n class RandomRBF(RandomBvFunction, RoundBasedFunction):\n num_rounds = _num_rounds\n\n @classmethod\n def eval(cls, *args):\n for index_round in range(cls.num_rounds):\n args = super().eval(*args)\n\n if index_round < cls.num_rounds - 1:\n if len(args) < len(cls.input_widths):\n args = args * ((len(cls.input_widths) // len(args)) + 1)\n args = args[:len(cls.input_widths)]\n\n cls.add_round_outputs(*args)\n\n format_string = \"round {}/{}: outputs = (\" + \"{}, \"*len(args) + \")\"\n ffo = [index_round, cls.num_rounds - 1] + list(args)\n cls.log_msg(format_string, ffo)\n return args\n\n @classmethod\n def set_num_rounds(cls, new_num_rounds):\n cls.num_rounds = new_num_rounds\n\n # check RandomRBF does not return Constant\n 
try:\n RandomRBF(*[core.Variable(f\"x{i}\", width) for i in range(num_inputs)],\n symbolic_inputs=True, simplify=False)\n except ValueError as e:\n if not str(e).startswith(\"if symbolic_inputs, expected no Constant values\"):\n raise e\n else:\n continue\n else:\n return RandomRBF", "def generate_matrix(rows, cols):\n matrix_random = np.random.rand(rows, cols)\n return matrix_random", "def randomMV(\n layout, min=-2.0, max=2.0, grades=None, mvClass=MultiVector,\n uniform=None, n=1, normed=False):\n\n if n > 1:\n # return many multivectors\n return [randomMV(layout=layout, min=min, max=max, grades=grades,\n mvClass=mvClass, uniform=uniform, n=1,\n normed=normed) for k in range(n)]\n\n if uniform is None:\n uniform = np.random.uniform\n\n if grades is None:\n mv = mvClass(layout, uniform(min, max, (layout.gaDims,)))\n else:\n if isinstance(grades, int):\n grades = [grades]\n newValue = np.zeros((layout.gaDims,))\n for i in range(layout.gaDims):\n if layout.gradeList[i] in grades:\n newValue[i] = uniform(min, max)\n mv = mvClass(layout, newValue)\n\n if normed:\n mv = mv.normal()\n\n return mv", "def init_random_params(self,scale, layer_sizes, rs=npr.RandomState(0)):\n return [(rs.randn(insize, outsize) * scale, # weight matrix\n rs.randn(outsize) * scale) # bias vector\n for insize, outsize in zip(self.layer_sizes[:-1], self.layer_sizes[1:])]", "def test_multinomial(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n random = RandomStreams(utt.fetch_seed())\r\n fn = function([], random.multinomial((4,4), 1, [0.1]*10), updates=random.updates())\r\n\r\n fn_val0 = fn()\r\n fn_val1 = fn()\r\n\r\n rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)\r\n rng = numpy.random.RandomState(int(rng_seed)) #int() is for 32bit\r\n numpy_val0 = rng.multinomial(1, [0.1]*10, size=(4,4))\r\n numpy_val1 = rng.multinomial(1, [0.1]*10, size=(4,4))\r\n\r\n assert numpy.all(fn_val0 == numpy_val0)\r\n assert numpy.all(fn_val1 == numpy_val1)", "def _init_LBM_random(self, n1, n2, nq, nl, nb_ones):\n eps_1 = 1e-2 / n1\n eps_2 = 1e-2 / n2\n alpha_1 = (self._np.ones(nq) / nq).reshape((nq, 1))\n alpha_2 = (self._np.ones(nl) / nl).reshape((1, nl))\n tau_1 = self._np.random.uniform(size=(n1, nq)) ** 2\n tau_1 /= tau_1.sum(axis=1).reshape(n1, 1)\n tau_1[tau_1 < eps_1] = eps_1\n tau_1 /= tau_1.sum(axis=1).reshape(n1, 1) # Re-Normalize.\n tau_2 = self._np.random.uniform(size=(n2, nl)) ** 2\n tau_2 /= tau_2.sum(axis=1).reshape(n2, 1)\n tau_2[tau_2 < eps_2] = eps_2\n tau_2 /= tau_2.sum(axis=1).reshape(n2, 1) # Re-Normalize.\n pi = self._np.random.uniform(0, 1e-7, (nq, nl))\n pi = self._np.random.uniform(\n 0.2 * nb_ones / (n1 * n2), 2 * nb_ones / (n1 * n2), (nq, nl)\n )\n return (alpha_1.flatten(), alpha_2.flatten(), tau_1, tau_2, pi)", "def matvec(self, x):\n return self * x", "def randomSolution(self):\n # seed the random number generator\n random.seed()\n # loop through all the features\n for feature in self.features:\n # pick a random number based on the size of the feature's domain\n domainIndex = random.randint(0, len(feature.domain) - 1)\n # assign the value from the domain\n feature.value = feature.domain[domainIndex]", "def initial_vector(self):\n\n return asarray([np.random.uniform(l, u) for l, u in self.bounds])", "def random_rotate():\n u = np.random.uniform(size=3)\n\n # Random quaternion\n q = np.array([np.sqrt(1-u[0])*np.sin(2*np.pi*u[1]),\n np.sqrt(1-u[0])*np.cos(2*np.pi*u[1]),\n np.sqrt(u[0])*np.sin(2*np.pi*u[2]),\n np.sqrt(u[0])*np.cos(2*np.pi*u[2])])\n \n # 
Convert the quaternion into a rotation matrix \n rotMat = np.array([[q[0]*q[0] + q[1]*q[1] - q[2]*q[2] - q[3]*q[3],\n 2*q[1]*q[2] - 2*q[0]*q[3],\n 2*q[1]*q[3] + 2*q[0]*q[2]],\n [2*q[1]*q[2] + 2*q[0]*q[3],\n q[0]*q[0] - q[1]*q[1] + q[2]*q[2] - q[3]*q[3],\n 2*q[2]*q[3] - 2*q[0]*q[1]],\n [2*q[1]*q[3] - 2*q[0]*q[2],\n 2*q[2]*q[3] + 2*q[0]*q[1],\n q[0]*q[0] - q[1]*q[1] - q[2]*q[2] + q[3]*q[3]]])\n return rotMat", "def test_dot():\n assert_equal(dot(Vector(3.0, 2.0), Vector(2.0, -1.0)), 4.0)", "def test_d_2():\n rs = 10\n d = 2\n np.random.seed(rs)\n num = 3\n theta = np.random.uniform(0, 2 * math.pi)\n rotation = np.identity(d)\n\n rotation[0, 0] = math.cos(theta)\n rotation[0, 1] = - math.sin(theta)\n rotation[1, 0] = math.sin(theta)\n rotation[1, 1] = math.cos(theta)\n\n np.random.seed(rs)\n rotation_function = mt_obj.calculate_rotation_matrix(d, num)\n assert(np.all(rotation == rotation_function))", "def get_model_code():\n\n return \"\"\"\n functions {\n matrix cov_matrix_ard(int N, int D, vector[] x, vector ls, real alpha_sq, int cov_id) {\n matrix[N,N] S;\n real dist_sum;\n real sqrt3;\n real sqrt5;\n sqrt3=sqrt(3.0);\n sqrt5=sqrt(5.0);\n\n // For RBF ARD kernel\n if (cov_id == 1) {\n for(i in 1:(N-1)) {\n for(j in (i+1):N) {\n dist_sum = 0;\n for(d in 1:D) {\n dist_sum = dist_sum + square(x[i][d] - x[j][d]) / square(ls[d]);\n }\n S[i,j] = alpha_sq * exp( -0.5 * dist_sum);\n }\n }\n }\n\n // Fill upper triangle\n for(i in 1:(N-1)) {\n for(j in (i+1):N) {\n S[j,i] = S[i,j];\n }\n }\n\n // Create diagonal\n for(i in 1:N) {\n S[i,i] = alpha_sq;\n }\n\n return S;\n }\n\n matrix distance_matrix_on_vectors(int N, vector[] x) {\n matrix[N, N] distmat;\n for(i in 1:(N-1)) {\n for(j in (i+1):N) {\n distmat[i, j] = square(distance(x[i], x[j]));\n }\n }\n return distmat;\n }\n\n matrix cov_matrix_matern(int N, matrix dist, real ls, real alpha_sq, int cov_id) {\n matrix[N,N] S;\n real dist_ls;\n real sqrt3;\n real sqrt5;\n sqrt3=sqrt(3.0);\n sqrt5=sqrt(5.0);\n\n // For Matern kernel with parameter nu=1/2 (i.e. absolute exponential kernel)\n if (cov_id == 2) {\n for(i in 1:(N-1)) {\n for(j in (i+1):N) {\n dist_ls = fabs(dist[i,j])/square(ls);\n S[i,j] = alpha_sq * exp(-1 * dist_ls);\n }\n }\n }\n\n // For Matern kernel with parameter nu=3/2\n else if (cov_id == 3) {\n for(i in 1:(N-1)) {\n for(j in (i+1):N) {\n dist_ls = fabs(dist[i,j])/ls;\n S[i,j] = alpha_sq * (1 + sqrt3 * dist_ls) * exp(-sqrt3 * dist_ls);\n }\n }\n }\n\n // For Matern kernel with parameter nu=5/2\n else if (cov_id == 4) {\n for(i in 1:(N-1)) {\n for(j in (i+1):N) {\n dist_ls = fabs(dist[i,j])/ls;\n S[i,j] = alpha_sq * (1 + sqrt5 * dist_ls + 5 * pow(dist_ls,2)/3) * exp(-sqrt5 * dist_ls);\n }\n }\n }\n\n // For Matern kernel with parameter nu tending to infinity (i.e. 
RBF kernel)\n else if (cov_id == 1) {\n for(i in 1:(N-1)) {\n for(j in (i+1):N) {\n dist_ls = fabs(dist[i,j])/ls;\n S[i,j] = alpha_sq * exp( -0.5 * pow(dist_ls, 2) );\n }\n }\n }\n\n // Fill upper triangle\n for(i in 1:(N-1)) {\n for(j in (i+1):N) {\n S[j,i] = S[i,j];\n }\n }\n\n // Create diagonal\n for(i in 1:N) {\n S[i,i] = alpha_sq;\n }\n\n return S;\n }\n\n }\n\n data {\n int<lower=1> D;\n int<lower=1> N;\n vector[D] x[N];\n vector[N] y;\n real<lower=0> ig1;\n real<lower=0> ig2;\n real<lower=0> n1;\n real<lower=0> n2;\n real<lower=0> sigma;\n int kernel_id;\n }\n\n parameters {\n real<lower=0> rho;\n vector<lower=0>[D] rhovec;\n real<lower=0> alpha;\n }\n\n model {\n int cov_id;\n matrix[N, N] cov;\n matrix[N, N] L_cov;\n matrix[N, N] distmat;\n\n // RBF kernel single lengthscale\n if (kernel_id == 1) {\n cov = cov_exp_quad(x, alpha, rho) + diag_matrix(rep_vector(square(sigma), N));\n L_cov = cholesky_decompose(cov);\n rho ~ inv_gamma(ig1, ig2);\n alpha ~ normal(n1, n2);\n y ~ multi_normal_cholesky(rep_vector(0, N), L_cov);\n }\n\n // Matern kernel single lengthscale\n else if (kernel_id >= 2 && kernel_id <= 4) {\n if (kernel_id == 2) { cov_id = 2; }\n if (kernel_id == 3) { cov_id = 3; }\n if (kernel_id == 4) { cov_id = 4; }\n\n distmat = distance_matrix_on_vectors(N, x);\n cov = cov_matrix_matern(N, distmat, rho, square(alpha), cov_id) + diag_matrix(rep_vector(square(sigma), N));\n L_cov = cholesky_decompose(cov);\n rho ~ inv_gamma(ig1, ig2);\n alpha ~ normal(n1, n2);\n y ~ multi_normal_cholesky(rep_vector(0, N), L_cov);\n }\n\n // RBF kernel with ARD (D-dimensional) lengthscale\n else if (kernel_id == 5) {\n cov_id = 1;\n cov = cov_matrix_ard(N, D, x, rhovec, square(alpha), cov_id) + diag_matrix(rep_vector(square(sigma), N));\n L_cov = cholesky_decompose(cov);\n for(d in 1:D) {\n rhovec[d] ~ inv_gamma(ig1, ig2);\n }\n alpha ~ normal(n1, n2);\n y ~ multi_normal_cholesky(rep_vector(0, N), L_cov);\n }\n\n }\n \"\"\"", "def generate_random_single_force_crack_uncoupled_tensor():\n # 1. Generate random single force vector and crack MT:\n # generate random single force vector:\n random_SF_vector = generate_random_single_force_vector()\n # generate crack MT (from Tape 2013, eq. 41 and Fig. 6):\n # To randomly generate lune_perim_angle (from Tape 2013 eq. 41 and Fig. 6):\n # --- To randomly generate lune_perim_angle (from Tape 2013 eq. 41 and Fig. 6) ---:\n # Get between small range:\n theta_lune_sphere = np.random.uniform(-1.,1.)*np.pi/2.\n random_num = random.random()\n if random_num <= 0.5:\n phi_lune_sphere = 0. #np.pi/6. #0. #np.pi/6.\n elif random_num > 0.5:\n phi_lune_sphere = np.pi/3 #-1.*np.pi/6. 
#np.pi/3 #-1.*np.pi/6.\n # calculate lune_perim_angle, allowing for outside tan(-pi/2->pi/2):\n lune_perim_angle = np.arctan(np.sin(phi_lune_sphere)/np.sin(theta_lune_sphere)) # Generates uniform distribution of lune crack angle in Lune plot space #random.random()*2.*np.pi # Random number in uniform distribution betwen 0 and 2 pi\n # And redistribute evenly everywhere on boundary:\n random_num = random.random()\n if random_num>0.25 and random_num<=0.5:\n lune_perim_angle = lune_perim_angle+np.pi # Allow to use full 2 pi space\n if random_num>0.5 and random_num<=0.75:\n lune_perim_angle = lune_perim_angle+np.pi/2 # Allow to use full 2 pi space\n if random_num>0.75 and random_num<=1.0:\n lune_perim_angle = lune_perim_angle+3*np.pi/2 # Allow to use full 2 pi space\n # --- ---\n # random_num = random.random()\n # if random_num <= 0.5:\n # theta_lune_sphere = random.random()*np.pi/2.\n # elif random_num > 0.5:\n # theta_lune_sphere = -1.*random.random()*np.pi/2.\n # random_num = random.random()\n # if random_num <= 0.5:\n # phi_lune_sphere = np.pi/6.\n # elif random_num > 0.5:\n # phi_lune_sphere = -1.*np.pi/6.\n # lune_perim_angle = np.arctan(np.sin(phi_lune_sphere)/np.sin(theta_lune_sphere)) # Generates uniform distribution of lune crack angle in Lune plot space #random.random()*2.*np.pi # Random number in uniform distribution betwen 0 and 2 pi\n crack_MT_to_rot = ((((4*(np.sin(lune_perim_angle)**2)) + (np.cos(lune_perim_angle)**2))**-0.5)/np.sqrt(3.)) * np.vstack(([np.cos(lune_perim_angle)-(np.sqrt(2)*np.sin(lune_perim_angle)),0.,0.],[0.,np.cos(lune_perim_angle)-(np.sqrt(2)*np.sin(lune_perim_angle)),0.], [0.,0.,np.cos(lune_perim_angle)+(2.*np.sqrt(2)*np.sin(lune_perim_angle))])) # crack moment tensor\n # 2. Randomly rotate crack MT to random orientation:\n # Get a random sample 3-vector on a 3-unit sphere to use to calculate random theta and phi rotation angles:\n a_unnormalised = np.array([np.random.normal(loc=0.0, scale=1.0), np.random.normal(loc=0.0, scale=1.0), np.random.normal(loc=0.0, scale=1.0)], dtype=float) # Generate 3 indepdendent normal deviates\n a_normalised = a_unnormalised/(np.sum(a_unnormalised**2)**-0.5) # Normallise sample onto unit 3-sphere - As in Muller (1959)\n # And normallise so that vector magnitude = 1:\n a_normalised = a_normalised/((np.sum(a_normalised**2))**0.5)\n x = a_normalised[0]\n y = a_normalised[1]\n z = a_normalised[2]\n theta = np.arccos(z)\n phi = np.arccos(x/np.sin(theta))\n crack_MT_rotated = rot_mt_by_theta_phi(crack_MT_to_rot, theta, phi)\n # 3. Convert crack MT to 6 MT:\n crack_six_MT_rotated = get_six_MT_from_full_MT_array(crack_MT_rotated)\n # And set to correct dimensions (so matrix multiplication in forward model works correctly):\n crack_six_MT_rotated = np.reshape(crack_six_MT_rotated, (6, 1))\n # 4. Split the amplitude of crack to single force randomly:\n random_amp_frac = random.random() # random number between 0. and 1.\n random_SF_vector = random_SF_vector*random_amp_frac\n crack_six_MT_rotated = crack_six_MT_rotated*(1.-random_amp_frac)\n # 5. 
Finally combine to tensor of length 9:\n random_crack_single_force_uncoupled_tensor = np.vstack((crack_six_MT_rotated, random_SF_vector))\n return random_crack_single_force_uncoupled_tensor, random_amp_frac", "def random():\n np.random.seed(1939)", "def sample_random_architecture(self, dataset_api):\n while True:\n matrix = np.random.choice(\n [0, 1], size=(NUM_VERTICES, NUM_VERTICES))\n matrix = np.triu(matrix, 1)\n ops = np.random.choice(OPS, size=NUM_VERTICES).tolist()\n ops[0] = INPUT\n ops[-1] = OUTPUT\n spec = dataset_api['api'].ModelSpec(matrix=matrix, ops=ops)\n if dataset_api['nb101_data'].is_valid(spec):\n break\n \n self.set_spec({'matrix':matrix, 'ops':ops})", "def mdot(*args):\n r = args[0]\n for a in args[1:]:\n r = N.dot(r,a)\n return r", "def init_random_params(scale, layer_sizes, rs=npr.RandomState(0)):\n return [(scale * rs.randn(m, n), # weight matrix\n scale * rs.randn(n)) # bias vector\n for m, n in zip(layer_sizes[:-1], layer_sizes[1:])]", "def _matvec(x):\n return _normal_matvec(matvec, x)", "def box_mull():\n x1 = 1 - random.random()\n x2 = 1 - random.random()\n return math.sqrt(-2 * math.log(x1)) * math.cos(2 * math.pi * x2)", "def testNorm(self):\n assert(Vector(0, 3, 4).norm() == 5)\n assert(Vector(3, 4).norm() == 5)\n assert Vector(0, 3, 0, 0, 4, 0, size=10).norm() == 5", "def draw_random_u(d):\n mu = np.zeros(d)\n cov = np.eye(d)\n u = multivariate_normal.rvs(mean=mu, cov=cov)\n return u / np.linalg.norm(u)", "def test_feature_computation(self):\n k = [2, 3, 4, 5, 6]\n mn = self.create_chain_model(k)\n d = 4\n\n for i in range(len(k)):\n mn.set_unary_weights(i, np.random.randn(k[i], d))", "def test_inverse_of_linear_vector_transforms(free_alg: Drudge):\n\n dr = free_alg\n p = dr.names\n v = p.v\n\n a = Vec('a')\n b = Vec('b')\n\n defs = [\n dr.define(a, v + 1),\n dr.define(b, v - 1)\n ]\n res = dr.lvt_inv(defs)\n\n assert len(res) == 2\n half = Rational(1, 2)\n one_checked = False\n v_checked = False\n for i in res:\n if i.lhs == 1:\n assert (i - half * a + half * b).simplify() == 0\n one_checked = True\n elif i.lhs == v:\n assert (i - half * a - half * b).simplify() == 0\n v_checked = True\n else:\n assert False\n continue\n\n assert one_checked and v_checked", "def test_multiple_case(self):\r\n\r\n shp = (3, 3)\r\n fx, fy, fz, fw = fmatrices('xyzw')\r\n dx, dy, dz, dw = dmatrices('xyzw')\r\n fv = fvector('r').dimshuffle('x', 0)\r\n dv = dvector('s').dimshuffle('x', 0)\r\n fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fwv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float32').reshape(1, shp[0])\r\n dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dwv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float64').reshape(1, shp[0])\r\n\r\n #We must be sure that the Canonizer is working, but that we don't have other\r\n # optimisation that could hide bug in the Canonizer as local_elemwise_fusion\r\n mode = compile.mode.get_default_mode()\r\n old_optimizer = mode._optimizer\r\n try:\r\n mode._optimizer = gof.Query([\"canonicalize\"])\r\n mode._optimizer = mode._optimizer.including('ShapeOpt')\r\n mode._optimizer = 
mode._optimizer.excluding(\r\n 'local_elemwise_fusion')\r\n\r\n #test x / x -> 1\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([(fx/fx,[fx],[fxv],'float32'),\r\n (dx/dx,[dx],[dxv],'float64'),\r\n (fv/fv,[fv],[fvv],'float32'),\r\n (dv/dv,[dv],[dvv],'float64'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert (out == numpy.ones(shp, dtype=out_dtype)).all()\r\n topo = f.maker.fgraph.toposort()\r\n if sym_inputs[0].broadcastable[0]:\r\n assert len(topo) == 2\r\n assert isinstance(topo[0].op, Shape_i)\r\n assert isinstance(topo[1].op, tensor.Alloc)\r\n else:\r\n assert len(topo) == 3\r\n assert isinstance(topo[0].op, Shape_i)\r\n assert isinstance(topo[1].op, Shape_i)\r\n assert isinstance(topo[2].op, tensor.Alloc)\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (x * y) / x -> y\r\n for id,(g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([\r\n ((dx*dy)/dx,[dx,dy],[dxv,dyv],0,'float64'),\r\n ((fx*fy)/fx,[fx,fy],[fxv,fyv],0,'float32'),\r\n ((dv*dy)/dv,[dv,dy],[dvv,dyv],0,'float64'),\r\n ((fv*fy)/fv,[fv,fy],[fvv,fyv],0,'float32'),\r\n #must broadcast as their is a dimshuffle in the computation\r\n ((dx*dv)/dx,[dx,dv],[dxv,dvv],1,'float64'),\r\n #topo: [Elemwise{second,no_inplace}(x, <TensorType(float64, row)>)]\r\n ((fx*fv)/fx,[fx,fv],[fxv,fvv],1,'float32')\r\n #topo: [Elemwise{second,no_inplace}(x, <TensorType(float32, row)>)]\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert(out_dtype == out.dtype)\r\n assert numpy.allclose(out, val_inputs[1])\r\n topo = f.maker.fgraph.toposort()\r\n print \"ID TOPO\", id, topo, sym_inputs\r\n for r, t in f.maker.fgraph.shape_feature.shape_of.items():\r\n print ' ', r, t\r\n if topo and not(len(topo)==1 and topo[0].op==deep_copy_op):\r\n for node in topo[:-1]:\r\n assert isinstance(node.op, Shape_i)\r\n assert isinstance(topo[-1].op, tensor.Alloc)\r\n\r\n #test x / y / x -> 1 / y\r\n for id,(g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([\r\n ((dx/dy)/dx,[dx,dy],[dxv,dyv],1,'float64'),\r\n ((fx/fy)/fx,[fx,fy],[fxv,fyv],1,'float32'),\r\n ((dv/dy)/dv,[dv,dy],[dvv,dyv],1,'float64'),\r\n ((fv/fy)/fv,[fv,fy],[fvv,fyv],1,'float32'),\r\n #must broadcast as their is a dimshuffle in the computation\r\n\r\n ((dx/dv)/dx,[dx,dv],[dxv,dvv],1,'float64'),\r\n #topo: [Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float64, row)>), Alloc]\r\n ((fx/fv)/fx,[fx,fv],[fxv,fvv],1,'float32'),\r\n #topo:[Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float32, row)>), Alloc]\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (1 / val_inputs[1]))\r\n topo = f.maker.fgraph.toposort()\r\n print topo\r\n elem = [t for t in topo if isinstance(t.op, T.Elemwise)]\r\n assert len(elem) == nb_elemwise\r\n assert isinstance(elem[0].op, (T.Elemwise, ))\r\n assert isinstance(elem[0].op.scalar_op, (\r\n theano.scalar.basic.Inv, theano.scalar.basic.TrueDiv))\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (a / b) * (b / c) * (c / d) -> a / d\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((dx / dy) * (dy / dz) * (dz / dw),[dx,dy,dz,dw],[dxv,dyv,dzv,dwv],'float64'),\r\n ((fx / fy) * (fy / fz) * (fz / fw),[fx,fy,fz,fw],[fxv,fyv,fzv,fwv],'float32'),\r\n ((dv / dy) * (dy / dz) * (dz / dw),[dv,dy,dz,dw],[dvv,dyv,dzv,dwv],'float64'),\r\n ((fv / fy) * (fy / fz) * (fz / fw),[fv,fy,fz,fw],[fvv,fyv,fzv,fwv],'float32'),\r\n ((dx / dv) * (dv 
/ dz) * (dz / dw),[dx,dv,dz,dw],[dxv,dvv,dzv,dwv],'float64'),\r\n ((fx / fv) * (fv / fz) * (fz / fw),[fx,fv,fz,fw],[fxv,fvv,fzv,fwv],'float32'),\r\n ((dx / dy) * (dy / dv) * (dv / dw),[dx,dy,dv,dw],[dxv,dyv,dvv,dwv],'float64'),\r\n ((fx / fy) * (fy / fv) * (fv / fw),[fx,fy,fv,fw],[fxv,fyv,fvv,fwv],'float32'),\r\n ((dx / dy) * (dy / dz) * (dz / dv),[dx,dy,dz,dv],[dxv,dyv,dzv,dvv],'float64'),\r\n ((fx / fy) * (fy / fz) * (fz / fv),[fx,fy,fz,fv],[fxv,fyv,fzv,fvv],'float32'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (val_inputs[0] / val_inputs[3]))\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 1\r\n assert isinstance(topo[0].op, (T.Elemwise, ))\r\n assert isinstance(topo[0].op.scalar_op,\r\n theano.scalar.basic.TrueDiv)\r\n assert len(topo[0].inputs) == 2\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (2.0 * x) / (4.0 * y) -> (0.5 * x) / y\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n (((2.0*dx)/(4.0*dy)),[dx,dy],[dxv,dyv],'float64'),\r\n (((2.0*fx)/(4.0*fy)),[fx,fy],[fxv,fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (((2.0*dv)/(4.0*dy)),[dv,dy],[dvv,dyv],'float64'),\r\n (((2.0*fv)/(4.0*fy)),[fv,fy],[fvv,fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (((2.0*dx)/(4.0*dv)),[dx,dv],[dxv,dvv],'float64'),\r\n (((2.0*fx)/(4.0*fv)),[fx,fv],[fxv,fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (0.5 *\r\n val_inputs[0] / val_inputs[1]))\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 2\r\n assert isinstance(topo[0].op, (T.Elemwise, ))\r\n assert isinstance(topo[0].op.scalar_op,\r\n theano.scalar.basic.Mul)\r\n assert len(topo[0].inputs) == 2\r\n assert isinstance(topo[1].op, (T.Elemwise, ))\r\n assert isinstance(topo[1].op.scalar_op,\r\n theano.scalar.basic.TrueDiv)\r\n assert len(topo[1].inputs) == 2\r\n assert(out_dtype == out.dtype)\r\n\r\n #test 2 * x / 2 -> x\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((2*dx)/2,[dx],[dxv],'float64'),\r\n ((2*fx)/2,[fx],[fxv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2*dv)/2,[dv],[dvv],'float64'),\r\n ((2*fv)/2,[fv],[fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, val_inputs[0])\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 1\r\n topo[0].op == deep_copy_op\r\n assert(out_dtype == out.dtype)\r\n\r\n #test x / abs(x) -> sign(x)\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n (dx/abs(dx),[dx],[0.5-dxv],'float64'),\r\n (fx/abs(fx),[fx],[0.5-fxv], 'float32'),\r\n (dx/abs(dx),[dx],[0.1*dxv],'float64'),\r\n (fx/abs(fx),[fx],[0.1*fxv], 'float32'),\r\n (dv/abs(dv),[dv],[0.5-dvv],'float64'),\r\n (fv/abs(fv),[fv],[0.5-fvv], 'float32'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.all(numpy.isfinite(out))\r\n assert numpy.allclose(out, numpy.sign(val_inputs[0]))\r\n assert(out_dtype == out.dtype)\r\n assert 
len(f.maker.fgraph.toposort()) == 1\r\n\r\n #test (2*x) / (3*abs(x)) -> sign(x)\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((2 * dx) / (3 * abs(dx)), [dx], [0.5 - dxv], 'float64'),\r\n ((2 * fx) / (3 * abs(fx)), [fx], [0.5 - fxv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2 * dx) / (3 * abs(dx)), [dx], [0.1 * dxv], 'float64'),\r\n ((2 * fx) / (3 * abs(fx)), [fx], [0.1 * fxv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2 * dv) / (3 * abs(dv)), [dv], [0.5 - dvv], 'float64'),\r\n ((2 * fv) / (3 * abs(fv)), [fv], [0.5 - fvv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n topo = f.maker.fgraph.toposort()\r\n out = f(*val_inputs)\r\n assert numpy.all(numpy.isfinite(out))\r\n assert numpy.allclose(out, numpy.sign(val_inputs[0]) * 2 / 3)\r\n assert(out_dtype == out.dtype)\r\n finally:\r\n mode._optimizer = old_optimizer" ]
[ "0.6085895", "0.6054807", "0.59700584", "0.5968296", "0.5936185", "0.59123176", "0.5900732", "0.5889028", "0.58634204", "0.58405113", "0.5830244", "0.580792", "0.5787771", "0.57753277", "0.5718604", "0.5715407", "0.5711817", "0.5695124", "0.56888473", "0.5683767", "0.5669956", "0.56483954", "0.5646562", "0.5644628", "0.5633371", "0.5621625", "0.56132585", "0.56085676", "0.5599826", "0.5591243", "0.55711144", "0.55428255", "0.55350137", "0.55187225", "0.54985726", "0.5471707", "0.54583174", "0.54560685", "0.54455376", "0.5409092", "0.5400288", "0.53972006", "0.5389969", "0.5375858", "0.5368657", "0.53670686", "0.536649", "0.5360998", "0.53583646", "0.5350644", "0.53498083", "0.53435314", "0.53422195", "0.53375447", "0.53359616", "0.53341687", "0.53222966", "0.532116", "0.53191775", "0.53185225", "0.5316869", "0.5296866", "0.52966326", "0.5294384", "0.5287138", "0.52866703", "0.5278745", "0.52746755", "0.52741796", "0.5274107", "0.5271097", "0.5263972", "0.5262907", "0.5255674", "0.52553535", "0.52552205", "0.5253801", "0.5249584", "0.5249296", "0.52461505", "0.5244031", "0.5243972", "0.52360374", "0.5235597", "0.52303326", "0.5229319", "0.5227924", "0.5225641", "0.52245593", "0.5223549", "0.5218617", "0.5213112", "0.52113026", "0.5210138", "0.52068996", "0.52019", "0.51990753", "0.51960087", "0.51910955", "0.5184825" ]
0.53885347
43
Implementing scalar versus vector math
def exercise_9(path_to_X_data, path_to_w_data):
    print("=" * 30)
    print("Running exercise_9()")

    #### YOUR CODE HERE ####
    # Load the X and w data from file into arrays
    X = numpy.loadtxt(path_to_X_data, delimiter=',')
    w = numpy.loadtxt(path_to_w_data, delimiter=',')

    print(f'X:\n{X}')
    print(f'w: {w}')

    #### YOUR CODE HERE ####
    # Extract the column 0 (x_n1) and column 1 (x_n2) vectors from X
    x_n1 = X[numpy.array([0, 1, 2, 3, 4]), 0]
    x_n2 = X[numpy.array([0, 1, 2, 3, 4]), 1]

    print(f'x_n1: {x_n1}')
    print(f'x_n2: {x_n2}')

    #### YOUR CODE HERE ####
    # Use scalar arithmetic to compute the right-hand side of Exercise 3
    # (Exercise 1.3 from FCMA p.35)
    w_0 = w[0]
    w_1 = w[1]
    scalar_result_0 = (w_0 * w_0 * sum(x_n1 * x_n1)
                       + 2 * w_0 * w_1 * sum(x_n2 * x_n1)
                       + w_1 * w_1 * sum(x_n2 * x_n2))
    # Set the final value to scalar_result
    scalar_result = scalar_result_0

    print(f'scalar_result: {scalar_result}')

    #### YOUR CODE HERE ####
    # Now compute the same result using linear algebra operators
    # (i.e., the left-hand side of the equation in Exercise 1.3 from FCMA p.35).
    # The terms can be grouped in any order (but remember, matrix
    # multiplication is *NOT* commutative!); here we first compute the inner
    # term X-transpose times X (XX), and then complete the computation below
    # by multiplying on the left and right by w (wXXw).
    X_transpose = numpy.transpose(X)
    XX = numpy.dot(X_transpose, X)

    print(f'XX:\n{XX}')

    #### YOUR CODE HERE ####
    # Complete the computation by multiplying on the left and right by w
    # to determine the final value: wXXw
    wXX = numpy.dot(w, XX)
    wXXw = numpy.dot(wXX, w)

    print(f'wXXw: {wXXw}')

    print("DONE exercise_9()")

    return X, w, x_n1, x_n2, scalar_result, XX, wXXw
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __div__(self, scalar):\n return Vector(self.x / scalar, self.y / scalar)", "def scalar_vector_ext(alpha, v, a, b):\n return [alpha * v[0],\n alpha * v[0] * a + b]", "def uv(vec):\n return vec / sqrt(dot(vec, vec))", "def __rdiv__(self, scalar):\n return Vector(self.x / scalar, self.y / scalar)", "def scalar_vector_mult(alpha, v):\n return [alpha*x for x in v]", "def scalar_multiply(c: float, v: Vector) -> Vector:\n return [c * v_i for v_i in v]", "def scalar_multiply(c: float, v: Vector) -> Vector:\n return [c * v_i for v_i in v]", "def test_multiply_scalar(self):\n a = Vector(1, 2)\n c = a * 3\n assert c.x == 3\n assert c.y == 6", "def apply_scalar(vector, scalar):\n new_coordinates = []\n index = 0\n while index < vector.dimension:\n new_value = vector.coordinates[index] * scalar\n new_coordinates.append(new_value)\n index += 1\n new_vector = Vector(new_coordinates)\n return new_vector", "def scalar_multiply(s: float, v: Vector) -> Vector:\n return [s * v_item for v_item in v]", "def scalarMultiplication(self, factor):\n components = self.components() * factor\n return Vector.initializeFromComponents(components)", "def __truediv__(self, other):\n # other is a scalar\n if isinstance(other, (int, float, complex, Fraction)) and not isinstance(other, bool):\n return Vector([i / other for i in self.data], self.column)\n # other is not a scalar\n else:\n raise TypeError('Argument is not a number')", "def scalar(self, other):\n return self.x * other.x + self.y * other.y", "def __rmul__(self, other): \n if isinstance(other, Iterable):\n # dot product\n return self.x * other[0] + self.y * other[1]\n else:\n # scalar product\n return Vector(self.x * other, self.y * other)", "def __truediv__(self, scalar):\n return self.div(scalar)", "def vector_magnitude(vec, axis=None):\n return (vec * vec).sum(axis=axis) ** 0.5", "def vector(p0, p1):\n a = p1[0] - p0[0]\n b = p1[1] - p0[1]\n return (a, b)", "def truediv_(self, scalar):\n for idx in range(len(self)):\n self.parameters[idx] /= scalar", "def x(self) -> float:\n return self.A[1] if self.scalar_vector else self.A[0]", "def __mul__(self, other): \n if isinstance(other, Iterable):\n # dot product\n return self.x * other[0] + self.y * other[1]\n else:\n # scalar product\n return Vector(self.x * other, self.y * other)", "def __mul__(self,a):\n return Vector(self.x*a,self.y*a)\n pass", "def v(x):\n return x*x", "def _scalar_vectorized(scalar, M):\n return scalar[:, np.newaxis, np.newaxis]*M", "def __rtruediv__(self, other) -> 'MultiVector':\n\n other, mv = self._checkOther(other)\n if isinstance(other, np.ndarray):\n obj = self.__array__()\n return other / obj\n\n return other * self.inv()", "def __mul__(self, other):\n if isinstance(other, Vector):\n return self.dot(other)\n else:\n raise TypeError(other)", "def norm(vec):\n vel = numpy.sqrt(numpy.dot(vec,vec))\n return vel", "def scalar_function(x, y):\n if x <= y:\n return x*y\n else:\n return x/y", "def __truediv__(self, factor):\n if type(factor) == Vector:\n raise NotImplementedError\n else:\n return Vector([c / factor for c in self.components])", "def vector_component(u, v):\n x = dot_vectors(u, v) / length_vector_sqrd(v)\n return scale_vector(v, x)", "def __mul__(self,other):\n if type(other) is Vector:\n return(self.x*other.x + self.y*other.y + self.z*other.z)\n else:\n return(Vector(self.x*other,self.y*other,self.z*other))", "def __rmul__(self, el2):\n if type(el2) is float or type(el2) is int:\n return vector(el2 * self.x, el2 * self.y, el2 * self.z)\n elif type(el2) is vector:\n return 
vector(el2.y * self.z - el2.z * self.y,\n el2.z * self.x - el2.x * self.z,\n el2.x * self.y - el2.y * self.x)\n else:\n raise TypeError('Cannot multiply a vector with something'\n 'that is neither a vector, a float or an int')", "def unit(vector: np.array) -> np.array:\n return np.array([*vector]) / np.sqrt((vector * vector).sum(axis=0))", "def __rmul__(self,a):\n return Vector(self.x*a,self.y*a)\n pass", "def __itruediv__(self, scalar):\n return self.div_(scalar)", "def __idiv__(self, other):\r\n T = type(other)\r\n # vec4/=scalar\r\n if T==types.FloatType or T==types.IntType or T==types.LongType:\r\n self.x/=other\r\n self.y/=other\r\n self.z/=other\r\n self.w/=other\r\n return self\r\n else:\r\n raise TypeError, \"unsupported operand type for /=\"", "def getNormalizedVector(self):\n return self.scalarMultiplication(self.norm() ** -1.0)", "def __truediv__(self, other) -> 'MultiVector':\n\n other, mv = self._checkOther(other, coerce=False)\n\n if mv:\n return self * other.inv()\n else:\n if isinstance(other, np.ndarray):\n obj = self.__array__()\n return obj/other\n newValue = self.value / other\n return self._newMV(newValue)", "def normalize(self):\n return Vector(self.args + []) / self.magnitude()", "def magnitude(v: Vector) -> float:\n return math.sqrt(sum_of_squares(v)) #math.sqrt() is a square root function", "def unit_vector(vector):\n #print 'unit_vector'\n #print vector\n #print type(vector)\n #npvector = np.array(vector)\n return vector / np.linalg.norm(vector)", "def vecnorm(*axes):\n vecsum = 0\n for axis in axes:\n vecsum += axis**2\n return np.sqrt(vecsum)", "def unit_vector(vector):\r\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\r\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\r\n return vector / np.linalg.norm(vector)", "def v_o(A,vd):\n return A*vd", "def __rdiv__(self, scalar):\n raise(VetorError, \"Not possible divide a scalar by a vector\")", "def vector(x, y, z):\n return point_or_vector(x,y,z,0.0)", "def vector(self) -> Vector:\n return self._normal * self._distance_from_origin", "def matvec(self, x):\n return self * x", "def __rmul__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Vector(other * self.x, other * self.y)\n else:\n raise TypeError(other)", "def scalar_mult(v, u):\n return [v[i] * u[i] for i in range(len(v))]", "def scalar_proj(v, w):\n return vector_dot(v, vector_hat(w))", "def __truediv__(self, other):\n try:\n other = float(other)\n return tuple.__new__(Vec2, (self[0] / other, self[1] / other))\n except TypeError:\n try:\n ox, oy = other\n except Exception:\n return NotImplemented\n return tuple.__new__(Vec2, (self[0] / ox, self[1] / oy))", "def __mul__(self, other):\n # print other\n if type(other) == int or type(other) == float:\n return self.scale(other)\n elif type(other) == Vector:\n return self.dot(other)\n else:\n return NotImplemented", "def vector_magnitude(v):\n\n v = np.atleast_2d(v)\n\n return np.sqrt((v**2).sum(axis=1))", "def vectordot(a, b):\n return np.sum(a * b, 1)", "def __truediv__(self, o): \n return self * o.inv()", "def __mul__(self, scale):\n return Vec(self.x * scale, self.y * scale)", "def _unit_vector(pt0, pt1):\n dis_0_to_1 = sqrt((pt0[0] - pt1[0])**2 + (pt0[1] - pt1[1])**2)\n return (pt1[0] - pt0[0]) / dis_0_to_1, \\\n (pt1[1] - pt0[1]) / dis_0_to_1", "def magnitude(vector:tuple)->float:\n return math.sqrt(vector[0] ** 2 + vector[1] ** 2)", "def magni(vector):\n return(np.linalg.norm(vector))", "def scalar_mult(diagram, scalar):\n raise 
NotImplementedError", "def unit_vector(self,vector):\n\t\tunit_vector_query=0;\n\t\tfor word in vector:\n\t\t\tunit_vector_query += vector[word]*vector[word];\n\t\tunit_vector_query = math.sqrt(unit_vector_query);\n\t\treturn unit_vector_query", "def __truediv__(self, b):\n try:\n b = float(b)\n return Vector(self.x / b, self.y / b)\n except ValueError:\n raise ValueError(\"Right value must be castable to float, was {}\".format(b))", "def __mul__(self, other):\r\n\r\n T = type(other)\r\n # vec4*scalar\r\n if T==types.FloatType or T==types.IntType or T==types.LongType:\r\n return vec4(self.x*other, self.y*other, self.z*other, self.w*other)\r\n # vec4*vec4\r\n if isinstance(other, vec4):\r\n return self.x*other.x + self.y*other.y + self.z*other.z + self.w*other.w\r\n # unsupported\r\n else:\r\n # Try to delegate the operation to the other operand\r\n if getattr(other,\"__rmul__\",None)!=None:\r\n return other.__rmul__(self)\r\n else:\r\n raise TypeError, \"unsupported operand type for *\"", "def __mul__(self, other):\n x = self.x * other\n y = self.y * other\n return vec(x, y)", "def __mul__(self, other):\n return Vec2d(self.v[0] * other, self.v[1] * other)", "def unit_vector(vector):\n return 0 if vector[0] == 0 else vector[0]/abs(vector[0]), 0 if vector[1] == 0 else vector[1]/abs(vector[1])", "def __call__(self, t=1 / 2):\n return (t * self.vector)(self.p1)", "def __pow__(self, other):\n if isinstance(other, Vector):\n a, b, c = self._ar\n d, e, f = other._ar\n c1 = numpy.linalg.det(numpy.array(((b, c), (e, f))))\n c2 = -numpy.linalg.det(numpy.array(((a, c), (d, f))))\n c3 = numpy.linalg.det(numpy.array(((a, b), (d, e))))\n return Vector(c1, c2, c3)\n else:\n a = self._ar * numpy.array(other)\n return Vector(a)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def scalar_function(x, y):\n #Your code here\n if x<=y:\n fs = x*y\n else:\n fs = x/y\n return fs\n raise NotImplementedError", "def custom_np_function(vector):\n summation = 0\n for item in vector:\n summation += item ** 2\n return summation", "def scalarmul(scalar,vector):\r\n result = [[0] for row in range(len(vector))]\r\n # creates a list full of 0s with the same lenght of the vector\r\n for z in range(len(vector)):\r\n # for loop which continues as long as there are more elements left in the given vector \r\n result[z] = scalar*vector[z]\r\n # the list full of 0s is replaced by the product of each element in the given vector and the the scalar \r\n return result", "def test_vector2():\n\n input_scalar = ECScalar(\n bytes.fromhex(\"4b66e9d4d1b4673c5ad22691957d6af5c11b6421e0ea01d42ca4169e7918ba0d\")\n )\n input_u_coordinate = ECPoint(\n bytes.fromhex(\"e5210f12786811d3f4b7959d0538ae2c31dbe7106fc03c3efc4cd549c715a493\")\n )\n\n output_u_coordinate = ECPoint(\n 
bytes.fromhex(\"95cbde9476e8907d7aade45cb4b873f88b595a68799fa152e6f8f7647aac7957\")\n )\n\n assert x25519_scalarmult(input_scalar, input_u_coordinate) == output_u_coordinate", "def unit_vector(self,vector):\n return vector / np.linalg.norm(vector)", "def normalise(vectorA):\r\n normal = 0\r\n for i in range(len(vectorA)):\r\n normal += eval(vectorA[i])*eval(vectorA[i])\r\n return round(math.sqrt(normal),2)", "def __truediv__(self, scalar: float):\n if not (isinstance(scalar, float) or isinstance(scalar, int)):\n raise TypeError('scalar must be an scalar type (integer or float')\n return Quaternion(self.__real / scalar, self.__img / scalar)", "def unit_vector(v):\n h = ((v[0]**2)+(v[1]**2))**0.5\n if h == 0:\n h = 0.000000000000001\n ua = v[0] / h\n ub = v[1] / h\n return (ua, ub)", "def __rtruediv__(self, other):\n try:\n other = float(other)\n return tuple.__new__(Vec2, (other / self[0], other / self[1]))\n except TypeError:\n try:\n ox, oy = other\n except Exception:\n return NotImplemented\n return tuple.__new__(Vec2, (ox / self[0], oy / self[1]))", "def to_scalar(self, v):\n raise NotImplementedError('to_scalar')", "def unit_vector(vector):\n assert(vector != [0,0])\n return vector / np.linalg.norm(vector)", "def __rtruediv__(self, other):\r\n return other * self.reciprocal()", "def l2(u: np.ndarray, v: np.ndarray) -> np.ndarray:\n\n return (u - v) ** 2", "def normal_vector(origin, vectors):\n return np.cross(vectors[0] - origin, vectors[1] - origin)", "def test_mul():\n assert_equal(Vector(3, 1) * 2, Vector(6, 2))\n assert_equal(2 * Vector(3, 1), Vector(6, 2))", "def _get_unit_vector(self, v):\n return v / np.linalg.norm(v)", "def sum_of_squares(v: Vector) -> float:\n return dot(v,v)", "def test_vector1():\n\n input_scalar = ECScalar(\n bytes.fromhex(\"a546e36bf0527c9d3b16154b82465edd62144c0ac1fc5a18506a2244ba449ac4\")\n )\n input_u_coordinate = ECPoint(\n bytes.fromhex(\"e6db6867583030db3594c1a424b15f7c726624ec26b3353b10a903a6d0ab1c4c\")\n )\n\n output_u_coordinate = ECPoint(\n bytes.fromhex(\"c3da55379de9c6908e94ea4df28d084f32eccf03491c71f754b4075577a28552\")\n )\n\n assert x25519_scalarmult(input_scalar, input_u_coordinate) == output_u_coordinate", "def apply(self, vec):\n return self._disp_over_m * vec - self._gp * ( # pylint: disable = E1130\n self._gp @ vec / self.L ** self.ndim\n )", "def test_scalar_vector_cmp(self, _, opType, op):\n utils.compare_tracing_methods(\n SimpleScalarVectorCmpModule(opType, 0.5),\n torch.randn(3, 4, 5),\n fusible_ops={op},\n )" ]
[ "0.7258965", "0.70950174", "0.6895658", "0.6889606", "0.6887228", "0.68746805", "0.68746805", "0.6856566", "0.6807518", "0.67988276", "0.67679286", "0.67590684", "0.6742905", "0.6713513", "0.6690686", "0.6619376", "0.66111094", "0.6601558", "0.6597171", "0.6592091", "0.65902984", "0.65699285", "0.6564546", "0.6557989", "0.6555132", "0.65341425", "0.65314984", "0.65293187", "0.6511927", "0.6488984", "0.64669734", "0.645226", "0.64381665", "0.6423828", "0.6419599", "0.6417156", "0.64054954", "0.6379831", "0.637561", "0.636942", "0.6357435", "0.63549715", "0.63549715", "0.63549715", "0.6352467", "0.63503337", "0.6349909", "0.63447636", "0.63342965", "0.6330111", "0.6310535", "0.6307234", "0.6302576", "0.6302269", "0.6301286", "0.63008845", "0.6292517", "0.6291623", "0.62836766", "0.62812096", "0.6278613", "0.6273168", "0.62619716", "0.62493384", "0.6246865", "0.6236135", "0.6221196", "0.6198679", "0.61960316", "0.61932075", "0.6190172", "0.6190172", "0.6190172", "0.6190172", "0.6190172", "0.6190172", "0.6190172", "0.6190172", "0.6190172", "0.6190172", "0.6190172", "0.6188131", "0.6178241", "0.61776936", "0.6173636", "0.6167168", "0.6158593", "0.6152425", "0.6151024", "0.61456394", "0.614458", "0.61436224", "0.612105", "0.6115311", "0.6108713", "0.6107518", "0.61050224", "0.6104339", "0.61030203", "0.60873044", "0.6085389" ]
0.0
-1
Extends the init_buffer of the OffsetColorProgram class by creating the additional carry flag VBO
def _init_buffers(self, v, n, _):
    super()._init_buffers(v, n, _)
    self.vbos.append(gl.glGenBuffers(1))

    # init VBO 3 - dynamic per-instance carry flag data ("carried" attribute)
    gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbos[3])
    loc = self.get_attribute_location("carried")
    gl.glEnableVertexAttribArray(loc)
    gl.glVertexAttribPointer(loc, 1, gl.GL_FLOAT, gl.GL_FALSE, 0, ctypes.c_void_p(0))
    gl.glVertexAttribDivisor(loc, 1)
    gl.glBufferData(gl.GL_ARRAY_BUFFER, 0, np.array([], dtype=np.float32), gl.GL_DYNAMIC_DRAW)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_buffer(self):\n \n self.shape.buf = [pi3d.Buffer(self.shape, self.verts, self.texcoords, self.inds, self.norms)]\n self.shape.set_draw_details(self.shader, [self.spritesheet.img])", "def setupVAO(self, gpuShape):\n glBindVertexArray(gpuShape.vao)\n\n glBindBuffer(GL_ARRAY_BUFFER, gpuShape.vbo)\n glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, gpuShape.ebo)\n\n # 3d vertices + rgb color specification => 3*4 + 3*4 = 24 bytes\n position = glGetAttribLocation(self.shaderProgram, \"position\")\n glVertexAttribPointer(position, 3, GL_FLOAT, GL_FALSE, 24, ctypes.c_void_p(0))\n glEnableVertexAttribArray(position)\n \n color = glGetAttribLocation(self.shaderProgram, \"color\")\n glVertexAttribPointer(color, 3, GL_FLOAT, GL_FALSE, 24, ctypes.c_void_p(12))\n glEnableVertexAttribArray(color)\n\n # Unbinding current vao\n glBindVertexArray(0)", "def _init_plot_buffer(self, configuration):\n if not isinstance(configuration, dict):\n configuration = { 'length': configuration }\n\n # initialize vao/vbo\n vao, vbo = util.VAO(), util.VBO()\n\n # put kernel function into vertex shader\n vertex_shader_kernel = open(SHADER_DIR+'/data.vert.glsl').read()\n if configuration['kernel'] is not None:\n vertex_shader_kernel = vertex_shader_kernel.replace(\n self.KERNEL_PLACEHOLDER,\n configuration['kernel'])\n\n shader = util.Shader(\n vertex=vertex_shader_kernel,\n geometry=open(SHADER_DIR+'/data.geom.glsl').read(),\n fragment=open(SHADER_DIR+'/data.frag.glsl').read(),\n link=True\n )\n norm = configuration.get('norm', float)\n buffer_configuration = {\n 'byte_count': configuration['length'] * 4,\n 'vertex_count': configuration['length']/2,\n 'point_base_color': configuration.get('point_base_color', [0,0,0.5,1]),\n 'point_size': configuration.get('point_size', norm(2.0/configuration['length'])),\n 'vao': vao,\n 'vbo': vbo,\n 'shader': shader\n }\n\n # uniforms\n shader.uniform('mat_plane', self._mat_plot)\n shader.uniform('geometry_color', buffer_configuration['point_base_color'])\n shader.uniform('dot_size', buffer_configuration['point_size'])\n\n # configure vbo\n with vbo.get(0):\n vertex_position = shader.attributeLocation('vertex_position')\n glBufferData(GL_ARRAY_BUFFER, buffer_configuration['byte_count'], None, GL_STATIC_DRAW)\n with vao:\n glVertexAttribPointer(vertex_position, 2, GL_FLOAT, GL_FALSE, 0, None)\n glEnableVertexAttribArray(0)\n\n return buffer_configuration", "def setupVAO(self, gpuShape):\n\n glBindVertexArray(gpuShape.vao)\n\n glBindBuffer(GL_ARRAY_BUFFER, gpuShape.vbo)\n glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, gpuShape.ebo)\n\n # 3d vertices + rgb color specification => 3*4 + 3*4 = 24 bytes\n position = glGetAttribLocation(self.shaderProgram, \"position\")\n glVertexAttribPointer(position, 3, GL_FLOAT, GL_FALSE, 24, ctypes.c_void_p(0))\n glEnableVertexAttribArray(position)\n \n color = glGetAttribLocation(self.shaderProgram, \"color\")\n glVertexAttribPointer(color, 3, GL_FLOAT, GL_FALSE, 24, ctypes.c_void_p(12))\n glEnableVertexAttribArray(color)\n\n # Unbinding current vao\n glBindVertexArray(0)", "def setupVAO(self, gpuShape):\n\n glBindVertexArray(gpuShape.vao)\n\n glBindBuffer(GL_ARRAY_BUFFER, gpuShape.vbo)\n glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, gpuShape.ebo)\n\n # 3d vertices + rgb color specification => 3*4 + 3*4 = 24 bytes\n position = glGetAttribLocation(self.shaderProgram, \"position\")\n glVertexAttribPointer(position, 3, GL_FLOAT, GL_FALSE, 24, ctypes.c_void_p(0))\n glEnableVertexAttribArray(position)\n \n color = glGetAttribLocation(self.shaderProgram, \"color\")\n 
glVertexAttribPointer(color, 3, GL_FLOAT, GL_FALSE, 24, ctypes.c_void_p(12))\n glEnableVertexAttribArray(color)\n\n # Unbinding current vao\n glBindVertexArray(0)", "def setup(self, gl_buffers, color_vbo, pos_vbo, partNumber):\n self.gl_objects = gl_buffers\n self.color_vbo, self.pos_vbo = color_vbo, pos_vbo\n self.partNumber = partNumber", "def prepareUniformBuffers(self):\n # Vertex shader uniform buffer block\n uboVSSize = sum([glm.sizeof(ubo) for ubo in self.uboVS.values()])\n bufferInfo = vk.VkBufferCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,\n size = uboVSSize,\n # This buffer will be used as a uniform buffer\n usage = vk.VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT\n )\n # Create a new buffer\n self.uniformBufferVS['buffer'] = vk.vkCreateBuffer(self.device, bufferInfo, None)\n # Get memory requirements including size, alignment and memory type\n memReqs = vk.vkGetBufferMemoryRequirements(self.device, self.uniformBufferVS['buffer'])\n # Get the memory type index that supports host visibile memory access\n # Most implementations offer multiple memory types and selecting the correct one to allocate memory from is crucial\n # We also want the buffer to be host coherent so we don't have to flush (or sync after every update.\n #Note: This may affect performance so you might not want to do this in a real world application that updates buffers on a regular base\n allocInfo = vk.VkMemoryAllocateInfo(\n sType = vk.VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,\n pNext = None,\n allocationSize = memReqs.size,\n memoryTypeIndex = self.vulkanDevice.getMemoryType(memReqs.memoryTypeBits, vk.VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | vk.VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)\n )\n # Allocate memory for the uniform buffer\n self.uniformBufferVS['memory'] = vk.vkAllocateMemory(self.device, allocInfo, None)\n # Bind memory to buffer\n vk.vkBindBufferMemory(self.device, self.uniformBufferVS['buffer'], self.uniformBufferVS['memory'], 0)\n # Store information in the uniform's descriptor that is used by the descriptor set\n self.uniformBufferVS['descriptor'] = vk.VkDescriptorBufferInfo(\n buffer = self.uniformBufferVS['buffer'],\n offset = 0,\n range = uboVSSize\n )\n\n self.updateUniformBuffers()", "def __init__(self, shape, pts, texcoords, faces, normals=None, smooth=True):\r\n super(Buffer, self).__init__()\r\n\r\n # Uniform variables all in one array!\r\n self.unib = (c_float * 12)(0.0, 0.0, 0.0,\r\n 0.5, 0.5, 0.5,\r\n 1.0, 1.0, 0.0,\r\n 0.0, 0.0, 0.0)\r\n \"\"\" pass to shader array of vec3 uniform variables:\r\n\r\n ===== ============================ ==== ==\r\n vec3 description python\r\n ----- ---------------------------- -------\r\n index from to\r\n ===== ============================ ==== ==\r\n 0 ntile, shiny, blend 0 2\r\n 1 material 3 5\r\n 2 umult, vmult, point_size 6 8\r\n 3 u_off, v_off (only 2 used) 9 10\r\n ===== ============================ ==== ==\r\n \"\"\"\r\n #self.shape = shape\r\n self.textures = []\r\n pts = np.array(pts, dtype=float)\r\n texcoords = np.array(texcoords, dtype=float)\r\n faces = np.array(faces)\r\n\r\n if normals == None: #i.e. 
normals will only be generated if explictly None\r\n LOGGER.debug('Calculating normals ...')\r\n\r\n normals = np.zeros(pts.shape, dtype=float) #empty array rights size\r\n\r\n fv = pts[faces] #expand faces with x,y,z values for each vertex\r\n #cross product of two edges of triangles\r\n fn = np.cross(fv[:][:][:,1] - fv[:][:][:,0], fv[:][:][:,2] - fv[:][:][:,0])\r\n fn = Utility.normalize_v3(fn)\r\n normals[faces[:,0]] += fn #add up all normal vectors for a vertex\r\n normals[faces[:,1]] += fn\r\n normals[faces[:,2]] += fn\r\n normals = Utility.normalize_v3(normals)\r\n else:\r\n normals = np.array(normals)\r\n \r\n # keep a copy for speeding up the collision testing of ElevationMap\r\n self.vertices = pts\r\n self.normals = normals\r\n self.tex_coords = texcoords\r\n self.indices = faces\r\n self.material = (0.5, 0.5, 0.5, 1.0)\r\n\r\n # Pack points,normals and texcoords into tuples and convert to ctype floats.\r\n n_verts = len(pts)\r\n if len(texcoords) != n_verts:\r\n if len(normals) != n_verts:\r\n self.N_BYTES = 12 # only use pts\r\n self.array_buffer = c_floats(pts.reshape(-1).tolist())\r\n else:\r\n self.N_BYTES = 24 # use pts and normals\r\n self.array_buffer = c_floats(np.concatenate((pts, normals),\r\n axis=1).reshape(-1).tolist())\r\n else:\r\n self.N_BYTES = 32 # use all three NB doesn't check that normals are there\r\n self.array_buffer = c_floats(np.concatenate((pts, normals, texcoords),\r\n axis=1).reshape(-1).tolist())\r\n\r\n self.ntris = len(faces)\r\n self.element_array_buffer = c_shorts(faces.reshape(-1))\r\n from pi3d.Display import Display\r\n self.disp = Display.INSTANCE # rely on there always being one!\r", "def buildCommandBuffers(self):\n cmdBufInfo = vk.VkCommandBufferBeginInfo(\n sType = vk.VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,\n pNext = None\n )\n # Set clear values for all framebuffer attachments with loadOp set to clear\n # We use two attachments (color and depth) that are cleared at the start of the subpass and as such we need to set clear values for both\n clearValues = []\n clearValue = vk.VkClearValue(\n color = [[ 0.0, 0.0, 0.2, 1.0 ]]\n )\n clearValues.append(clearValue)\n clearValue = vk.VkClearValue(\n depthStencil = [1.0, 0 ]\n )\n clearValues.append(clearValue)\n offset = vk.VkOffset2D(x = 0, y = 0)\n extent = vk.VkExtent2D(width = self.width, height = self.height)\n renderArea = vk.VkRect2D(offset = offset, extent = extent)\n for i in range(len(self.drawCmdBuffers)):\n renderPassBeginInfo = vk.VkRenderPassBeginInfo(\n sType = vk.VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,\n pNext = None,\n renderPass = self.renderPass,\n renderArea = renderArea,\n clearValueCount = 2,\n pClearValues = clearValues,\n # Set target frame buffer\n framebuffer = self.frameBuffers[i]\n )\n # wait this buffer to be released\n #vk.vkWaitForFences(self.device, 1, [self.waitFences[i]], vk.VK_TRUE, vk.UINT64_MAX)\n # rebuild this buffer\n vk.vkBeginCommandBuffer(self.drawCmdBuffers[i], cmdBufInfo)\n # Start the first sub pass specified in our default render pass setup by the base class\n # This will clear the color and depth attachment\n vk.vkCmdBeginRenderPass(self.drawCmdBuffers[i], renderPassBeginInfo, vk.VK_SUBPASS_CONTENTS_INLINE)\n # Update dynamic viewport state\n viewport = vk.VkViewport(\n height = float(self.height),\n width = float(self.width),\n minDepth = 0.0,\n maxDepth = 1.0\n )\n vk.vkCmdSetViewport(self.drawCmdBuffers[i], 0, 1, [viewport])\n # Update dynamic scissor state\n offsetscissor = vk.VkOffset2D(x = 0, y = 0)\n extentscissor = 
vk.VkExtent2D(width = self.width, height = self.height)\n scissor = vk.VkRect2D(offset = offsetscissor, extent = extentscissor)\n vk.vkCmdSetScissor(self.drawCmdBuffers[i], 0, 1, [scissor])\n\n # Bind descriptor sets describing shader binding points\n vk.vkCmdBindDescriptorSets(self.drawCmdBuffers[i], vk.VK_PIPELINE_BIND_POINT_GRAPHICS, self.pipelineLayout, 0, 1, [self.descriptorSet], 0, None)\n # Bind the rendering pipeline\n # The pipeline (state object) contains all states of the rendering pipeline, binding it will set all the states specified at pipeline creation time\n vk.vkCmdBindPipeline(self.drawCmdBuffers[i], vk.VK_PIPELINE_BIND_POINT_GRAPHICS, self.pipeline);\n # Bind triangle vertex buffer (contains position and colors)\n offsets = [ 0 ]\n vk.vkCmdBindVertexBuffers(self.drawCmdBuffers[i], 0, 1, [self.vertices['buffer']], offsets)\n # Bind triangle index buffer\n vk.vkCmdBindIndexBuffer(self.drawCmdBuffers[i], self.indices['buffer'], 0, vk.VK_INDEX_TYPE_UINT32)\n # Draw indexed triangle\n vk.vkCmdDrawIndexed(self.drawCmdBuffers[i], self.indices['count'], 1, 0, 0, 1)\n # uncomment for imgui support\n self.drawUI(self.drawCmdBuffers[i])\n vk.vkCmdEndRenderPass(self.drawCmdBuffers[i])\n # Ending the render pass will add an implicit barrier transitioning the frame buffer color attachment to\n # VK_IMAGE_LAYOUT_PRESENT_SRC_KHR for presenting it to the windowing system\n vk.vkEndCommandBuffer(self.drawCmdBuffers[i])", "def _initialize_buffers(self) -> None:", "def create_buffers(self):", "def screen_vao(cls, gl, program):\n\n vbo = [\n -1.0, -1.0,\n +1.0, -1.0,\n -1.0, +1.0,\n +1.0, +1.0,\n ]\n vbo = np.array(vbo).astype(np.float32)\n vbo = [(gl.buffer(vbo), \"2f\", \"in_pos\")]\n\n ibo = [0, 1, 2, 1, 2, 3]\n ibo = np.array(ibo).astype(np.int32)\n ibo = gl.buffer(ibo)\n\n vao = gl.vertex_array(program, vbo, ibo)\n return vao", "def _build_bufferview(buffer, target, byte_length, byte_offset, byte_stride):\n new_buffer_view = {\n \"buffer\": buffer,\n \"byteLength\": byte_length,\n \"byteOffset\": byte_offset\n }\n\n properties_keys = [\"target\", \"byteStride\"]\n properties_values = [target, byte_stride]\n\n for key, val in zip(properties_keys, properties_values):\n if val is not None:\n new_buffer_view[key] = target\n\n return new_buffer_view", "def __init__(self, attributes, index=None, usage=GL.GL_STATIC_DRAW):\n\n # create vertex array object, bind it\n self.glid = GL.glGenVertexArrays(1)\n GL.glBindVertexArray(self.glid)\n self.buffers = [] # we will store buffers in a list\n nb_primitives, size = 0, 0\n\n # load buffer per vertex attribute (in list with index = shader layout)\n for loc, data in enumerate(attributes):\n if data is not None:\n # bind a new vbo, upload its data to GPU, declare size and type\n self.buffers.append(GL.glGenBuffers(1))\n data = np.array(data, np.float32, copy=False) # ensure format\n nb_primitives, size = data.shape\n GL.glEnableVertexAttribArray(loc)\n GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.buffers[-1])\n GL.glBufferData(GL.GL_ARRAY_BUFFER, data, usage)\n GL.glVertexAttribPointer(loc, size, GL.GL_FLOAT, False, 0, None)\n\n # optionally create and upload an index buffer for this object\n self.draw_command = GL.glDrawArrays\n self.arguments = (0, nb_primitives)\n if index is not None:\n self.buffers += [GL.glGenBuffers(1)]\n index_buffer = np.array(index, np.int32, copy=False) # good format\n GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, self.buffers[-1])\n GL.glBufferData(GL.GL_ELEMENT_ARRAY_BUFFER, index_buffer, usage)\n self.draw_command = 
GL.glDrawElements\n self.arguments = (index_buffer.size, GL.GL_UNSIGNED_INT, None)", "def initializeGL(self):\n # background color\n gl.glClearColor(0, 0, 0, 0)\n # create a Vertex Buffer Object with the specified data\n self.vbo = glvbo.VBO(self.data)\n # compile the vertex shader\n vs = compile_vertex_shader(VS)\n # compile the fragment shader\n fs = compile_fragment_shader(FS)\n # compile the vertex shader\n self.shaders_program = link_shader_program(vs, fs)\n vs2 = compile_vertex_shader(VS2)\n fs2 = compile_fragment_shader(FS2)\n self.my_shaders_program = link_shader_program(vs2, fs2)", "def __init__(self, attributes, index=None, usage=GL.GL_STATIC_DRAW):\n\n # create vertex array object, bind it\n self.glid = GL.glGenVertexArrays(1)\n GL.glBindVertexArray(self.glid)\n self.buffers = [] # we will store buffers in a list\n nb_primitives, size = 0, 0\n\n # load a buffer per initialized vertex attribute (=dictionary)\n for loc, data in enumerate(attributes):\n if data is None:\n continue\n\n # bind a new vbo, upload its data to GPU, declare its size and type\n self.buffers += [GL.glGenBuffers(1)]\n data = np.array(data, np.float32, copy=False)\n nb_primitives, size = data.shape\n GL.glEnableVertexAttribArray(loc) # activates for current vao only\n GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.buffers[-1])\n GL.glBufferData(GL.GL_ARRAY_BUFFER, data, usage)\n GL.glVertexAttribPointer(loc, size, GL.GL_FLOAT, False, 0, None)\n\n # optionally create and upload an index buffer for this object\n self.draw_command = GL.glDrawArrays\n self.arguments = (0, nb_primitives)\n if index is not None:\n self.buffers += [GL.glGenBuffers(1)]\n index_buffer = np.array(index, np.int32, copy=False)\n GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, self.buffers[-1])\n GL.glBufferData(GL.GL_ELEMENT_ARRAY_BUFFER, index_buffer, usage)\n self.draw_command = GL.glDrawElements\n self.arguments = (index_buffer.size, GL.GL_UNSIGNED_INT, None)\n\n # cleanup and unbind so no accidental subsequent state update\n GL.glBindVertexArray(0)\n GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)\n GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, 0)", "def prepare_attrib_mapping(self, primitive):\n buffer_info = []\n for name, accessor in primitive.attributes.items():\n info = VBOInfo(*accessor.info())\n info.attributes.append((name, info.components))\n\n if buffer_info and buffer_info[-1].buffer_view == info.buffer_view:\n if buffer_info[-1].interleaves(info):\n buffer_info[-1].merge(info)\n continue\n\n buffer_info.append(info)\n\n return buffer_info", "def glGetBufferPointerv( baseOperation, target, pname, params=None ):\n if params is None:\n size = glGetBufferParameteriv( target, GL_BUFFER_SIZE )\n data = arrays.ArrayDatatype.zeros( (size,), GL_UNSIGNED_BYTE )\n baseOperation( target, pname, ctypes.byref( data ) )\n return data\n else:\n return baseOperation( target, pname, params )", "def _select(self):\r\n opengles.glBindBuffer(GL_ARRAY_BUFFER, self.vbuf)\r\n opengles.glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.ebuf)", "def _create_bufferview(self, name, buffer, byte_length, byte_offset, byte_stride, target=None):\n new_buffer_view = self._build_bufferview(buffer=self._resolve_mapping(inp=buffer, mapping=self.buffers_map),\n target=target,\n byte_length=byte_length,\n byte_offset=byte_offset,\n byte_stride=byte_stride)\n\n self.bufferViews.append(new_buffer_view)\n\n if name:\n self.bufferViews_map[name] = self._last_index(self.bufferViews)\n\n return self._last_index(self.bufferViews)", "def pc_output_buffers_full(self, *args):\n return 
_add_vector_swig.add_vector_2_cpp_sptr_pc_output_buffers_full(self, *args)", "def _make_buffer(self, width, height):\n fb_prop = p3d.FrameBufferProperties(p3d.FrameBufferProperties.get_default())\n fb_prop.set_multisamples(self._multisamples)\n fb_prop.set_srgb_color(self._srgb_color)\n\n self._buffer = self._engine.make_output(\n self._pipe, name=\"offscreen\", sort=0,\n fb_prop=p3d.FrameBufferProperties.get_default(),\n win_prop=p3d.WindowProperties(size=(width, height)),\n flags=p3d.GraphicsPipe.BFRefuseWindow)\n\n self._region = self._buffer.make_display_region()\n\n self._depth_tex = p3d.Texture()\n self._depth_tex.setFormat(p3d.Texture.FDepthComponent)\n self._buffer.addRenderTexture(\n self._depth_tex, p3d.GraphicsOutput.RTMCopyRam, p3d.GraphicsOutput.RTPDepth)\n\n self._color_tex = p3d.Texture()\n self._color_tex.setFormat(p3d.Texture.FRgba8)\n self._buffer.addRenderTexture(\n self._color_tex, p3d.GraphicsOutput.RTMCopyRam, p3d.GraphicsOutput.RTPColor)", "def initializeGL(self):\n # background color\n gl.glClearColor(0.8, 0.8, 0.8, 0)\n # Make initial data array.\n # compile the vertex shader\n vs = compile_shader(VERTEX, gl.GL_VERTEX_SHADER)\n # compile the geometry shader\n gs = compile_shader(GEOMETRY, gl.GL_GEOMETRY_SHADER)\n # compile the fragment shader\n fs = compile_shader(FRAGMENT, gl.GL_FRAGMENT_SHADER)\n # Link the programs.\n self.render_program = link_shaders(vs, gs, fs)\n # Compile the compute shader\n cs = compile_shader(COMPUTE, gl.GL_COMPUTE_SHADER)\n # Create the compute shader buffers.\n self.makeBuffers()\n #self.vbo = glvbo.VBO(self.attributes)\n self.vbo = gl.glGenBuffers(1)\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbo)\n gl.glBufferData(gl.GL_ARRAY_BUFFER, self.attributes.nbytes,\n self.attributes, gl.GL_DYNAMIC_COPY)\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, 0)\n\n self.ssbo = gl.glGenBuffers(1)\n gl.glBindBufferBase(gl.GL_SHADER_STORAGE_BUFFER, 1, self.ssbo)\n gl.glBufferData(gl.GL_SHADER_STORAGE_BUFFER, self.velocities.nbytes,\n self.velocities, gl.GL_DYNAMIC_COPY)\n self.compute_program = link_shaders(cs)", "def init_shader(self):\r\n self.attrib_locs = {\r\n \"mc_vertex\": -1,\r\n \"vert_tex_coord\": -1,\r\n }\r\n self.uniform_locs = {\r\n \"model_matrix\": -1,\r\n \"view_matrix\": -1,\r\n \"proj_matrix\": -1,\r\n }\r\n vert_prog = self._compile_shader(ORTH_VERT_SOURCE, gl.GL_VERTEX_SHADER)\r\n frag_prog = self._compile_shader(\r\n ORTH_FRAG_SOURCE, gl.GL_FRAGMENT_SHADER)\r\n self.shader = gl.glCreateProgram()\r\n gl.glAttachShader(self.shader, vert_prog)\r\n gl.glAttachShader(self.shader, frag_prog)\r\n gl.glLinkProgram(self.shader)\r\n assert (gl.glGetProgramiv(self.shader, gl.GL_LINK_STATUS) ==\r\n gl.GL_TRUE), (\r\n \"Error: %s\" % (gl.glGetProgramInfoLog(self.shader)))\r\n\r\n self.attrib_locs = {\r\n name: gl.glGetAttribLocation(self.shader, name)\r\n for name in self.attrib_locs\r\n }\r\n self.uniform_locs = {\r\n name: gl.glGetUniformLocation(self.shader, name)\r\n for name in self.uniform_locs\r\n }\r\n\r\n # Load vertices for final ortho view\r\n self.vao = gl.glGenVertexArrays(1)\r\n gl.glBindVertexArray(self.vao)\r\n self.buffers['mc_vertex'] = gl.glGenBuffers(1)\r\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.buffers['mc_vertex'])\r\n\r\n gl.glBufferData(gl.GL_ARRAY_BUFFER, len(ORTH_VERTICES),\r\n ORTH_VERTICES, gl.GL_STATIC_DRAW)\r\n gl.glVertexAttribPointer(self.attrib_locs['mc_vertex'], 4,\r\n gl.GL_FLOAT, False, 0, ctypes.c_void_p(0))\r\n gl.glEnableVertexAttribArray(self.attrib_locs['mc_vertex'])\r\n\r\n self.buffers['vert_tex_coord'] = 
gl.glGenBuffers(1)\r\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.buffers['vert_tex_coord'])\r\n gl.glBufferData(gl.GL_ARRAY_BUFFER, len(TEXTURE_VERTICES),\r\n TEXTURE_VERTICES, gl.GL_STATIC_DRAW)\r\n gl.glVertexAttribPointer(self.attrib_locs['vert_tex_coord'], 2,\r\n gl.GL_FLOAT, False, 0, ctypes.c_void_p(0))\r\n gl.glEnableVertexAttribArray(self.attrib_locs['vert_tex_coord'])\r\n gl.glActiveTexture(gl.GL_TEXTURE0)", "def __init__(self, camera=None, light=None, name=\"\", z=0.1):\r\n super(Canvas, self).__init__(camera, light, name, x=0.0, y=0.0, z=0.0,\r\n rx=0.0, ry=0.0, rz=0.0, sx=1.0, sy=1.0, sz=1.0,\r\n cx=0.0, cy=0.0, cz=0.0)\r\n self.ttype = GL_TRIANGLES\r\n self.verts = []\r\n self.norms = []\r\n self.texcoords = []\r\n self.inds = []\r\n self.depth = z\r\n\r\n ww = 20.0\r\n hh = 20.0\r\n\r\n self.verts = ((-ww, -hh, z), (0.0, hh, z), (ww, -hh, z))\r\n self.norms = ((0, 0, -1), (0, 0, -1), (0, 0, -1))\r\n self.texcoords = ((0.0, 0.0), (0.5, 1.0), (1.0, 0.0))\r\n\r\n self.inds = ((0, 1, 2), ) #python quirk: comma for tuple with only one val\r\n\r\n self.buf = []\r\n self.buf.append(Buffer(self, self.verts, self.texcoords, self.inds, self.norms))", "def __init__(self, buffer_size, random_seed=None):\n self.buffer_size = buffer_size\n self.count = 0\n self.oldPos = 0\n self.currPos = 0\n self.full = False\n self.buffer = []\n self.featCount = 3\n random.seed(random_seed)\n self.useSubBuffer = False", "def re_init_buffer(self):\n #~ print(self.verts)\n #~ print(self.texcoords)\n #~ print(self.inds)\n self.shape.buf[0].re_init(pts=np.array(self.verts, 'f'),texcoords=np.array(self.texcoords, 'f'))", "def pc_input_buffers_full(self, *args):\n return _add_vector_swig.add_vector_2_cpp_sptr_pc_input_buffers_full(self, *args)", "def pc_output_buffers_full_var(self, *args):\n return _add_vector_swig.add_vector_2_cpp_sptr_pc_output_buffers_full_var(self, *args)", "def set_min_output_buffer(self, *args):\n return _add_vector_swig.add_vector_2_cpp_sptr_set_min_output_buffer(self, *args)", "def merge(self, bufr, x=0.0, y=0.0, z=0.0,\r\n rx=0.0, ry=0.0, rz=0.0,\r\n sx=1.0, sy=1.0, sz=1.0):\r\n if not isinstance(bufr, list) and not isinstance(bufr, tuple):\r\n buflist = [[bufr, x, y, z, rx, ry, rz, sx, sy, sz]]\r\n else:\r\n buflist = bufr\r\n\r\n for b in buflist:\r\n if not(type(b[0]) is Buffer):\r\n bufr = b[0].buf[0]\r\n else:\r\n bufr = b[0]\r\n\r\n #assert shape.ttype == GL_TRIANGLES # this is always true of Buffer objects\r\n assert len(bufr.vertices) == len(bufr.normals)\r\n\r\n if VERBOSE:\r\n print(\"Merging\", bufr.name)\r\n\r\n original_vertex_count = len(self.vertices)\r\n\r\n for v in range(0, len(bufr.vertices)):\r\n # Scale, offset and store vertices\r\n vx, vy, vz = rotate_vec(b[4], b[5], b[6], bufr.vertices[v])\r\n self.vertices.append((vx * b[7] + b[1], vy * b[8] + b[2], vz * b[9] + b[3]))\r\n\r\n # Rotate normals\r\n self.normals.append(rotate_vec(b[4], b[5], b[6], bufr.normals[v]))\r\n\r\n self.tex_coords.extend(bufr.tex_coords)\r\n\r\n ctypes.restype = ctypes.c_short # TODO: remove this side-effect.\r\n indices = [(i[0] + original_vertex_count, i[1] + original_vertex_count,\r\n i[2] + original_vertex_count) for i in bufr.indices]\r\n self.indices.extend(indices)\r\n\r\n self.buf = []\r\n self.buf.append(Buffer(self, self.vertices, self.tex_coords, self.indices, self.normals))", "def __init__(self, texture, texcoords, enabled=True):\n vfunc = Function(\"\"\"\n void pass_coords() {\n $v_texcoords = $texcoords;\n }\n \"\"\")\n ffunc = Function(\"\"\"\n void apply_texture() {\n if 
($enabled == 1) {\n gl_FragColor *= texture2D($u_texture, $texcoords);\n }\n }\n \"\"\")\n self._texcoord_varying = Varying('v_texcoord', 'vec2')\n vfunc['v_texcoords'] = self._texcoord_varying\n ffunc['texcoords'] = self._texcoord_varying\n self._texcoords_buffer = VertexBuffer(\n np.zeros((0, 2), dtype=np.float32)\n )\n vfunc['texcoords'] = self._texcoords_buffer\n super().__init__(vcode=vfunc, vhook='pre', fcode=ffunc)\n\n self.enabled = enabled\n self.texture = texture\n self.texcoords = texcoords", "def __init__(self, texture, texcoords, enabled=True):\n vfunc = Function(\"\"\"\n void pass_coords() {\n $v_texcoords = $texcoords;\n }\n \"\"\")\n ffunc = Function(\"\"\"\n void apply_texture() {\n if ($enabled == 1) {\n gl_FragColor *= texture2D($u_texture, $texcoords);\n }\n }\n \"\"\")\n self._texcoord_varying = Varying('v_texcoord', 'vec2')\n vfunc['v_texcoords'] = self._texcoord_varying\n ffunc['texcoords'] = self._texcoord_varying\n self._texcoords_buffer = VertexBuffer(\n np.zeros((0, 2), dtype=np.float32)\n )\n vfunc['texcoords'] = self._texcoords_buffer\n super().__init__(vcode=vfunc, vhook='pre', fcode=ffunc)\n\n self.enabled = enabled\n self.texture = texture\n self.texcoords = texcoords", "def __init__(self, camera=None, light=None, name=\"\",\r\n x=0.0, y=0.0, z=0.0,\r\n rx=0.0, ry=0.0, rz=0.0,\r\n sx=1.0, sy=1.0, sz=1.0,\r\n cx=0.0, cy=0.0, cz=0.0):\r\n super(MergeShape, self).__init__(camera, light, name, x, y, z,\r\n rx, ry, rz, sx, sy, sz, cx, cy, cz)\r\n\r\n if VERBOSE:\r\n print(\"Creating Merge Shape ...\")\r\n\r\n self.vertices = []\r\n self.normals = []\r\n self.tex_coords = []\r\n self.indices = [] #stores all indices for single render\r\n\r\n self.buf = []\r\n self.buf.append(Buffer(self, self.vertices, self.tex_coords, self.indices, self.normals))", "def __init__(self, *args, **kwargs):\n _gdi_.BufferedPaintDC_swiginit(self,_gdi_.new_BufferedPaintDC(*args, **kwargs))\n if len(args) > 1: self.__bmp = args[1]", "def _prepare_gl(self):\n # init gl\n shader = Shader()\n shader.attachShader(GL_VERTEX_SHADER, VERTEX_SHADER)\n shader.attachShader(GL_FRAGMENT_SHADER, FRAGMENT_SHADER)\n shader.linkProgram()\n self.shader = shader\n\n self._gl_uniforms = {}\n # cache uniform locations (much faster)\n self._gl_uniforms['tex'] = self._uloc('tex')\n self._gl_uniforms['color'] = self._uloc('color')\n self._gl_uniforms['mat_projection'] = self._uloc('mat_projection')\n self._gl_uniforms['mat_modelview'] = self._uloc('mat_modelview')\n self._gl_uniforms['mat_real_projection'] = self._uloc('mat_real_projection')\n self.vao_id = glGenVertexArrays(1)\n self.vbo_id = glGenBuffers(2)", "def __init__(self, bgr_lb=[0, 0, 180], bgr_ub=[50, 50, 255], pixels_threshold=10000):\n super(ColorFilter, self).__init__(bgr_lb, bgr_ub, pixels_threshold)\n self.bgr_lb = np.array(bgr_lb).astype(np.uint8)\n self.bgr_ub = np.array(bgr_ub).astype(np.uint8)\n self.pixels_threshold = pixels_threshold", "def recompile(self):\n\n self.vaos = []\n try:\n self.program, uniforms = self.build_prog(self.gl)\n self.u_time, self.u_width, self.u_height = uniforms\n vao = GLUtil.screen_vao(self.gl, self.program)\n self.vaos.append(vao)\n\n self.compute, uniforms, buffers = self.build_cs(self.gl)\n self.u_cstime, self.u_cswidth, self.u_csheight = uniforms\n self.buf_in, self.buf_out = buffers\n\n self.set_gpu_wh(width, height)\n\n self.gx, self.gy = int(width / 8), int(height / 8)\n self.set_gpu_time()\n\n log(\"[Renderer] shader recompiled.\")\n\n except Exception as e:\n log(e)", "def __init__(self, *args, 
**kwargs):\n super(Dummy, self).__init__()\n \n self.affine = np.eye(4, dtype = np.float32)\n self._update_glaffine()\n \n self.vertices = np.random.random( (10,3)).astype(np.float32) * 10\n\n self.colors = np.array( [[255,255,0,255],\n [255,255,0,255],\n [0,255,0,255],\n [0,255,0,255]], dtype = np.ubyte )\n \n self.indices = np.array( [[0,1], [1,2], [5,6], [8,9]] , dtype = np.uint32).ravel()\n self.vertices = self.vertices[self.indices,:]\n self.indices = np.array( range(len(self.indices)), dtype = np.uint32)\n self.colors = self.colors.repeat(2, axis = 0)\n self.colors_ptr = self.colors.ctypes.data\n \n self.vertices_ptr = self.vertices.ctypes.data\n self.indices_ptr = self.indices.ctypes.data\n self.indices_nr = self.indices.size\n self.mode = GL_LINES\n self.type = GL_UNSIGNED_INT", "def __init__(self):\n kernel=numpy.array([[-1, -1, -1],\n [-1, 8, -1],\n [-1, -1, -1]])\n VConvolutionFilter.__init__(self,kernel)", "def pc_input_buffers_full_var(self, *args):\n return _add_vector_swig.add_vector_2_cpp_sptr_pc_input_buffers_full_var(self, *args)", "def __init__(self):\n kernel=numpy.array([[-2, -1, 0],\n [-1, 1, 1],\n [0, 1, 2]])\n VConvolutionFilter.__init__(self,kernel)", "def __init__(self):\n self._id = GLuint()\n glGenVertexArrays(1, self._id)", "def _add_buffer(self, p_buffer_element:PyTorchIOElement):\r\n\r\n self._buffer.add_element(p_buffer_element)", "def update_flags(self):\n # view mode, filled vs wirefrom\n if self.view['wireframe']:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)\n else:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL)\n\n # set fullscreen or windowed\n self.set_fullscreen(fullscreen=self.view['fullscreen'])\n\n # backface culling on or off\n if self.view['cull']:\n gl.glEnable(gl.GL_CULL_FACE)\n else:\n gl.glDisable(gl.GL_CULL_FACE)\n\n # case where we WANT an axis and NO vertexlist\n # is stored internally\n if self.view['axis'] and self._axis is None:\n from .. 
import creation\n # create an axis marker sized relative to the scene\n axis = creation.axis(origin_size=self.scene.scale / 100)\n # create ordered args for a vertex list\n args = rendering.mesh_to_vertexlist(axis)\n # store the axis as a reference\n self._axis = self.batch.add_indexed(*args)\n\n # case where we DON'T want an axis but a vertexlist\n # IS stored internally\n elif not self.view['axis'] and self._axis is not None:\n # remove the axis from the rendering batch\n self._axis.delete()\n # set the reference to None\n self._axis = None", "def __init__(self, blend_src, blend_dest, program, parent=None):\n super().__init__(parent=parent)\n self.program = program\n self.blend_src = blend_src\n self.blend_dest = blend_dest", "def update_carried(self, data):\n self.use()\n gpu_data = np.array(data, dtype=np.float32)\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbos[3])\n gl.glBufferData(gl.GL_ARRAY_BUFFER, gpu_data.nbytes, gpu_data, gl.GL_DYNAMIC_DRAW)", "def __init__(self):\n kernel=numpy.array([[-1, -1, -1],\n [-1, 9, -1],\n [-1, -1, -1]])\n VConvolutionFilter.__init__(self,kernel)", "def __init__(self, verts=None, frags=None, geoms=None, count=0):\n\n GLObject.__init__(self)\n self._count = count\n self._buffer = None\n\n # Make sure shaders are shaders\n self._verts = self._get_shaders(verts, VertexShader)\n self._frags = self._get_shaders(frags, FragmentShader)\n self._geoms = self._get_shaders(geoms, GeometryShader)\n\n self._uniforms = {}\n self._attributes = {}\n\n # Build hooks, uniforms and attributes\n self._build_hooks()\n self._build_uniforms()\n self._build_attributes()\n\n # Build associated structured vertex buffer if count is given\n if self._count > 0:\n dtype = []\n for attribute in self._attributes.values():\n dtype.append(attribute.dtype)\n self._buffer = np.zeros(self._count, dtype=dtype).view(VertexBuffer)\n self.bind(self._buffer)", "def __init__(self, name):\r\n super(OffScreenTexture, self).__init__(name)\r\n from pi3d.Display import Display\r\n self.ix, self.iy = Display.INSTANCE.width, Display.INSTANCE.height\r\n self.im = Image.new(\"RGBA\",(self.ix, self.iy))\r\n self.image = self.im.convert(\"RGBA\").tostring('raw', \"RGBA\")\r\n self.alpha = True\r\n self.blend = False\r\n\r\n self._tex = ctypes.c_int()\r\n self.framebuffer = (ctypes.c_int * 1)()\r\n opengles.glGenFramebuffers(1, self.framebuffer)\r\n self.depthbuffer = (ctypes.c_int * 1)()\r\n opengles.glGenRenderbuffers(1, self.depthbuffer)", "def render( self, shader, mode ):\n location = shader.getLocation( mode, self.name, uniform=False )\n if location is not None and location != -1:\n vbo = self.buffer.bind( mode )\n glVertexAttribPointer( \n location, self.size, GL_FLOAT, False, self.stride, \n vbo+self.offset\n )\n glEnableVertexAttribArray( location )\n return (vbo,location)\n return None", "def __init__(self):\n super().__init__()\n self._do_paint = True\n self._do_process_data = True\n #opengl data\n self._dentsvertsdata = {} # dictionary that holds vertex data for all primitive and submodel combinations\n self._multFactor = 1 # maybe not the brighest idea -> this caused problems in memory allocation\n self._showBack = False\n # Shader program data\n self.program = 0\n self.normalMatrixLoc = 0\n self.vertexShader = self._vertex_shader_source()\n self.fragmentShader = self._fragment_shader_source()\n # paint device (e.g. 
glWin), and coresponding transforamtion matrices\n self.paintDevice = 0\n self.projMatrixLoc = 0\n self.mvMatrixLoc = 0\n # light position\n self.lightPosLoc = 0 # opengl light position\n self._light_position = QVector3D(0, 0, 100000) # vector\n #geometry manager selected/visible changed events\n geometry_manager.geometry_state_changing.connect(self.onGeometryStateChanging)\n geometry_manager.visible_geometry_changed.connect(self.onVisibleGeometryChanged)\n self._s_visible_geo_guids: set = set()\n\n # Add menu items\n self.initialize_painter_menus()\n self._color =[0.4, 1.0, 1.0, 1.0] # default color", "def __init__(self, stencil_coefs, loffset, roffset):\n self.stencil_coefs = stencil_coefs\n self.loffset = loffset\n self.roffset = roffset", "def __init__(self, *args, **kwargs):\n _gdi_.AutoBufferedPaintDC_swiginit(self,_gdi_.new_AutoBufferedPaintDC(*args, **kwargs))", "def __init__(self,\n shape=[300,300],\n ra=[ 64, 24, 9, 3, 1],\n ri=[ 96, 32, 12,4.5,1.5],\n dt=[.04,.03,.03,.02,.02], # wt=[1],\n pal=[[1,1,0],[1,0,0],[1,0,1],[0,0,1],[0,1,1],[1,1,1]]):\n\n \"Greyscale buffer that contains the actual Multiscale Turing Pattern.\"\n self.z = rand(*shape)\n \"Colour buffer with RGB values tracking the colour of local scales.\"\n self.c = ones(list(shape)+[3])\n \"Timestep per scale.\"\n self.dt = array(dt)\n# \"Weight per scale.\"\n# self.wt = array(wt)\n \"Activator radii\"\n self.ra = ra\n \"Inhibitor radii\"\n self.ri = ri\n \"Colourmap of scale to RGB.\"\n self.pal = array(pal)\n \"Transform function before filter.\"\n self._xform = lambda z: z\n \"Filter function.\"\n self._filter = boxG\n \"Colour buffer update speed.\"\n self._dc = .04\n # init these as instance variables so they don't have to be\n # allocated on each call to self.step()\n self._tmp = zeros_like(self.z)\n self._min_var = zeros_like(self.z).astype(int)\n self._variance = zeros([len(ra)] + list(shape))", "def pc_output_buffers_full(self, *args):\n return _spacegrant_swig.G3RUH_descramble_sptr_pc_output_buffers_full(self, *args)", "def __init__(self, mesh, V):\n super().__init__(mesh, V)\n self.source = Constant(0)", "def glBufferData( baseOperation, target, size, data=None, usage=None ):\n if usage is None:\n usage = data\n data = size\n size = None\n data = ArrayDatatype.asArray( data )\n if size is None:\n size = ArrayDatatype.arrayByteCount( data )\n return baseOperation( target, size, data, usage )", "def pc_output_buffers_full_var(self, *args):\n return _spacegrant_swig.G3RUH_descramble_sptr_pc_output_buffers_full_var(self, *args)", "def set_min_output_buffer(self, *args):\n return _spacegrant_swig.G3RUH_descramble_sptr_set_min_output_buffer(self, *args)", "def InitGL(self):\r\n glClearColor(0.0,0.0,0.0,0.0); # set clear color to black\r\n glEnable(GL_TEXTURE_2D)\r\n self.SetupView()\r\n return", "def pc_input_buffers_full(self, *args):\n return _spacegrant_swig.G3RUH_descramble_sptr_pc_input_buffers_full(self, *args)", "def __init__(self, description, vdma=None):\n super().__init__(description)\n self._vdma = vdma\n self._color = self.color_convert\n self._pixel = self.pixel_unpack\n self._hdmi = self.frontend", "def Render(self, mode):\n\n shaders.glUseProgram(self.shader)\n try:\n self.vbo.bind()\n try:\n glEnableClientState(GL_VERTEX_ARRAY)\n GLVertexPointer(self.vbo)\n glDrawArrays(GL_TRIANGLES, 0, 9)\n finally:\n self.vbo.unbind()\n glDisableClientState(GL_VERTEX_ARRAY)\n finally:\n shaders.glUseProgram(0)", "def _start(self):\r\n opengles.glBindFramebuffer(GL_FRAMEBUFFER, self.framebuffer[0])\r\n 
opengles.glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,\r\n GL_TEXTURE_2D, self._tex.value, 0)\r\n #thanks to PeterO c.o. RPi forum for pointing out missing depth attchmnt\r\n opengles.glBindRenderbuffer(GL_RENDERBUFFER, self.depthbuffer[0])\r\n opengles.glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT16,\r\n self.ix, self.iy)\r\n opengles.glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT,\r\n GL_RENDERBUFFER, self.depthbuffer[0])\r\n opengles.glClear(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT)\r\n\r\n #assert opengles.glCheckFramebufferStatus(GL_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE\r", "def setup_view(self, shader_program):\n n = self.normalize(self.eyepoint - self.lookat)\n u = self.normalize(np.cross(self.normalize(self.up), n))\n v = self.normalize(np.cross(n, u))\n\n view_mat = np.array([u[0], v[0], n[0], 0.0,\n u[1], v[1], n[1], 0.0,\n u[2], v[2], n[2], 0.0,\n -np.dot(u, self.eyepoint),\n -np.dot(v, self.eyepoint),\n -np.dot(n, self.eyepoint), 1.0],\n dtype=np.float32)\n\n view_location = glGetUniformLocation(shader_program, \"view\")\n glUseProgram(shader_program)\n glUniformMatrix4fv(view_location, 1, GL_FALSE, view_mat)", "def pc_output_buffers_full_avg(self, *args):\n return _add_vector_swig.add_vector_2_cpp_sptr_pc_output_buffers_full_avg(self, *args)", "def __init__(self, shader_program):\n self.tessellate(20)\n\n self.k_ambient = np.array([0.3, 0.3, 0.21], dtype=np.float32)\n self.k_diffuse = np.array([0.4, 0.5, 0.35], dtype=np.float32)\n self.k_specular = np.array([0.3, 0.3, 0.3], dtype=np.float32)\n self.shininess = 7.0\n\n self.set_buffers(shader_program)", "def __bind_pca_texture(self):\n size = View.__principal_components.size // 3\n data = View.__principal_components.transpose() * View.__deviations\n\n columns = 2**13\n rows = ceil(size / columns)\n\n padding = [0] * (rows * columns - size) * 3\n data = concatenate((data.flatten(), padding))\n\n self.__sh.create_float_texture(data, (columns, rows), 2, 3)", "def min_output_buffer(self, i):\n return _add_vector_swig.add_vector_2_cpp_sptr_min_output_buffer(self, i)", "def Buffer(self) -> _n_0_t_7[_n_0_t_6]:", "def Buffer(self) -> _n_0_t_7[_n_0_t_6]:", "def Buffer(self) -> _n_0_t_7[_n_0_t_6]:", "def __copy2array(self):\n # extract gradient and bias\n w = self.init_layer.weight\n b = self.init_layer.bias\n self.params = [[w,b]] # save it to buffer\n if self.bpoints is not None:\n for d, x1 in zip(self.deltas, self.bpoints):\n y1 = w *x1 + b # find the endpoint of line segment (x1, y1)\n w = w + d # add on the delta to gradient\n b = y1 - w * x1 # find new bias of line segment\n self.params.append([w,b]) # add to buffer\n\n # create buffer\n self.wb = torch.zeros(len(self.params), len(self.params[0]))\n for i in range(self.wb.shape[0]):\n for j in range(self.wb.shape[1]):\n self.wb[i,j] = self.params[i][j]", "def pc_output_buffers_full_var(self, *args):\n return _spacegrant_swig.invert_bit_sptr_pc_output_buffers_full_var(self, *args)", "def __init__(self, r, g, b, struct=None):\n self._intern = struct or dlib.Color(r,g,b)", "def __init__(self,\r\n W_regularizer=None, b_regularizer=None,\r\n W_constraint=None, b_constraint=None,\r\n bias=True, **kwargs):\r\n self.supports_masking = True\r\n self.init = initializations.get('glorot_uniform')\r\n\r\n self.W_regularizer = regularizers.get(W_regularizer)\r\n self.b_regularizer = regularizers.get(b_regularizer)\r\n\r\n self.W_constraint = constraints.get(W_constraint)\r\n self.b_constraint = constraints.get(b_constraint)\r\n\r\n self.bias = 
bias\r\n super(AttLayer, self).__init__(**kwargs)", "def pc_output_buffers_full(self, *args):\n return _spacegrant_swig.invert_bit_sptr_pc_output_buffers_full(self, *args)", "def use(self):\r\n opengles.glUseProgram(self.program)", "def initialize(self, gl):\n\n self.waterProgram = self.linkProgram(gl, 'water')\n self.waterVBO = QOpenGLBuffer(QOpenGLBuffer.VertexBuffer)\n assert self.waterVBO.create(), \"Can't create water vertex buffer =\\\\\"\n self.waterVBO.setUsagePattern(QOpenGLBuffer.DynamicDraw)\n\n self.waterRefractionProgram = self.linkProgram(gl, 'water-refraction')\n self.refractionFramebuffer = self.createFramebuffer(gl, 512, depth=True)\n self.refractionNormalMap = self.createTexture(gl, wrapMode=QOpenGLTexture.Repeat, filename='normalmap.bmp')\n\n self.depthProgram = self.linkProgram(gl, 'depth')\n self.depthFramebuffer = self.createFramebuffer(gl, 512)\n self.depthTexture = self.createTexture(gl, self.depthFramebuffer.width(), format=QOpenGLTexture.D32F, allocate=False,\n GL_TEXTURE_COMPARE_MODE=gl.GL_COMPARE_REF_TO_TEXTURE,\n GL_TEXTURE_COMPARE_FUNC=gl.GL_LESS)\n self.depthTexture.bind()\n gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_DEPTH_COMPONENT32, \n self.depthFramebuffer.width(), self.depthFramebuffer.height(), \n 0, gl.GL_DEPTH_COMPONENT, gl.GL_FLOAT, None)\n self.depthTexture.release()\n assert self.depthFramebuffer.bind()\n gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_DEPTH_ATTACHMENT, gl.GL_TEXTURE_2D, self.depthTexture.textureId(), 0)\n assert self.depthFramebuffer.release()\n\n self.landscapeProgram = self.linkProgram(gl, 'landscape')\n self.landscapeVBO = QOpenGLBuffer(QOpenGLBuffer.VertexBuffer)\n assert self.landscapeVBO.create(), \"Can't create water vertex buffer =\\\\\"\n self.landscapeVBO.setUsagePattern(QOpenGLBuffer.DynamicDraw)\n\n self.heightsTexture = self.createTexture(gl, self.logicalResources.m, self.logicalResources.n, \n format=QOpenGLTexture.RG32F, filter=QOpenGLTexture.Nearest)\n \n self.updateMeshesAndHeightsTexture(gl)", "def vbo( self, mode ):\n uploaded = mode.cache.getData( self, 'buffer' )\n if uploaded is None:\n uploaded = vbo.VBO( \n self.buffer, \n usage=self.gl_usage(), \n target=self.gl_target(),\n ) # TODO: stream type\n holder = mode.cache.holder( self, uploaded, 'buffer' )\n holder.depend( self, 'buffer' )\n return uploaded", "def __enter__(self):\n gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.fbo)", "def pc_input_buffers_full_var(self, *args):\n return _spacegrant_swig.G3RUH_descramble_sptr_pc_input_buffers_full_var(self, *args)", "def __init__( self, buffer, start_offset, bytes_reverse=False, bits_reverse=False, output_reverse=False, bytes_to_cache=1 ):\n assert is_bytes( buffer )\n assert start_offset in range( len( buffer ) )\n self.buffer = buffer\n self.bits_reverse = bits_reverse\n self.bytes_reverse = bytes_reverse\n self.output_reverse = output_reverse\n self.pos = start_offset\n self.bytes_to_cache = bytes_to_cache\n self._fill_buffer()", "def allocatememory(self):\n self._numvertstotal = self._numents * self._nvet\n self._cords = VertDataSingleChannel(GLDataType.FLOAT, 3, self._numvertstotal)\n self._colors = VertDataSingleChannel(GLDataType.UBYTE, 4, self._numvertstotal)", "def _create(self):\n\n log.debug(\"GPU: Creating program\")\n\n # Check if program has been created\n if self._handle <= 0:\n self._handle = gl.glCreateProgram()\n if not self._handle:\n raise ValueError(\"Cannot create program object\")\n\n self._build_shaders(self._handle)\n\n log.debug(\"GPU: Linking program\")\n\n # Link the program\n 
gl.glLinkProgram(self._handle)\n if not gl.glGetProgramiv(self._handle, gl.GL_LINK_STATUS):\n print(gl.glGetProgramInfoLog(self._handle))\n raise ValueError('Linking error')\n\n # Activate uniforms\n active_uniforms = [name for (name,gtype) in self.active_uniforms]\n for uniform in self._uniforms.values():\n if uniform.name in active_uniforms:\n uniform.active = True\n else:\n uniform.active = False\n\n # Activate attributes\n active_attributes = [name for (name,gtype) in self.active_attributes]\n for attribute in self._attributes.values():\n if attribute.name in active_attributes:\n attribute.active = True\n else:\n attribute.active = False", "def __init__(self):\r\n super(Defocus, self).__init__(\"defocus\")\r\n # load blur shader\r\n self.shader = Shader(\"defocus\")", "def __init__(self, kernel_size, out_ch, stride, pad, shape):\n self.kernel_size = kernel_size\n self.out_ch = out_ch\n self.stride = stride\n self.pad = (0,) + pad + (0,)\n if shape is None:\n self.shape = None\n else:\n self.shape = shape", "def __init__(self,x_pos, y_pos, velocity, kind, fillcolor = 'red'):\n self._velocity = velocity\n self._kind = kind\n super().__init__(x = x_pos, y=y_pos, width = BOLT_WIDTH, \\\n height = BOLT_HEIGHT, fillcolor=fillcolor)", "def add_vertex_main(self, *args, **kwargs):\n kwargs['shader'] = 'vertex'\n self.add_main(*args, **kwargs)", "def __init__(self, **kwargs):\n base.Layer.__init__(self, **kwargs)\n self._num_output = self.spec.get('num_output', 0)\n if self._num_output <= 0:\n raise base.InvalidLayerError(\n 'Incorrect or unspecified num_output for %s' % self.name)\n self._reg = self.spec.get('reg', None)\n self._filler = self.spec.get('filler', None)\n self._weight = base.Blob(filler=self._filler)\n self._has_bias = self.spec.get('bias', True)\n if self._has_bias:\n self._bias_filler = self.spec.get('bias_filler', None)\n self._bias = base.Blob(filler=self._bias_filler)\n self._param = [self._weight, self._bias]\n else:\n self._param = [self._weight]", "def __init__(self):\n kernel=numpy.array([[0.4, 0.4, 0.4, 0.4, 0.4],\n [0.4, 0.4, 0.4, 0.4, 0.4],\n [0.4, 0.4, 0.4, 0.4, 0.4],\n [0.4, 0.4, 0.4, 0.4, 0.4],\n [0.4, 0.4, 0.4, 0.4, 0.4]])\n VConvolutionFilter.__init__(self,kernel)", "def __init__(self, *args, **kwargs):\n _gdi_.RendererVersion_swiginit(self,_gdi_.new_RendererVersion(*args, **kwargs))", "def pre_render(self) -> None:\n self.buffer = Surface((self.render_width, self.render_height), SRCALPHA)\n self.buffer.fill(list(self.halo_texture.surfaces.values())[0].get_at((0, 0)))\n\n self.buffer.fill((0, 0, 0, 0), Rect(\n (self.render_width - self.halo_texture.get_width()) // 2,\n (self.render_height - self.halo_texture.get_height()) // 2,\n self.halo_texture.get_width(),\n self.halo_texture.get_height()\n ))", "def __init__(self, *args, **kwargs):\n _gdi_.Colour_swiginit(self,_gdi_.new_Colour(*args, **kwargs))", "def pc_output_buffers_full_var(self, *args) -> \"std::vector< float,std::allocator< float > >\":\n return _beamforming_swig.doaesprit_sptr_pc_output_buffers_full_var(self, *args)", "def pc_output_buffers_full(self, *args) -> \"std::vector< float,std::allocator< float > >\":\n return _beamforming_swig.doaesprit_sptr_pc_output_buffers_full(self, *args)", "def __init__(self, mesh, V):\n super().__init__(mesh, V)\n self.bcs = None", "def __init__(self, **kwargs):\n super(VeryCleverBeamsplitter, self).__init__(**kwargs)\n self.shader_source = IL_SHADER_SOURCE\n self.centre = [0.5, 0.5]\n self.blazing_function = np.linspace(0,1,32)\n self.zernike_coefficients = 
np.zeros(12)", "def render_2d_vector(v1, gridsize=50):\n\n fb = pixel_op() \n fb.create_buffer(800, 800)\n fb.graticule(gridsize)\n fb.render_vector_2d( v1, scale=gridsize)\n fb.save('vec.png')" ]
[ "0.60485387", "0.5838095", "0.58365405", "0.58271587", "0.58271587", "0.5774059", "0.5710976", "0.5705954", "0.5652349", "0.5554869", "0.5520962", "0.55178773", "0.54938525", "0.53955853", "0.5374403", "0.53721666", "0.5345584", "0.53194004", "0.5283975", "0.5239198", "0.5213602", "0.52115166", "0.5201943", "0.518131", "0.5143271", "0.5130739", "0.5089273", "0.5062824", "0.50595576", "0.49882728", "0.49789935", "0.49580547", "0.49580547", "0.4948128", "0.49090755", "0.48943138", "0.48690045", "0.4854212", "0.48443177", "0.48306975", "0.48212612", "0.48200405", "0.4812909", "0.47922736", "0.47902563", "0.47860357", "0.47851583", "0.47831804", "0.47722232", "0.47662657", "0.47657338", "0.4763539", "0.47386956", "0.47281596", "0.4724926", "0.47218663", "0.47216898", "0.47143838", "0.4713766", "0.47088796", "0.46741015", "0.46580085", "0.4656895", "0.46437138", "0.46416888", "0.46388903", "0.46337682", "0.46265393", "0.46257454", "0.46240875", "0.4624024", "0.4624024", "0.4624024", "0.46187362", "0.46159473", "0.4611276", "0.4607722", "0.4606708", "0.46053982", "0.46018934", "0.45982823", "0.45970887", "0.4594517", "0.4580913", "0.4580556", "0.45777586", "0.45775017", "0.4568497", "0.45449248", "0.45242572", "0.45228535", "0.45221123", "0.45189893", "0.4516662", "0.4505984", "0.44995895", "0.4497073", "0.44867352", "0.44851556", "0.4482549" ]
0.69480604
0
updates the carry flag data (VBO3)
def update_carried(self, data):
    self.use()
    gpu_data = np.array(data, dtype=np.float32)
    gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbos[3])
    gl.glBufferData(gl.GL_ARRAY_BUFFER, gpu_data.nbytes, gpu_data, gl.GL_DYNAMIC_DRAW)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bcs(self, arg):\n\n self.pc += arg if self.p & const.FLAG_CARRY else 0\n self.pc = c_uint16(self.pc).value", "def bvc(self, arg):\n\n self.pc += arg if not self.p & const.FLAG_OVERFLOW else 0\n self.pc = c_uint16(self.pc).value", "def update_flags(self):\n # view mode, filled vs wirefrom\n if self.view['wireframe']:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)\n else:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL)\n\n # set fullscreen or windowed\n self.set_fullscreen(fullscreen=self.view['fullscreen'])\n\n # backface culling on or off\n if self.view['cull']:\n gl.glEnable(gl.GL_CULL_FACE)\n else:\n gl.glDisable(gl.GL_CULL_FACE)\n\n # case where we WANT an axis and NO vertexlist\n # is stored internally\n if self.view['axis'] and self._axis is None:\n from .. import creation\n # create an axis marker sized relative to the scene\n axis = creation.axis(origin_size=self.scene.scale / 100)\n # create ordered args for a vertex list\n args = rendering.mesh_to_vertexlist(axis)\n # store the axis as a reference\n self._axis = self.batch.add_indexed(*args)\n\n # case where we DON'T want an axis but a vertexlist\n # IS stored internally\n elif not self.view['axis'] and self._axis is not None:\n # remove the axis from the rendering batch\n self._axis.delete()\n # set the reference to None\n self._axis = None", "def _init_buffers(self, v, n, _):\n super()._init_buffers(v, n, _)\n\n self.vbos.append(gl.glGenBuffers(1))\n\n # init VBO 2 - dynamic color data\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbos[3])\n loc = self.get_attribute_location(\"carried\")\n gl.glEnableVertexAttribArray(loc)\n gl.glVertexAttribPointer(loc, 1, gl.GL_FLOAT, gl.GL_FALSE, 0, ctypes.c_void_p(0))\n gl.glVertexAttribDivisor(loc, 1)\n gl.glBufferData(gl.GL_ARRAY_BUFFER, 0, np.array([], dtype=np.float32), gl.GL_DYNAMIC_DRAW)", "def bcc(self, arg):\n\n self.pc += arg if not self.p & const.FLAG_CARRY else 0\n self.pc = c_uint16(self.pc).value", "def bvs(self, arg):\n\n self.pc += arg if self.p & const.FLAG_OVERFLOW else 0\n self.pc = c_uint16(self.pc).value", "def BVC(self, value):\n if not self.reg.V:\n self.reg.PC += value", "def update(self):\n self.bpos_x += 3", "def TAY(self, *_):\n self.reg.Y = self.reg.A\n self.reg.N = self.reg.Y << 7\n self.reg.Z = self.reg.Y == 0", "def _update_bit_features(self):\n index = 1 if self.is_info_v2 else 0\n for feature, keys in BIT_FEATURES.items():\n status = self.lookup_bit(keys[index])\n self._update_feature(feature, status, False)", "def update_control(self):\n self._control_ctr += 0x01", "def update(self, v, r):\n pass", "def BVS(self, value):\n if self.reg.V:\n self.reg.PC += value", "def _update(self, buf):\n\n self.setup()\n\n # TODO there has to be a better way to force the white colour to be used instead of clear...\n\n for i in range(len(buf)):\n if buf[i] & 0xf == 7:\n buf[i] = (buf[i] & 0xf0) + 1\n # print buf[i]\n if buf[i] & 0xf0 == 0x70:\n buf[i] = (buf[i] & 0xf) + 0x10\n # print buf[i]\n\n self._send_command(AC073TC1_DTM, buf)\n\n self._send_command(AC073TC1_PON)\n self._busy_wait(0.4)\n\n self._send_command(AC073TC1_DRF, [0x00])\n self._busy_wait(45.0) # 41 seconds in testing\n\n self._send_command(AC073TC1_POF, [0x00])\n self._busy_wait(0.4)", "def update_frame(self, frame):\n self.set_bank(frame)\n offset = 0\n for chunk in self._chunk(self._buf[frame], 32):\n self.i2c.write_i2c_block_data(self.address, _COLOR_OFFSET + offset, chunk)\n offset += 32", "def recompile(self):\n\n self.vaos = []\n try:\n self.program, uniforms = self.build_prog(self.gl)\n 
self.u_time, self.u_width, self.u_height = uniforms\n vao = GLUtil.screen_vao(self.gl, self.program)\n self.vaos.append(vao)\n\n self.compute, uniforms, buffers = self.build_cs(self.gl)\n self.u_cstime, self.u_cswidth, self.u_csheight = uniforms\n self.buf_in, self.buf_out = buffers\n\n self.set_gpu_wh(width, height)\n\n self.gx, self.gy = int(width / 8), int(height / 8)\n self.set_gpu_time()\n\n log(\"[Renderer] shader recompiled.\")\n\n except Exception as e:\n log(e)", "def update(self,update_flags):\n pass", "def _select(self):\r\n opengles.glBindBuffer(GL_ARRAY_BUFFER, self.vbuf)\r\n opengles.glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.ebuf)", "def data(self, data, enable):\n app_x, app_y = self.pointer\n self.buffer[\n self.area['start_x'] + app_x][self.area['start_y'] + app_y\n ] = data\n self._inc_pointer()", "def _commit(self):\n ckresult(\n _dll.FMOD_System_Set3DListenerAttributes(\n self._sysptr,\n self._id,\n byref(self._pos),\n byref(self._vel),\n byref(self._fwd),\n byref(self._up),\n )\n )", "def mv_step(self):\n # def mv_all(self):\n self.device_reg_data &= ~(0x1 << 3)\n bus.write_byte_data(self.device_address, self.device_reg_mode1, self.device_reg_data)", "def writeBOV(g):\n global counter\n bovNm = 'file_%03d.bov' % counter\n dataNm = 'file_%03d.data' % counter\n counter += 1\n with open(bovNm, 'w') as f:\n f.write('TIME: %g\\n' % float(counter))\n f.write('DATA_FILE: %s\\n' % dataNm)\n if len(g.shape) == 2:\n f.write('DATA_SIZE: %d %d 1\\n' % g.shape)\n elif len(g.shape) == 3:\n f.write('DATA_SIZE: %d %d %d\\n' % g.shape)\n else:\n raise RuntimeError(f'unexpected shape {g.shape}')\n if g.dtype == np.float64:\n f.write('DATA_FORMAT: DOUBLE\\n')\n elif g.dtype == np.int32:\n f.write('DATA_FORMAT: INT\\n')\n else:\n raise RuntimeError(f'unexpected data type {g.dtype}')\n f.write('VARIABLE: U\\n')\n f.write('DATA_ENDIAN: LITTLE\\n')\n f.write('CENTERING: ZONAL\\n')\n f.write('BRICK_ORIGIN: 0. 0. 
0.\\n')\n f.write('BRICK_SIZE: 1.0 1.0 1.0\\n')\n with open(dataNm, 'w') as f:\n g.T.tofile(f) # BOV format expects Fortran order", "def appGL(deltaT):#-------------------------------- OpenGL UPDATE\n pass # -> Delete this line if you do something here !", "def vbo( self, mode ):\n uploaded = mode.cache.getData( self, 'buffer' )\n if uploaded is None:\n uploaded = vbo.VBO( \n self.buffer, \n usage=self.gl_usage(), \n target=self.gl_target(),\n ) # TODO: stream type\n holder = mode.cache.holder( self, uploaded, 'buffer' )\n holder.depend( self, 'buffer' )\n return uploaded", "def update(): # (12)\n with canvas(device) as draw:\n for led_pos in range(0, len(color_buffer)):\n color = color_buffer[led_pos]\n\n ## If your LED strip's colors are are not in the expected\n ## order, uncomment the following lines and adjust the indexes\n ## in the line color = (rgb[0], rgb[1], rgb[2])\n # rgb = getrgb(color)\n # color = (rgb[0], rgb[1], rgb[2])\n # if len(rgb) == 4:\n # color += (rgb[3],) # Add in Alpha\n\n draw.point((led_pos, 0), fill=color)", "def int_33H_3(self):\r\n horizontal_position, vertical_position = MainWindow.get_cursor_poisition()\r\n button_status = 1\r\n\r\n self.registers['CX'].set_bytes(horizontal_position, is_int=True)\r\n self.registers['DX'].set_bytes(vertical_position, is_int=True)\r\n self.registers['BX'].set_bytes(button_status, is_int=True)", "def write_reg3(self, value: int) -> None:\n self.length_ctr_load = value >> 3\n\n # TODO Restart envelope", "def test_add_to_vx(self, cpu):\n for x in range(0x0, 0xF):\n for v in range(0x0, 0xFF):\n for kk in range(0x0, 0xFF):\n cpu.V_register[x] = v\n cpu.opcode = 0x7000 | (x << 8) | kk\n cpu.add_to_vx()\n assert(cpu.V_register[x] == (v + kk) & 0xFF)", "def TXA(self, *_):\n self.reg.A = self.reg.X\n self.reg.N = self.reg.A << 7\n self.reg.Z = self.reg.A == 0", "def TYA(self, *_):\n self.reg.A = self.reg.Y\n self.reg.N = self.reg.A << 7\n self.reg.Z = self.reg.A == 0", "def on_flags_update(self, event):\n self.entity.on_flags_update(event)", "def cb_plus(event):\n delta_alpha = pm_rate\n # Increase Alpha \n sAlpha0.set_val( np.clip(sAlpha0.val + delta_alpha, alpha_min[0], alpha_max[0]) )\n sAlpha1.set_val( np.clip(sAlpha1.val + delta_alpha, alpha_min[1], alpha_max[1]) )\n sAlpha2.set_val( np.clip(sAlpha2.val + delta_alpha, alpha_min[2], alpha_max[2]) )\n print(\"+++\")", "def increment(self):\n self.data[self.pointer] += 1\n self.data[self.pointer] %= 256", "def _refresh_buffers(self) -> None:", "def inc(self, params):\n reg = params[0]\n if self.reg_dct[reg] == (2 ** 32) - 1:\n self.reg_dct[reg] = 0\n else:\n self.reg_dct[reg] += 1", "def test_set_vx_to_vx_plus_vy(self, cpu):\n for x in range(0x0, 0xF):\n for y in range(0x0, 0xF):\n if x != y:\n cpu.opcode = 0x8004 | (x << 8) | (y << 4)\n for v1 in range(0x0, 0xFF):\n for v2 in range(0x0, 0xFF):\n cpu.V_register[x] = v1\n cpu.V_register[y] = v2\n cpu.set_vx_to_vx_plus_vy()\n value = v1 + v2\n if value > 0xFF:\n assert(cpu.V_register[0xF] == 1)\n assert(cpu.V_register[x] == value & 0xFF)\n else:\n assert(cpu.V_register[0xF] == 0)\n assert(cpu.V_register[x] == value)", "def mv_all(self):\n # def mv_step(self):\n self.device_reg_data &= ~(0x1 << 2)\n bus.write_byte_data(self.device_address, self.device_reg_mode1, self.device_reg_data)", "def update(self):\n\t\tfor x in range(self.leds):\n\t\t\tself.spi.write(self.buffer[x])\n\t\t\t#self.spi.flush()\n\t\t\t\n\t\tself.spi.write(bytearray(b'\\x00'))\n\t\tself.spi.flush()", "def update(self, buffer: ReplayBuffer) -> np.ndarray:\n raise 
NotImplementedError", "def init_buffer(self):\n \n self.shape.buf = [pi3d.Buffer(self.shape, self.verts, self.texcoords, self.inds, self.norms)]\n self.shape.set_draw_details(self.shader, [self.spritesheet.img])", "def update(self):\n self._g, self._B = self._constraint_assembler.preallocate_g_and_B(self._no_of_dofs_unconstrained,\n self._dofidxs(),\n self._no_of_constraints_by_object())", "def enable_cl1_direct(self):\n self.write_versa5(0x17,0x02) ## Change top multiplier to 0x22\n self.write_versa5(0x18,0x20)\n self.write_versa5(0x10,0xc0) ## Enable xtal and clock\n self.write_versa5(0x13,0x03) ## Switch to clock\n self.write_versa5(0x10,0x44) ## Enable clock input only and refmode\n self.write_versa5(0x21,0x0c) ## Use previous channel, direct input, may have skew", "def updateInterface(self):\n p = self.cxn[self.selectedADR].packet()\n p.magnetv().pscurrent().psvoltage()\n p.time()\n p.temperatures()\n p.get_state_var('CompressorStatus')\n p.get_instrument_state()\n state = yield p.send()\n # change instrument statuses\n for name,status in state['get_instrument_state']:\n if status[0] == False: color = 'red3'\n elif status[1] == False: color = 'orange3'\n elif status[1] == True: color = 'green3'\n else: color = 'gray70'\n self.instrumentStatuses[name].config(bg=color)\n # change compressor button\n if state['get_state_var'] == True:\n self.compressorButton.configure(text='Stop Compressor',\n command=self.stopCompressor,\n state=Tkinter.NORMAL)\n elif state['get_state_var'] == False:\n self.compressorButton.configure(text='Start Compressor',\n command=self.startCompressor,\n state=Tkinter.NORMAL)\n else: self.compressorButton.configure(state=Tkinter.DISABLED)\n # update current, voltage fields\n temps = {}\n stages = ('T_60K','T_3K','T_GGG','T_FAA')\n for i in range(len(stages)):\n temps[stages[i]] = state['temperatures'][i]\n #if temps[stages[i]] == 'nan': temps[stages[i]] = numpy.nan\n if numpy.isnan(state['magnetv']['V']):\n emf = 'ERR'\n else:\n emf = \"{0:.3f}\".format(state['magnetv']['V'])\n if numpy.isnan(state['pscurrent']['A']):\n psI = 'PS OFF'\n else:\n psI = \"{0:.3f}\".format(state['pscurrent']['A'])\n if numpy.isnan(state['psvoltage']['V']):\n psV = 'PS OFF'\n else:\n psV = \"{0:.3f}\".format(state['psvoltage']['V'])\n self.currentBackEMF.set( emf )\n self.currentI.set( psI )\n self.currentV.set( psV )\n # update plot:\n # change data to plot\n self.stage60K.set_xdata(numpy.append(self.stage60K.get_xdata(),mpl.dates.date2num(state['time'])))\n self.stage60K.set_ydata(numpy.append(self.stage60K.get_ydata(),temps['T_60K']['K']))\n self.stage03K.set_xdata(numpy.append(self.stage03K.get_xdata(),mpl.dates.date2num(state['time'])))\n self.stage03K.set_ydata(numpy.append(self.stage03K.get_ydata(),temps['T_3K']['K']))\n self.stageGGG.set_xdata(numpy.append(self.stageGGG.get_xdata(),mpl.dates.date2num(state['time'])))\n self.stageGGG.set_ydata(numpy.append(self.stageGGG.get_ydata(),temps['T_GGG']['K']))\n self.stageFAA.set_xdata(numpy.append(self.stageFAA.get_xdata(),mpl.dates.date2num(state['time'])))\n self.stageFAA.set_ydata(numpy.append(self.stageFAA.get_ydata(),temps['T_FAA']['K']))\n #update plot\n self.updatePlot()\n # update legend\n labelOrder = ['T_60K','T_3K','T_GGG','T_FAA']\n lines = [self.stage60K,self.stage03K,self.stageGGG,self.stageFAA]\n labels = [l.strip('T_')+' ['+\"{0:.3f}\".format(temps[l]['K'])+'K]' for l in labelOrder]\n labels = [s.replace('1.#QOK','OoR') for s in labels]\n # legend on top (if not using this, delete \\n in title)\n 
self.ax.legend(lines,labels,bbox_to_anchor=(0., 1.02, 1., .102), loc=3,\n ncol=4, mode=\"expand\", borderaxespad=0.)", "def set_global_problem_vf_3_gr1_bif(self):\n #0\n std_map = Epetra.Map(len(self.all_fine_vols_ic),0,self.comm)\n self.trans_fine = Epetra.CrsMatrix(Epetra.Copy, std_map, 7)\n self.b = Epetra.Vector(std_map)\n for volume in self.all_fine_vols_ic - set(self.neigh_wells_d):\n #1\n soma = 0.0\n soma2 = 0.0\n soma3 = 0.0\n volume_centroid = self.mesh_topo_util.get_average_position([volume])\n adj_volumes = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)\n kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])\n global_volume = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]\n lamb_w_vol = self.mb.tag_get_data(self.lamb_w_tag, volume)[0][0]\n lamb_o_vol = self.mb.tag_get_data(self.lamb_o_tag, volume)[0][0]\n lbt_vol = lamb_w_vol + lamb_o_vol\n z_vol = self.tz - volume_centroid[2]\n soma = 0.0\n temp_glob_adj = []\n temp_k = []\n flux_gr = []\n for adj in adj_volumes:\n #2\n global_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]\n adj_centroid = self.mesh_topo_util.get_average_position([adj])\n z_adj = self.tz - adj_centroid[2]\n altura = adj_centroid[2]\n direction = adj_centroid - volume_centroid\n uni = self.unitary(direction)\n kvol = np.dot(np.dot(kvol,uni),uni)\n #kvol = kvol*(lamb_w_vol + lamb_o_vol)\n kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])\n kadj = np.dot(np.dot(kadj,uni),uni)\n lamb_w_adj = self.mb.tag_get_data(self.lamb_w_tag, adj)[0][0]\n lamb_o_adj = self.mb.tag_get_data(self.lamb_o_tag, adj)[0][0]\n lbt_adj = lamb_w_adj + lamb_o_adj\n\n #kadj = kadj*(lamb_w_adj + lamb_o_adj)\n keq = self.kequiv(kvol, kadj)*((lbt_adj + lbt_vol)/2.0)\n keq = keq*(np.dot(self.A, uni)/float(abs(np.dot(direction, uni))))\n grad_z = (z_adj - z_vol)\n q_grad_z = grad_z*self.gama*keq\n flux_gr.append(q_grad_z)\n\n temp_glob_adj.append(self.map_vols_ic[adj])\n temp_k.append(-keq)\n kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])\n #1\n soma2 = -sum(flux_gr)\n temp_k.append(-sum(temp_k))\n temp_glob_adj.append(self.map_vols_ic[volume])\n self.trans_fine.InsertGlobalValues(self.map_vols_ic[volume], temp_k, temp_glob_adj)\n if volume in self.wells_n:\n #2\n index = self.wells_n.index(volume)\n # tipo_de_poco = self.mb.tag_get_data(self.tipo_de_poco_tag, volume)\n if volume in self.wells_inj:\n #3\n self.b[self.map_vols_ic[volume]] += self.set_q[index] + soma2\n #2\n else:\n #3\n self.b[self.map_vols_ic[volume]] += -self.set_q[index] + soma2\n #1\n else:\n #2\n self.b[self.map_vols_ic[volume]] += soma2\n #0\n for volume in self.neigh_wells_d:\n #1\n soma2 = 0.0\n soma3 = 0.0\n volume_centroid = self.mesh_topo_util.get_average_position([volume])\n z_vol = self.tz - volume_centroid[2]\n adj_volumes = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)\n kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])\n global_volume = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]\n lamb_w_vol = self.mb.tag_get_data(self.lamb_w_tag, volume)[0][0]\n lamb_o_vol = self.mb.tag_get_data(self.lamb_o_tag, volume)[0][0]\n lbt_vol = lamb_w_vol + lamb_o_vol\n soma = 0.0\n temp_glob_adj = []\n temp_k = []\n flux_gr = []\n for adj in adj_volumes:\n #2\n global_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]\n adj_centroid = self.mesh_topo_util.get_average_position([adj])\n z_adj = self.tz - adj_centroid[2]\n altura = adj_centroid[2]\n direction = adj_centroid - 
volume_centroid\n uni = self.unitary(direction)\n z = uni[2]\n kvol = np.dot(np.dot(kvol,uni),uni)\n #kvol = kvol*(lamb_w_vol + lamb_o_vol)\n kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])\n kadj = np.dot(np.dot(kadj,uni),uni)\n lamb_w_adj = self.mb.tag_get_data(self.lamb_w_tag, adj)[0][0]\n lamb_o_adj = self.mb.tag_get_data(self.lamb_o_tag, adj)[0][0]\n lbt_adj = lamb_o_adj + lamb_o_adj\n #kadj = kadj*(lamb_w_adj + lamb_o_adj)\n keq = self.kequiv(kvol, kadj)*((lbt_adj + lbt_vol)/2.0)\n keq = keq*(np.dot(self.A, uni)/(abs(np.dot(direction, uni))))\n grad_z = (z_adj - z_vol)\n q_grad_z = grad_z*self.gama*keq\n flux_gr.append(q_grad_z)\n #2\n if adj in self.wells_d:\n #3\n soma = soma + keq\n index = self.wells_d.index(adj)\n self.b[self.map_vols_ic[volume]] += self.set_p[index]*(keq)\n #2\n else:\n #3\n temp_glob_adj.append(self.map_vols_ic[adj])\n temp_k.append(-keq)\n soma = soma + keq\n #2\n kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])\n #1\n soma2 = -sum(flux_gr)\n temp_k.append(soma)\n temp_glob_adj.append(self.map_vols_ic[volume])\n self.trans_fine.InsertGlobalValues(self.map_vols_ic[volume], temp_k, temp_glob_adj)\n if volume in self.wells_n:\n #2\n index = self.wells_n.index(volume)\n # tipo_de_poco = self.mb.tag_get_data(self.tipo_de_poco_tag, volume)\n if volume in self.wells_inj:\n #3\n self.b[self.map_vols_ic[volume]] += self.set_q[index] + soma2\n #2\n else:\n #3\n self.b[self.map_vols_ic[volume]] += -self.set_q[index] + soma2\n #1\n else:\n #2\n self.b[self.map_vols_ic[volume]] += soma2\n #0\n self.trans_fine.FillComplete()", "def pc_input_buffers_full(self, *args):\n return _add_vector_swig.add_vector_2_cpp_sptr_pc_input_buffers_full(self, *args)", "def flush(self) -> None:\n super().put(self.buffer)\n self.buffer = np.ndarray((0, 1), dtype=np.int16)", "def update(self, idx, add):\n idx += 1\n while idx < len(self.array):\n self.array[idx] += add\n idx += idx & -idx #Adding the last bit", "def update_bom(self):\n bom2 = copy.copy(self.bom)\n ias1 = self.ias[self.chgs == 1]\n vs2 = copy.copy(self.vs)\n for i in ias1:\n iasc = self.ias[ np.logical_and(self.chgs==-1, self.bom[i]>0) ]\n nac = len(iasc)\n if nac > 0:\n #assert nac == 1\n j = iasc[0]\n bij = self.bom[i,j] - 1\n bom2[i,j] = bij\n bom2[j,i] = bij\n vs2[i] = vs2[i]+1; vs2[j] = vs2[j]+1\n self.bom = bom2\n self.vs = vs2", "def get_data(self):\n self.dev.write(1, 'A0')\n digit1, digit2 = self.dev.read(0x81, 64)[:2]\n # Save the data as voltage between 0.0 and 5.0\n self.data0.append((digit1 + 256*digit2)*5.0/1024)", "def prepare_attrib_mapping(self, primitive):\n buffer_info = []\n for name, accessor in primitive.attributes.items():\n info = VBOInfo(*accessor.info())\n info.attributes.append((name, info.components))\n\n if buffer_info and buffer_info[-1].buffer_view == info.buffer_view:\n if buffer_info[-1].interleaves(info):\n buffer_info[-1].merge(info)\n continue\n\n buffer_info.append(info)\n\n return buffer_info", "def pc_output_buffers_full(self, *args):\n return _add_vector_swig.add_vector_2_cpp_sptr_pc_output_buffers_full(self, *args)", "def bass_increase():\n request_command(tv_command=TVCommand.bass_increase)", "def updateCavity(self, obj):\n obj.Shape = self.getCavityShape(obj.Structure, obj.Position)\n obj.Volume = obj.Shape.Volume", "def cb_update(val):\n alpha_update = [sAlpha0.val, sAlpha1.val, sAlpha2.val]\n\n # update Dirichlet's parameters alpha\n dirichlet.set_param(alpha_update)\n draw_pdf_contours(axDirichlet, dirichlet, True) # Draw Dirichlet\n\n # MAP\n 
lambda_MAP = CatMAP.MAPinfer(x_cat, dirichlet)\n axMAP.cla()\n drawBarGraph( axMAP, \"MAP\", lambda_MAP, bar_y_max, col_MAP ) # Draw Bar graph\n\n # Bayes\n posteriorDirichlet.set_param(alpha_update)\n posteriorDirichlet.calcPosterior(x_cat)\n draw_pdf_contours(axPosteriorDirichlet, posteriorDirichlet) # Draw Posterior Dirichlet\n lambda_Bayes = np.zeros(3)\n for k in range(3):\n lambda_Bayes[k] = posteriorDirichlet.BayesInfer(k)\n\n axBayes.cla()\n drawBarGraph( axBayes, \"Bayes\", lambda_Bayes, bar_y_max, col_Bayes ) # Draw Bar graph\n\n print('Update')\n print('lambda_ML =', lambda_ML)\n print('lambda_MAP =', lambda_MAP)\n print('lambda_Bayes=', lambda_Bayes)\n draw_point(axDirichlet, lambda_ML, col_ML)\n draw_point(axDirichlet, lambda_MAP, col_MAP)\n draw_point(axDirichlet, lambda_Bayes, col_Bayes)\n draw_point(axPosteriorDirichlet, lambda_MAP, col_MAP)\n draw_point(axPosteriorDirichlet, lambda_Bayes, col_Bayes)\n\n fig.canvas.draw_idle()", "def re_init_buffer(self):\n #~ print(self.verts)\n #~ print(self.texcoords)\n #~ print(self.inds)\n self.shape.buf[0].re_init(pts=np.array(self.verts, 'f'),texcoords=np.array(self.texcoords, 'f'))", "def led3(self, val):\n data = val & self.LED3_MASK\n self._ftdi.spi_write(self.LED3_ADDR, [data], burst='fixed')", "def update(self, v_input):\n\n self.v = v_input", "def create_reference_array(self):\r\n self.active = True\r\n self.pxarray = pygame.surfarray.pixels3d(self.surface)\r\n pygame.display.flip()\r\n return", "def vbat_interrupt(self, activate=True):\n\n # Get the values of the register control_3\n ctrl3_val = self.__read_register(_REGISTER_CONTROL_3)\n\n if activate is True:\n # Sets the 1 first bits of control_3 to 1\n # 0bXXXXXXXX | 0b11100000\n ctrl3_val = ctrl3_val | 0x01 # 0x01 = 0b00000001\n elif activate is False:\n # Sets the 1 first of control_3 to 0\n # 0bXXXXXXXX & 0b00011111\n ctrl3_val = ctrl3_val & 0xFE # 0xFE = 0b11111110\n else:\n raise ValueError(\"vbat_interrupt only takes boolean argument\")\n\n # Write the new control_3 register in memory\n self.__write_register(_REGISTER_CONTROL_3, ctrl3_val)", "def _update_color(self):\n self._vertex_list.colors[:] = self._rgba * self._num_verts", "def updateArrays(self):\n for channelNumber in range(0, 8):\n self.channels[channelNumber][self.currentPosition]=self._voltage_get(channelNumber)#update next element in each array\n self.currentPosition+=1\n if self.currentPosition>=self.numberOfPoints:#reset position to beginning when we hit max number of points (like rolling oscilloscope)\n self.currentPosition=0\n self.cursorXS = self.getCurrentPositionArray()\n #could also set the next points to NaN's to make a gap!", "def update(self):\n\n self.pta_time[0] = 1 + Globals.clock.get_frame_time() * self.options.time_scale\n\n Globals.base.graphicsEngine.dispatch_compute(\n (self.options.size // 16, self.options.size // 16, 1),\n self.attr_update,\n Globals.base.win.get_gsg())\n\n self.fftX.execute()\n self.fftY.execute()\n self.fftZ.execute()\n\n # Execute the shader which combines the 3 displacement maps into\n # 1 displacement texture and 1 normal texture. 
We could use dFdx in\n # the fragment shader, however that gives no accurate results as\n # dFdx returns the same value for a 2x2 pixel block\n Globals.base.graphicsEngine.dispatch_compute(\n (self.options.size // 16, self.options.size // 16, 1),\n self.attr_combine,\n Globals.base.win.get_gsg())", "def updateParameters(self, parameters):\r\n #return\r\n parameters[2].enabled = 0\r\n parameters[3].enabled = 0\r\n parameters[4].enabled = 0", "def updateParameters(self, parameters):\r\n #return\r\n parameters[2].enabled = 0\r\n parameters[3].enabled = 0\r\n parameters[4].enabled = 0", "def Buffer(self) -> _n_0_t_7[_n_0_t_6]:", "def Buffer(self) -> _n_0_t_7[_n_0_t_6]:", "def Buffer(self) -> _n_0_t_7[_n_0_t_6]:", "def L3G_acquisition(add):\n\n # control register\n CTRL_REG1 = 0x20\n CTRL_REG4 = 0x23\n LOW_ODR = 0x39\n FIFO_CTRL = 0x2e\n # output register\n OUT_X_L = 0x28\n OUT_X_H = 0x29\n OUT_Y_L = 0x2a\n OUT_Y_H = 0x2b\n OUT_Z_L = 0x2c\n OUT_Z_H = 0x2d\n\n # low odr mode, 50Hz, 2000 dps full scale\n bus.write_byte_data(add, CTRL_REG1, 0b10001111)\n bus.write_byte_data(add, CTRL_REG4, 0b00110000)\n bus.write_byte_data(add, LOW_ODR, 0b00000001)\n bus.write_byte_data(add, FIFO_CTRL, 0b01000000)\n\n # accelerator accumulate\n while True:\n uint16_gx = (bus.read_byte_data(add, OUT_X_H) << 8) + \\\n bus.read_byte_data(add, OUT_X_L)\n uint16_gy = (bus.read_byte_data(add, OUT_Y_H) << 8) + \\\n bus.read_byte_data(add, OUT_Y_L)\n uint16_gz = (bus.read_byte_data(add, OUT_Z_H) << 8) + \\\n bus.read_byte_data(add, OUT_Z_L)\n\n gx = twos_comp(uint16_gx, 16)\n gy = twos_comp(uint16_gy, 16)\n gz = twos_comp(uint16_gz, 16)\n\n yield [gx, gy, gz]", "def appendlistdata_f3xyzf3rgb(self, x, y, z, r, g, b):\n self._ivert += 1\n self._cords.add_Data3(x, y, z)\n self._colors.add_Data4(r * 255, g * 255, b * 255, 255)\n return self._ivert", "def __set_AP(self):\n\t\tif self.version == 1:\n\t\t\treturn 0\n\t\tloc = self.__get_AP_position()\n\t\tfor r in range(-2,3):\n\t\t\tfor c in range(-2,3):\n\t\t\t\tif (r == -2 or r == 2 or c == -2 or c == 2 or (r == 0 and c == 0)):\n\t\t\t\t\tself.matrix[loc+r][loc+c] = 1\n\t\t\t\telse:\n\t\t\t\t\tself.matrix[loc+r][loc+c] = 0", "def gVI(g,rBC,lBC,time,npts):\n #Important coeffcients\n global gamma\n gamma = g\n global alpha\n alpha = (gamma+1)/(gamma-1)\n global beta\n beta = (2*gamma)/(gamma-1)\n global epsilon\n epsilon = (2*gamma)/(gamma+1)\n #Boundary conditions\n global lbc\n lbc = lBC\n global rbc\n rbc = rBC\n #Time\n global t\n t = time\n #points\n global numPts\n numPts = npts\n #Speed of sound for states 1 and 5\n global cL\n cL = np.sqrt(gamma*lbc[0]/lbc[1])\n global cR\n cR = np.sqrt(gamma*rbc[0]/rbc[1])", "def reset_cbc(self):\n if not self.block_count:\n raise ValueError(\"cannot reset cbc until block_count is set\")\n cbc_len = np.prod(self.block_count)\n self.cbc = np.ones(cbc_len, dtype=np.bool)", "def update(self):\n self.smd3.update()\n self.logic.update(self.smd3)\n self.header.update(self.smd3)", "def BCS(self, value):\n if self.reg.C:\n self.reg.PC += value", "def inc_rstate(self):\r\n #self.rstate = ff_2p134(self.rstate)\r\n self.rstate = multMatVect(self.rstate, A1p134, M1, A2p134, M2)\r\n assert self.rstate.dtype == numpy.int32", "def update(self) -> None:\n self._state = b'\\x00' == self._device.readCharacteristic(self._handle)\n print(\"state\", self._state)", "def _update_feedback(self):\n #First read in the current voltage (power)\n #Read in numReadsPerCycle signals (arb) to average\n #TODO: allow user to select reads per signal\n currSignal = 
self._ai_client.get_ai_voltage(self._ai_channel, self.numReadsPerCycle, max_range=self.max_input_voltage)\n\n #Add new data to the pid\n self.pid.set_pv(np.atleast_1d(np.mean(currSignal)))\n\n #Now compute the new control value and update the AO\n self.pid.set_cv()\n self._curr_output_voltage = self._curr_output_voltage + self.pid.cv\n if self._curr_output_voltage < self.min_voltage:\n self._curr_output_voltage = self.min_voltage\n elif self._curr_output_voltage > self.max_voltage:\n self._curr_output_voltage = self.max_voltage\n\n\n #Finally updating the analog output\n\n #Do a final check to make sure that if you are in hardware control mode that the voltage control is still HIGH\n #This is to avoid the potential error if the voltage control is toggled low between the last call of _check_hardware_control\n #and update_feedback, whcih would mean that currSignal would be 0 (assuming a pulsed experiment), and causing a garbage\n #feedback which could be an issue in the next pulse.\n if (~self._under_hardware_control or self.ai_client.get_ai_voltage(self._hwc_ai_channel)[-1] > self._hwc_thresh):\n self._ao_client.set_ao_voltage(self._ao_channel, self._curr_output_voltage)", "def pc_output_buffers_full_var(self, *args):\n return _add_vector_swig.add_vector_2_cpp_sptr_pc_output_buffers_full_var(self, *args)", "def BRK(self, *_):\n # first, increment PC. Awkwardly, this overshoots the instruction after BRK, but that's a c64 bug and not an emulator bug\n self.reg.PC += 1\n # now write the PC to the stack in two steps\n self.push(self.reg.PCH)\n self.push(self.reg.PCL)\n # write processor flags to the stack\n self.push(self.reg.P | 0b00010000) # this logical OR forces the B flag set on write to stack only\n # set PC from the 16 bytes at 0xfffe and 0xffff\n self.reg.PC = self.ram.get(0xffff) | (self.ram.get(0xfffe) << 8)", "def update(self, x_train_single, updated_h):\n # x_row = cp.array(x_train_single.toarray())\n # cp.cuda.Stream.null.synchronize()\n updater(x_train_single,updated_h,self.weights,self.num_features,self.num_models,self.learning_rate)\n # self.biases += updated_h * self.learning_rate", "def add_buffer_data(self, cycle: int, src_comp, comp_id: int, dst_comp, data_id: int, value=-1):\n bus_item = BusItem(src_comp, comp_id, dst_comp, data_id, value)\n\n #delay_cycles = CYCLE_DELAYS[\"PE\"][self.component_subtype] + 1\n delay_cycles = 0\n if not self.cycle_state_exists(cycle + delay_cycles):\n _ = self.generate_intermediate_states(cycle + delay_cycles)\n\n # if self.is_data_present(cycle, bus_item):\n # return self.get_cycle_state(cycle)\n\n for future_cycle in range(cycle, cycle + delay_cycles + 1):\n # if self.is_data_present(future_cycle, bus_item):\n # continue\n print(f\"add_buffer_data: {cycle}\")\n\n if self.component_subtype == \"PEGB\":\n pe_buffers = self.get_cycle_buffer(future_cycle)\n # NOTE: dst_comp is the destination PE category ID\n buffer = pe_buffers[dst_comp]\n if dst_comp == \"pegb\":\n pe_buffers[\"pegb\"] = bus_item\n else:\n if src_comp == \"pegb\":\n buffer[\"read\"].append(bus_item)\n for item in self.pegb_read_buffer_written:\n #if bus_item.value[1]\n #if dst_comp not in self.pegb_read_buffer_written:\n self.pegb_read_buffer_written.append({'src': bus_item.value[1], 'dst': bus_item.value[2], 'id': bus_item.value[3]})\n else:\n buffer[\"write\"].append(bus_item)\n state_name = self.get_state_name(future_cycle)\n pe_buffers_updated = {}\n for pe_id in self.pes:\n if pe_id == dst_comp:\n pe_buffers_updated[pe_id] = copy.deepcopy(buffer)\n else:\n 
pe_buffers_updated[pe_id] = copy.deepcopy(pe_buffers[pe_id])\n pe_buffers_updated['pegb'] = copy.copy(pe_buffers[\"pegb\"])\n metadata = {\"buffer\": copy.deepcopy(pe_buffers_updated)}\n self.update_cycle_state(future_cycle, state_name, metadata)\n\n # self.pegb_read_buffer_written.append(dst_comp)\n\n elif self.component_subtype == \"PENB\":\n read_buffer = self.get_cycle_buffer(future_cycle)\n #print(read_buffer)\n buffer = read_buffer[\"read\"]\n buffer.append(bus_item)\n state_name = self.get_state_name(future_cycle)\n metadata = {'buffer': {'read': copy.deepcopy(buffer)}}\n self.update_cycle_state(future_cycle, state_name, metadata)\n elif self.component_subtype == \"PUGB\":\n pugb = self.get_cycle_buffer(future_cycle)\n pu_buffer = pugb[dst_comp]\n if dst_comp == 'bus':\n pugb['bus'] = bus_item\n else:\n if src_comp == 'bus':\n pu_buffer['read'].append(bus_item)\n else:\n pu_buffer['write'].append(bus_item)\n\n state_name = self.get_state_name(future_cycle)\n pugb_updated = {}\n for key in self.pus:\n if key == dst_comp:\n pugb_updated[key] = copy.deepcopy(pu_buffer)\n else:\n pugb_updated[key] = copy.deepcopy(pugb[key])\n pugb_updated['bus'] = copy.deepcopy(pugb['bus'])\n metadata = {'buffer': pugb_updated}\n self.update_cycle_state(future_cycle, state_name, metadata)\n elif self.component_subtype == \"PUNB\":\n buffer = self.get_cycle_buffer(future_cycle)\n read_buffer = buffer[\"read\"]\n read_buffer.append(bus_item)\n state_name = self.get_state_name(future_cycle)\n metadata = {'buffer': {'read': copy.deepcopy(read_buffer)}}\n self.update_cycle_state(future_cycle, state_name, metadata)\n else:\n buffer = self.get_cycle_buffer(future_cycle)\n buffer.append(bus_item)\n\n state_name = self.get_state_name(future_cycle)\n metadata = {\"buffer\": copy.deepcopy(buffer)}\n self.update_cycle_state(future_cycle, state_name, metadata)\n\n return self.get_cycle_state(cycle)", "def update_display(self):\n self.lick_plot_0.setData(self.k+self.T,self.buffer[:,1]) \n self.lick_plot_1.setData(self.k+self.T,self.buffer[:,2]) \n self.breathing_plot.setData(self.k+self.T,self.buffer[:,0]) \n \n if self.settings.movie_on.value():\n self.camera_image.setImage(self.camera.read())\n if self.settings.save_movie.value():\n self.camera.write()\n \n #print(self.buffer_h5.size)", "def update(self):\n\t\t# If being controlled by COM\n\t\tif self.controled_by_com :\n\t\t\t# Substract 1 from the update counter\n\t\t\tself.update_counter -= 1\n\t\t\t# If the update counter reaches zero\n\t\t\tif self.update_counter == 0. 
:\n\t\t\t\t# then ask for an action \n\t\t\t\tif self.intermediate_phase is False :\n\t\t\t\t\tself.action_required = True \n\t\t\t\t\t\t\n\t\t\t\t# if during a change\n\t\t\t\t# then make the change\n\t\t\t\tif self.intermediate_phase is True : \n\t\t\t\t\tself.action_required = False\n\t\t\t\t\tself._color_changer() #Make the change in the Simulator\n\t\telse :\n\t\t\tpass", "def update(self, idx, x):\n while idx < len(self.bit):\n self.bit[idx] += x\n idx |= idx + 1", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_struct_6f.pack(_x.major_ax, _x.minor_ax, _x.coup_strength, _x.limit_cycle, _x.forward_velocity, _x.curvature))\n length = len(self.x_offset)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.x_offset))\n length = len(self.y_offset)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.y_offset))\n length = len(self.coupling_1)\n buff.write(_struct_I.pack(length))\n pattern = '<%si'%length\n buff.write(struct.pack(pattern, *self.coupling_1))\n length = len(self.coupling_2)\n buff.write(_struct_I.pack(length))\n pattern = '<%si'%length\n buff.write(struct.pack(pattern, *self.coupling_2))\n length = len(self.coupling_3)\n buff.write(_struct_I.pack(length))\n pattern = '<%si'%length\n buff.write(struct.pack(pattern, *self.coupling_3))\n length = len(self.coupling_4)\n buff.write(_struct_I.pack(length))\n pattern = '<%si'%length\n buff.write(struct.pack(pattern, *self.coupling_4))\n length = len(self.coupling_5)\n buff.write(_struct_I.pack(length))\n pattern = '<%si'%length\n buff.write(struct.pack(pattern, *self.coupling_5))\n length = len(self.coupling_6)\n buff.write(_struct_I.pack(length))\n pattern = '<%si'%length\n buff.write(struct.pack(pattern, *self.coupling_6))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(_x))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(_x))))", "def updateATOM3instance( self, atom3i ):\r\n self.cb = atom3i.cb \r\n self.atom3i = atom3i", "def Cls(self):\n self.Bus.Write_uInt8(self.Address,0x10,0x00)", "def acc_b_v(self):\r\n return self._acc_b_v", "def do_BA2(kp_3d, kp_2d, des, comp_list, H, map_3d, map_2d, map_des, map_cam, map_view, my_update, col, col2, my_max, BA=0):\n # Setting the Format of inputs for using BA modules\n camera_params, points_3d, points_2d, camera_ind, points_ind, final_l1, final_l2, low_bound, up_bound, map_des, map_2d = get_things1(kp_3d, kp_2d, des, comp_list, H, map_3d, map_2d, map_des, map_cam, map_view, my_max)\n n_cameras = camera_params.shape[0]\n n_points = points_3d.shape[0]\n n = 9 * n_cameras + 3 * n_points\n m = 2 * points_2d.shape[0]\n # Optimisation Variable\n x0 = np.hstack((camera_params.ravel(), points_3d[:, 0:3].ravel()))\n resx = x0.copy()\n if(BA==1):\n # Standard BA Module\n f0 = fun(x0, n_cameras, n_points, camera_ind, points_ind, points_2d[:,:2], points_2d[:,2])\n A = bundle_adjustment_sparsity(n_cameras, n_points, camera_ind, points_ind)\n t0 = time.time()\n\n res = least_squares(fun, x0, jac_sparsity=A, bounds=(low_bound, up_bound), verbose=2, x_scale='jac', ftol=1e-4, method='trf',\n args=(n_cameras, n_points, camera_ind, points_ind, points_2d[:,:2], points_2d[:,2]))\n t1 = time.time()\n\n resx = res.x\n # Updating the Map with updated points and transformations\n my_min = 0\n my_max = np.max(camera_ind)+1\n H_op = np.zeros((3,4))\n H_op[0:3,0:3] = 
R.from_rotvec(resx[(my_max-1)*9:(my_max-1)*9+3]).as_matrix()\n H_op[0:3,3] = resx[(my_max-1)*9+3:(my_max-1)*9+6] # Updating the final transformation\n \n final_pts = np.array(resx[my_max*9:]).reshape(-1,3)\n ini_pts = np.array(x0[my_max*9:]).reshape(-1,3)\n map_view = np.vstack((map_view,resx[(my_max-1)*9:(my_max-1)*9+6])) # Updating Transformations in the map\n\n for i in range(my_min,my_max-1):\n map_view[i] = resx[i*9 : i*9+6]\n update_list = []\n count = 0\n count1 = 0\n for i in range(len(final_l1)):\n # Identifying the Map points\n if(final_l2[i]==1):\n update_list.append(final_l1[i])\n if(final_l2[i]==0):\n count1 += 1\n err = np.sqrt(np.sum(np.square((final_pts[points_ind[i]] - ini_pts[points_ind[i]]).ravel()))/3)\n map_3d[final_l1[i]] = final_pts[points_ind[i]] # Updating the map points\n if(np.max(map_cam[final_l1[i]])!=my_max-1):\n map_cam[final_l1[i]].append(my_max-1) # Updating the map views\n count +=1\n \n # Adding the Notseen points to the Map\n update_list = np.array(update_list)\n l2 = np.unique(np.sort(update_list))\n if(my_update==1):\n l1 = []\n l2 = []\n new_3d = []\n new_2d = []\n new_cam = []\n new_view = []\n new_des = []\n new_col = []\n l2 = np.unique(np.sort(update_list))\n j = 0\n for i in range(len(kp_2d)):\n if(i == l2[j]):\n j += 1\n if(j==len(l2)):\n j = 0\n else:\n pt = (np.linalg.inv(H_op[0:3,0:3])@(kp_3d[i].T - H_op[:,3]))\n new_3d.append(pt)\n new_2d = []\n new_cam = []\n new_des.append(des[i])\n new_2d.append(kp_2d[i])\n new_cam.append(my_max-1)\n new_col.append(col2[i])\n map_2d.append(new_2d)\n map_cam.append(new_cam)\n\n new_3d = np.array(new_3d)\n new_des = np.array(new_des)\n new_col = np.array(new_col)\n map_3d = np.vstack((map_3d,new_3d))\n map_des = np.vstack((map_des,new_des))\n col = np.vstack((col,new_col))\n\n return H_op, map_3d, map_2d, map_des, map_cam, map_view, col, my_max-1, len(l2)", "def ADD_Vx_byte(self, x, byte):\n\t\tself.V[x] += byte\n\t\tself.V[x] &= 255", "def ball_increase_velocity():\n global ball_vel\n ball_vel[0] = ball_vel[0] * 1.10\n ball_vel[1] = ball_vel[1] * 1.10", "def add(self, item):\n self.num_item += 1\n indexs = self.__get_indexs(item)\n for index in indexs:\n self.filter_bitarray[index] = True", "def obtain_data(self):\n ##MODIFY THIS\n #ipdb.set_trace()\n print('obtain_data')\n print(self.enabler)\n print(self.index)\n helper = '>'+str(1+int(self.chann_span.get()))+'Q'\n print('helper='+helper)\n while(self.enabler):\n #print('size'+str(1+int(self.chann_span.get())))\n #print('offset'+str(self.index-self.index_offset))\n A2 = struct.unpack(helper, fpga.read('A2', (1+int(self.chann_span.get()))*8, ((self.index-self.index_offset)*8))) \n #print(A2)\n #print(str(10*np.log10(A2))+'dB')\n self.amp_a2[0] = np.mean(A2)\n self.amp_a2 = np.roll(self.amp_a2, -1)\n B2 = struct.unpack(helper, fpga.read('B2', (1+int(self.chann_span.get()))*8, ((self.index-self.index_offset)*8)))\n self.amp_b2[0] = np.mean(B2)\n self.amp_b2 = np.roll(self.amp_b2, -1)\n AB_re = struct.unpack(helper, fpga.read('AB_re', (1+int(self.chann_span.get()))*8, ((self.index-self.index_offset)*8)))\n self.ab_re[0] = np.mean(AB_re)\n self.ab_re = np.roll(self.ab_re, -1)\n AB_im = struct.unpack(helper, fpga.read('AB_im', (1+int(self.chann_span.get()))*8, ((self.index-self.index_offset)*8)))\n self.ab_im[0] = np.mean(AB_im)\n self.ab_im = np.roll(self.ab_im, -1) \n # print('RE:' + str(self.ab_re)+ '\\t IM:' +str(self.ab_im))\n log_a = 10*np.log10(np.mean(self.amp_a2)+1.0)\n log_b = 10*np.log10(np.mean(self.amp_b2)+1.0) \n ang = 
np.rad2deg(np.arctan2(np.mean(self.ab_im), np.mean(self.ab_re))) #review the way of avg this... i dont know if its the most correct way to do it...\n self.a2.set(log_a)\n self.b2.set(log_b)\n self.ang.set(ang)\n self.amp_rel.set(log_a-log_b)\n return 1", "def update(self):\n\n bumperCode = self.robot.getBumperStatus()\n if bumperCode == 2: # Left side of bumper was hit\n self.setVector(0.4, 220)\n elif bumperCode == 1: # should be right\n self.setVector(0.4, 160)\n elif bumperCode == 3: # should be both\n self.setVector(0.4, 180)\n else:\n self.setVector(0.0, 0.0)", "def update (self):\n\t\tidx = self.idx\n\t\tC = self.C[idx]\t\t# choice\n\t\tPE = self.PE[idx]\t# choice PE\n\t\talpha = self.alpha\t# learning rate\n\n\t\t# don't need to update anything for UCB\n\t\tif self.UCB_samplemean:\n\t\t\treturn\n\n\t\tif not self.gamble:\n\t\t\t# carry over values for the unselected options\n\t\t\tself.Q[idx+1,:] = self.Q[idx,:]\n\t\t\t# check if two learning rates (pos/neg)\n\t\t\tif isinstance(alpha,float):\n\t\t\t\tself.Q[idx+1,C] = self.Q[idx,C] + alpha*PE\n\t\t\telse:\n\t\t\t\tif PE > 0:\n\t\t\t\t\tself.Q[idx+1,C] = self.Q[idx,C] + alpha[0]*PE\n\t\t\t\telse:\n\t\t\t\t\tself.Q[idx+1,C] = self.Q[idx,C] + alpha[1]*PE\n\n\t\telse:\n\t\t\t# check if two learning rates (pos/neg)\n\t\t\t# PE = 0 if gamble isn't chosen\n\t\t\tif isinstance(alpha,float):\n\t\t\t\tself.Q[idx+1] = self.Q[idx] + alpha*PE\n\t\t\telse:\n\t\t\t\tif PE > 0:\n\t\t\t\t\tself.Q[idx+1] = self.Q[idx] + alpha[0]*PE\n\t\t\t\telse:\n\t\t\t\t\tself.Q[idx+1] = self.Q[idx] + alpha[1]*PE", "def updateLayer(self):\n if self.num_layers == 0:\n self.box[0].setDisabled(False)\n for i in range(1,4):\n self.box[i].setDisabled(True)\n elif self.num_layers == 1:\n self.box[0].setDisabled(False)\n self.box[1].setDisabled(False)\n for i in range(2,4):\n self.box[i].setDisabled(True)\n elif self.num_layers == 2:\n self.box[0].setDisabled(False)\n self.box[1].setDisabled(False)\n self.box[2].setDisabled(False)\n self.box[3].setDisabled(True)\n else:\n self.box[0].setDisabled(False)\n self.box[1].setDisabled(False)\n self.box[2].setDisabled(False)\n self.box[3].setDisabled(False)", "def test_set_vx_to_vx_and_vy(self, cpu):\n for x in range(0x0, 0xF):\n for y in range(0x0, 0xF):\n if x != y:\n cpu.opcode = 0x8002 | (x << 8) | (y << 4)\n for v1 in range(0x0, 0xFF):\n for v2 in range(0x0, 0xFF):\n cpu.V_register[x] = v1\n cpu.V_register[y] = v2\n cpu.set_vx_to_vx_and_vy()\n assert(cpu.V_register[x] == v1 & v2)", "def update(self):\n if self.black + self.white == self.SIZE*self.SIZE:\n if self.black > self.white:\n self.gc.black_wins = True\n elif self.white > self.black:\n self.gc.white_wins = True\n else:\n self.gc.tie = True\n self.gc.black_num = self.black\n self.gc.white_num = self.white", "def enable_cl2_copy_ad9866(self):\n self.write_versa5(0x62,0x3b) ## Clock2 CMOS1 output, 3.3V\n self.write_versa5(0x2c,0x01) ## Enable aux output on clock 1\n self.write_versa5(0x31,0x0c) ## Use clock1 aux output as input for clock2\n self.write_versa5(0x63,0x01) ## Enable clock2", "def register(self):\n active = True\n self.rgb = colormodel.RGB(0, 255, 0)\n self.cmyk = a3.rgb_to_cmyk(self.rgb)\n assert (self.cmyk == None or type(self.cmyk) == colormodel.CMYK), 'rgb_to_cmyk does not return a CMYK object'\n self.hsv = a3.rgb_to_hsv(self.rgb)\n assert (self.hsv == None or type(self.hsv) == colormodel.HSV), 'rgb_to_hsv does not return a HSV object'\n self.update()" ]
[ "0.57576525", "0.55997807", "0.55879223", "0.5496893", "0.54818845", "0.5372455", "0.530587", "0.5240245", "0.51702", "0.5161561", "0.515714", "0.5140299", "0.51236033", "0.5092073", "0.50008583", "0.49901924", "0.49867123", "0.49156582", "0.4907488", "0.4875377", "0.48464125", "0.48419502", "0.4833743", "0.48262808", "0.47982398", "0.47925672", "0.47923496", "0.4788469", "0.4784059", "0.47831526", "0.47665086", "0.47551143", "0.47544804", "0.4727651", "0.47111702", "0.4708179", "0.47010297", "0.4699243", "0.4695213", "0.46919018", "0.4678126", "0.4677694", "0.4675437", "0.46692404", "0.4665881", "0.46649173", "0.46619418", "0.46561614", "0.4653476", "0.46502528", "0.46469936", "0.46400797", "0.46396184", "0.4632952", "0.46325243", "0.46308628", "0.46291208", "0.4622645", "0.46222088", "0.46196374", "0.46135402", "0.46100345", "0.46092328", "0.46092328", "0.46088716", "0.46088716", "0.46088716", "0.46088552", "0.46073446", "0.46025768", "0.4601413", "0.45965648", "0.45940492", "0.4593788", "0.45915288", "0.45871535", "0.45786935", "0.45751864", "0.45746085", "0.45745906", "0.4574533", "0.45738986", "0.45704305", "0.45679566", "0.4564826", "0.45610943", "0.4558959", "0.45577624", "0.45533672", "0.45443857", "0.4542486", "0.45360252", "0.45287415", "0.4528308", "0.45220605", "0.4521109", "0.45205554", "0.4519488", "0.4512641", "0.45124745" ]
0.65785253
0
Sets the scale control bitword: 0 = x and y scales frozen; bit +1 = x is interactive; bit +2 = y is interactive (bit value 0/1 means frozen/interactive).
def set_scale_control(self, scale_ctl=3):
    self._scale_ctl = scale_ctl
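A minimal usage sketch for the bitword above, assuming a hypothetical host class (ScaledView) and helper predicates that are not part of the original snippet; only the bit semantics described in the query (bit +1 for x, bit +2 for y) are taken as given.

class ScaledView:
    # Hypothetical container for the scale-control bitword (illustration only).
    def __init__(self):
        self._scale_ctl = 3  # default: both axes interactive

    def set_scale_control(self, scale_ctl=3):
        self._scale_ctl = scale_ctl

    def x_is_interactive(self):
        return bool(self._scale_ctl & 1)  # bit +1 controls the x scale

    def y_is_interactive(self):
        return bool(self._scale_ctl & 2)  # bit +2 controls the y scale


view = ScaledView()
view.set_scale_control(0)   # both scales frozen
view.set_scale_control(1)   # x interactive, y frozen
view.set_scale_control(3)   # both interactive
print(view.x_is_interactive(), view.y_is_interactive())  # True True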
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _force_rescale(self, setpoint_x, setpoint_y):", "def scale(self,id,x,y,s):\n if id not in self.elements.keys():\n print(\"Id input not registered! Please check your process\")\n return False\n element=self.elements[id]\n state=element.scale(self.h-1-y,x,s,self.w,self.h)\n if state==True:\n self.canvas=np.ones((self.h,self.w,3),dtype=np.uint8)*255\n self.sync=False\n return state", "def _scale_setter(self, value: float) -> None:\n self.uaxis.scale = value\n self.vaxis.scale = value", "def setPlotScaling(x,y):\n dislin.trfscl(x,y)", "def setScale(self, mode='ACC', scale=0):\r\n\t\tif mode.upper() == 'ACC':\r\n\t\t\treg = 0x1C\r\n\t\telif mode.upper() == 'GYR':\r\n\t\t\treg = 0x1B\t\t\r\n\t\telse:\r\n\t\t\treturn False\r\n\t\tcurrentVal = self.read(reg)\r\n\t\tcurrentVal = self.dec2BinList(currentVal)\r\n\t\tscale = self.dec2BinList(value=scale,bits=2)\r\n\t\tcurrentVal[3] = scale[0]\r\n\t\tcurrentVal[4] = scale[1]\r\n\t\tcurrentVal = self.binList2Dec(currentVal)\r\n\t\tself.write(reg, currentVal)", "def scale(self):", "def set_scales(self):\r\n self.canvas.update()\r\n self.dxmin = self.dmargin\r\n self.dymin = self.dmargin\r\n self.dxmax = self.canvas.winfo_width() - self.dmargin - 1\r\n self.dymax = self.canvas.winfo_height() - self.dmargin - 1\r\n\r\n # Flip the Y coordinates to invert the result.\r\n if self.y_is_flipped:\r\n self.dymin, self.dymax = self.dymax, self.dymin\r\n\r\n self.xscale = (self.dxmax - self.dxmin) / (self.wxmax - self.wxmin)\r\n self.yscale = (self.dymax - self.dymin) / (self.wymax - self.wymin)\r\n\r\n # Calculate 1 pixel in world coordinates.\r\n self.xpix = 1 / self.xscale\r\n self.ypix = 1 / self.yscale", "def RatingScale(self):\r\n\t\tself.timer.reset()\r\n\t\tself.respKey = []\r\n\t\tself.choice = []\r\n\t\tself.final_choice = []\r\n\t\tself.keyList = self.respKeys + self.acceptKey\r\n\t\tself.hit_accept = False\r\n\t\tself.y = -0.2 # labels y position\r\n\r\n\t\tself._initScaleTitle()\r\n\t\tself._initExtraText()\r\n\t\tself._initScaleInstruct()\r\n\t\tself._initLine(start = (self.xLeft, -0.05), end = (self.xRight,-0.05))\r\n\t\tself._initScaleMarker(fillColor= self.markerColor)\r\n\t\tself._initTickMarks()\r\n\t\tself._initlabelsList()\r\n\t\tself._initlabelsText()\r\n\t\tself._initChoiceText()\r\n\t\tself._initAcceptText()\r\n\r\n\t\tself.scale_marker.setPos((self.xMid,0), log=None)\r\n\r\n\t\tself.scale_title.draw()\r\n\t\tself.scale_extra_text.draw()\r\n\t\tself.scale_instr_text.draw()\r\n\t\tself.scale_line.draw()\r\n\t\tself.scale_marker.draw()\r\n\t\tself.scale_labels_text.draw()\r\n\r\n\t\tfor label in self.labelsList:\r\n\t\t\tself.scale_labels.setText(label)\r\n\t\t\tself.x = self.labelsPosList[self.labelsList.index(label)]\r\n\t\t\tself.scale_labels.setPos((self.x, self.y),log=None)\r\n\t\t\tself.scale_labels.draw()\r\n\r\n\t\tfor tick in self.tickList:\r\n\t\t\tself.tick_marks.setStart((tick,-0.05), log=None)\r\n\t\t\tself.tick_marks.setEnd((tick,-0.1), log=None)\r\n\t\t\tself.tick_marks.draw()\r\n\r\n\t\tself.win.flip()\r\n\r\n\t\twhile self.hit_accept == False or self.choice == []:\r\n\t\t\tself.respKey = event.getKeys(keyList = self.keyList)\r\n\t\t\tif self.respKey != [] and set(self.respKey).issubset(self.respKeys):\r\n\t\t\t\tself.keyIndex = self.respKeys.index(self.respKey[-1])\r\n\t\t\t\tself.xPos = self.tickList[self.keyIndex]\r\n\t\t\t\tself.choice = self.tickNumber[self.keyIndex]\r\n\t\t\t\tself.choice_text.setText(self.choiceText + 
str(self.choice))\r\n\t\t\t\tself.scale_title.draw()\r\n\t\t\t\tself.scale_extra_text.draw()\r\n\t\t\t\tself.scale_instr_text.draw()\r\n\t\t\t\tself.scale_line.draw()\r\n\t\t\t\tself.scale_marker.setPos((self.xPos,0), log=None)\r\n\t\t\t\tself.scale_marker.draw()\r\n\t\t\t\tself.scale_labels_text.draw()\r\n\t\t\t\tself.choice_text.draw()\r\n\t\t\t\tself.accept_text.draw()\r\n\t\t\t\tfor label in self.labelsList:\r\n\t\t\t\t\tself.scale_labels.setText(label)\r\n\t\t\t\t\tself.x = self.labelsPosList[self.labelsList.index(label)]\r\n\t\t\t\t\tself.scale_labels.setPos((self.x, self.y),log=None)\r\n\t\t\t\t\tself.scale_labels.draw()\r\n\t\t\t\tfor tick in self.tickList:\r\n\t\t\t\t\tself.tick_marks.setStart((tick,-0.05), log=None)\r\n\t\t\t\t\tself.tick_marks.setEnd((tick,-0.1), log=None)\r\n\t\t\t\t\tself.tick_marks.draw()\r\n\t\t\t\tself.win.flip()\r\n\t\t\tif self.respKey != [] and set(self.respKey).issubset(self.acceptKey):\r\n\t\t\t\tif self.choice != []:\r\n\t\t\t\t\tself.hit_accept = True\r\n\t\t\ttime.sleep(0.2)\r\n\t\tself.response_time = round(self.timer.getTime(),2)\r\n\t\tself.win.setUnits(self.savedUnits, log=None)\r\n\t\tevent.clearEvents('keyboard')\r\n\t\treturn(self.choice, self.response_time)", "def scaleBoard(self, scale):\n self.scaling = scale\n self.my_font.config(size=25 * self.scaling)\n self.reset_button.config(width=40 * self.scaling, height=40 * self.scaling, borderwidth=2 * self.scaling)\n self.board.updateBoardUI(self.scaling)", "def edit_scale(scale, direction):\n if direction in (up, shift_up, plus):\n scale = scale*2\n elif direction in (down, shift_down, minus):\n scale = scale/2\n return scale", "def scale(self, sx, sy):\n frameWidth *= sx\n frameHeight *= sy\n repaint()", "def set_scaling(self, scaling):\n self.scaling = scaling\n self.eff_box_size = int(self.box_size*self.scaling+0.5)", "def setScale(self, sx, sy=None, sz=None):\n self.transform.setScale(sx, sy, sz)", "def myscale(g, factor=1.0):\n g.setdata(factor * g.getdata())\n # if !g.frozen eq 0 then show", "def SetLogicalScale(*args, **kwargs):\n return _gdi_.DC_SetLogicalScale(*args, **kwargs)", "def b_scale_object():\n \n bpy.ops.transform.resize(value=(7.5,1,1), constraint_axis=(True,False,False))\n bpy.ops.transform.resize(value=(1,7.5,1), constraint_axis=(False,True,False))\n bpy.ops.transform.resize(value=(1,1,3.5), constraint_axis=(False,False,True))\n bpy.ops.object.transform_apply(scale=True)", "def scale(self, x, y, z) -> None:\n ...", "def setScalingMode(mode='down'):\n mdict = {'down':'DOWN','full':'FULL'}\n dislin.sclmod(mode)", "def Draw_Scale( self ):\r\n self.canvas_scale.delete(ALL)\r\n if(cb.longx != 0):\r\n value = str( round( cb.longx, 3 ) )\r\n self.canvas_scale.create_line( cb.xorigin,5,cb.xorigin + cb.xtotal,5 )\r\n splits = 10.0\r\n increment = cb.xtotal/splits\r\n for i in range(int(splits + 1)):\r\n self.canvas_scale.create_line( int(cb.xorigin+i*increment),1,int(cb.xorigin+i*increment),9 )\r\n if( self.filter_distance > cb.longx ):\r\n self.filter_distance = cb.longx\r\n x = cb.xtotal - self.filter_distance*cb.xtotal/cb.longx + cb.xorigin\r\n top = str(round(self.filter_distance,3))\r\n \r\n while len(top) < 5:\r\n top = top + \"0\"\r\n self.scale_text = self.canvas_scale.create_text( cb.xorigin + cb.xtotal + 10,1,anchor = \"nw\",text = top + \"/\" + value)\r\n self.scale_marker = self.canvas_scale.create_polygon( x,7, x+4,3, x-4,3, fill=self.highlight_color,outline=self.highlight_color )\r\n if( self.filter_line_on ):\r\n if(self.filter_line != 0 ):\r\n 
self.canvas_one.delete( self.filter_line )\r\n self.filter_line = self.canvas_one.create_line( x,0,x,self.ys, fill=self.highlight_color)", "def with_scale_op(self, scale):\n\t\tself.variables['scale'] = scale\n\t\treturn self", "def reset_limits(self):\n self.autoscale = True\n self.pixels.autoscale()", "def scale(self, scale):\n\t\tself._current_score *= scale", "def __init__(self,scale):\n self.scale = scale", "def _onToggleScale(self, event):\r\n if self.get_yscale() == 'log':\r\n self.set_yscale('linear')\r\n else:\r\n self.set_yscale('log')\r\n self.subplot.figure.canvas.draw_idle()", "def reset_scale(self) -> None:\n self._scale.set(self._start_val)", "def set_scale(self, xscale=None, yscale=None, zscale=None, reset_camera=True, render=True):\n if xscale is None:\n xscale = self.scale[0]\n if yscale is None:\n yscale = self.scale[1]\n if zscale is None:\n zscale = self.scale[2]\n self.scale = [xscale, yscale, zscale]\n\n # Reset all actors to match this scale\n for actor in self.actors.values():\n if hasattr(actor, 'SetScale'):\n actor.SetScale(self.scale)\n\n self.parent.render()\n if reset_camera:\n self.update_bounds_axes()\n self.reset_camera(render=render)\n self.Modified()", "def ScaleShape(shape, scale_x, scale_y):\n for i, pt in enumerate(shape.points):\n x, y = pt\n shape.points[i] = [scale_x * x, scale_y * y]", "def __init__(self,options,pos):\n self.options = options\n numobjects = pos.shape[1]\n plt.ion() # turn on interactive plotting mode\n dpi=72.0 # set dpi (I think this is appropriate on mac)\n # fig accepts size in inches\n # so divide desired pixel width, height by dpi to get inches\n w,h=(self.options.width/dpi,self.options.height/dpi)\n fig = plt.figure(1,figsize=(w,h),dpi=dpi)\n fig.clear()\n\n #w = self.options.width/fig.get_dpi() # desired width in inches\n #h = self.options.height/fig.get_dpi() # desired height in inches\n #fig.set_size_inches(w,h,forward=True) # last arg resizes the canvas to match\n\n self.ax = plt.axes()\n self.ax.set_xlim(self.options.xmin,self.options.xmax)\n self.ax.set_ylim(self.options.ymin,self.options.ymax)\n #pyplot.axis('scaled')\n\n # I don't know why axis('scaled') doesn't work here\n # But I think the next two commands are equivalent\n self.ax.set_aspect('equal', adjustable='box', anchor='C')\n self.ax.set_autoscale_on(False)\n\n #self.redraw()\n\n\n #facecolors = [cm.jet(x) for x in np.random.rand(len(vicon_objects))]\n facecolors = [cm.jet(x) for x in np.linspace(0,1,numobjects)]\n if self.options.visualize_switch_xy:\n if self.options.axis==1:\n self.ax.axvline(linewidth=4, c='k')\n else:\n self.ax.axhline(linewidth=4, c='k')\n self.col = plt.scatter(pos[:,1],pos[:,0],c=facecolors,s=3000)\n else:\n if self.options.axis==1:\n self.ax.axhline(linewidth=4, c='k')\n else:\n self.ax.axvline(linewidth=4, c='k')\n self.col = plt.scatter(pos[:,0],pos[:,1],c=facecolors,s=3000)\n\n # scores\n self.tpos = self.ax.text(0.75*self.options.xmax,0.75*self.options.ymin,str(50),\n size=72,color='k',ha='center',va='center')\n self.tneg = self.ax.text(0.75*self.options.xmin,0.75*self.options.ymin,str(50),\n size=72,color='k',ha='center',va='center')\n\n self.canvas = agg.FigureCanvasAgg(fig)\n self.canvas.draw()\n self.renderer = self.canvas.get_renderer()\n raw_data = self.renderer.tostring_rgb()\n\n pygame.init()\n \n self.window = pygame.display.set_mode((options.width,options.height), DOUBLEBUF)\n self.screen = pygame.display.get_surface()\n\n self.set_caption(\"Possession: Waiting for Vicon\")\n \n size = self.canvas.get_width_height()\n 
\n surf = pygame.image.fromstring(raw_data, size, \"RGB\")\n self.screen.blit(surf, (0,0))\n pygame.display.flip()", "def scale(self, scale):\n\n self._scale = scale", "def scale(self, scale):\n\n self._scale = scale", "def scale(self, scale=1):\n self.x *= scale\n self.y *= scale\n self.width *= scale\n self.height *= scale\n\n # Always update the corners after operation\n self.update_corners()\n return", "def scale(self, scale):\n self.coords = self.coords * scale\n return self", "def setscaling(self, scaling):\n\n self.__scaling = scaling", "def __init__(self, scale=False):\n self.scale = scale", "def setScaleX(self,startx,endx):\r\n if startx == endx:\r\n endx += 1\r\n self.scaleLock.acquire()\r\n self.scalex = [startx,endx]\r\n self.scaleLock.release()", "def setPxMode(self, b):\n self.setFlag(self.GraphicsItemFlag.ItemIgnoresTransformations, b)", "def scale(self, sx, sy):\n self._impl.scale(sx, sy)", "def __call__(self):\n self.brain._update_fscale(self.factor)\n for key in self.brain.keys:\n if self.widgets[key] is not None:\n self.widgets[key].set_value(self.brain._data[key])", "def _adjust_scale(self, value):\n if self._min_val <= value <= self._max_val:\n self._scale_var.set(value)\n self.update_label_text()", "def clickAutoscale(self, event):\n self.axes.autoscale_view()", "def setScaleY(self,starty,endy):\r\n if starty == endy:# Prevent /0 errors when scaling\r\n endy += 0.1\r\n self.scaleLock.acquire()\r\n self.scaley = [starty,endy]\r\n self.scaleLock.release()", "def setScaling(factor=1.0):\n dislin.sclfac(factor)", "def scale(self, state, action):\n control_action = action[..., : self._true_dim_action[0]]\n scale = super().scale(state, control_action)\n\n return scale", "def get_scale():\r\n\r\n \r\n return 0.5", "def _set_scale_factors_to_one(self):\n self.wnorm = 1.0\n self.hnorm = 1.0\n self.xnorm = 0.0\n self.ynorm = 0.0\n self.scale = 1.0", "def update_axis_scale(self, scale, axis='left'):\n self.plt.getAxis(axis).setScale(scale=scale)", "def test_set_scale():\n data = io.create_sample_Dataset()\n tmp = data.piv.set_scale(1.0)\n assert np.allclose(tmp[\"x\"], data[\"x\"])\n\n tmp = data.copy()\n tmp.piv.set_scale(2.0)\n tmp_mean = tmp[\"u\"].mean(dim=(\"t\", \"x\", \"y\")).values\n data_mean = data[\"u\"].mean(dim=(\"t\", \"x\", \"y\")).values\n assert np.allclose(tmp_mean / data_mean, 2.0)", "def scale(self, scale):\n \n scale_matrix = wf.scaleMatrix(scale, self.width/2, self.height/2, 0)\n self.transform(scale_matrix)", "def update_rescale_entry(self):\n if self.var_rescale_frame.get() == 0:\n self.checkbox_rescale_frame[\"text\"] = \"Rescale Frames\"\n self.rescale_factor_entry.config(state=\"disabled\")\n elif self.var_rescale_frame.get() == 1:\n self.checkbox_rescale_frame[\"text\"] = \"By a factor of: \"\n self.rescale_factor_entry.config(state=\"normal\")", "def change_scaling(self, scales=None, offsets=None) -> None:\n self.points.change_scaling(scales, offsets)\n\n self.header.scales = scales\n self.header.offsets = offsets", "def run_script():\n creator = EnergyScaleCreator(IMP, FONT_SIZE)\n creator.get_scaled_image(WIDTH_MIN)\n creator.extend_image()\n ticks = create_ticks(IMP, TICK_COUNT)\n creator.draw_scale(ticks)\n imp_extended = creator.image\n imp_extended.show()", "def scale(self, x, y, sticky=False):\n\n if sticky:\n self.stickyScale = (x, y)\n QGraphicsView.scale(self, x, y)\n else:\n QGraphicsView.scale(self, x * self.stickyScale[0], y * self.stickyScale[1])", "def manipScaleContext(*args, activeHandle: Union[int, bool]=0, alignAlong: List[float, 
float,\n float]=None, constrainAlongNormal: bool=True, currentActiveHandle:\n Union[int, bool]=0, editPivotMode: bool=True, editPivotPosition:\n bool=True, exists: bool=True, image1: Union[AnyStr, bool]=\"\", image2:\n Union[AnyStr, bool]=\"\", image3: Union[AnyStr, bool]=\"\", lastMode:\n Union[int, bool]=0, manipVisible: bool=True, mode: Union[int, bool]=3,\n orientAxes: Union[List[float, float, float], bool]=None, orientObject:\n AnyStr=\"\", orientTowards: List[float, float, float]=None, pinPivot:\n bool=True, pivotOriHandle: bool=True, position: bool=True, postCommand:\n Script=None, postDragCommand: List[Script, AnyStr]=None, preCommand:\n Script=None, preDragCommand: List[Script, AnyStr]=None,\n preserveChildPosition: bool=False, preserveUV: bool=True,\n preventNegativeScale: bool=True, reflection: bool=True, reflectionAbout:\n int=0, reflectionAxis: int=0, reflectionTolerance: float=0.0, scale:\n Union[List[float, float, float], bool]=None, snap: bool=True,\n snapPivotOri: bool=True, snapPivotPos: bool=True, snapRelative: bool=True,\n snapValue: Union[float, bool]=0.0, tweakMode: bool=True, useManipPivot:\n bool=True, useObjectPivot: bool=True, xformConstraint: Union[AnyStr,\n bool]=\"\", q=True, query=True, e=True, edit=True, **kwargs)->Union[AnyStr,\n Any]:\n pass", "def update_zoom_plot(self):\n self.plot_zoom.setXRange(*self.linear_region.getRegion(), padding=0)", "def on_scale (self):\n\t\tif self.has_started:\n\t\t\tself.init_buffers()\n\t\t\tself.redraw_foreground()\n\t\t\tself.redraw_background()\n\n\t\tif self.expand2 == _('Use a scrollbar'):\n\t\t\tself.width = int((self.icon_size * 2 * self.rows + ((self.border_size+self.shadow_size)*2)+15 ) + 24/self.scale)\n\t\t\tself.update_scrollbar()", "def toggle_auto_scale(self, checked):\n logger.debug(\"Set auto scale to %s.\", checked)\n self.auto_scale = checked", "def loadData(self, data):\n\n super(SimpleControlComponentGuide, self).loadData( data )\n\n self.ctrlSizeInputAttr.setValue(data[\"ctrlSize\"])\n self.mainCtrl.xfo = data[\"ctrlXfo\"]\n\n scaleValue = data[\"ctrlSize\"]\n self.mainCtrl.setShape('square')\n self.mainCtrl.rotatePoints(90, 0, 0)\n self.mainCtrl.scalePoints(Vec3(scaleValue, scaleValue, scaleValue))\n\n return True", "def set_scale(self, scale):\n scale = float(scale)\n if scale <= 1:\n raise ValueError('The scale parameter must exceed 1.')\n self._a = scale", "def scale(self, scale_x: float, scale_y: float) -> None:\n self.tensor[:, 0::2] *= scale_x\n self.tensor[:, 1::2] *= scale_y", "def interactive(self, name='grid', bind_x=True, bind_y=True):\n encodings = []\n if bind_x:\n encodings.append('x')\n if bind_y:\n encodings.append('y')\n self.selection = {name: {'bind': 'scales',\n 'type': 'interval',\n 'encodings': encodings}}\n return self", "def yscale(value):\n impl.yscale(**locals())", "def __init__(self, initial_x:int, initial_y:int, width:int, height:int, power_type:str, time_to_live:int, debug:bool = False):\n\n #Call the superclass contructor\n super().__init__(initial_x, initial_y, width, height, PowerUp.sprites[power_type], debug)\n\n #Store variables\n self.power_type = power_type\n self.ttl = time_to_live\n\n #Scale the image\n self.scale(30,30)", "def update_simulate_plot(self):\n a = self.plot_zoom.getViewBox().viewRange()\n self.plot_simulate.setXRange(a[0][0], a[0][1])\n self.plot_simulate.setYRange(a[1][0], a[1][1])", "def plane_scale(self, scale):\n cmd = '{}testPlaneScale {}'.format(self.console, scale)\n self.write_command(cmd)", "def onScales(self):\n # Ensure that we can work\n 
plt = Plot.getPlot()\n if not plt:\n self.updateUI()\n return\n # Get again all the subwidgets (to avoid PySide Pitfalls)\n mw = self.getMainWindow()\n form = mw.findChild(QtGui.QWidget, \"TaskPanel\")\n form.all = self.widget(QtGui.QCheckBox, \"allAxes\")\n form.xAuto = self.widget(QtGui.QCheckBox, \"xAuto\")\n form.yAuto = self.widget(QtGui.QCheckBox, \"yAuto\")\n form.xSMin = self.widget(QtGui.QLineEdit, \"xMin\")\n form.xSMax = self.widget(QtGui.QLineEdit, \"xMax\")\n form.ySMin = self.widget(QtGui.QLineEdit, \"yMin\")\n form.ySMax = self.widget(QtGui.QLineEdit, \"yMax\")\n\n axesList = [plt.axes]\n if form.all.isChecked():\n axesList = plt.axesList\n if not self.skip:\n self.skip = True\n # X axis\n if form.xAuto.isChecked():\n for ax in axesList:\n ax.set_autoscalex_on(True)\n form.xSMin.setEnabled(False)\n form.xSMax.setEnabled(False)\n lim = plt.axes.get_xlim()\n form.xSMin.setText(str(lim[0]))\n form.xSMax.setText(str(lim[1]))\n else:\n form.xSMin.setEnabled(True)\n form.xSMax.setEnabled(True)\n try:\n xMin = float(form.xSMin.text())\n except:\n xMin = plt.axes.get_xlim()[0]\n form.xSMin.setText(str(xMin))\n try:\n xMax = float(form.xSMax.text())\n except:\n xMax = plt.axes.get_xlim()[1]\n form.xSMax.setText(str(xMax))\n for ax in axesList:\n ax.set_xlim((xMin, xMax))\n # Y axis\n if form.yAuto.isChecked():\n for ax in axesList:\n ax.set_autoscaley_on(True)\n form.ySMin.setEnabled(False)\n form.ySMax.setEnabled(False)\n lim = plt.axes.get_ylim()\n form.ySMin.setText(str(lim[0]))\n form.ySMax.setText(str(lim[1]))\n else:\n form.ySMin.setEnabled(True)\n form.ySMax.setEnabled(True)\n try:\n yMin = float(form.ySMin.text())\n except:\n yMin = plt.axes.get_ylim()[0]\n form.ySMin.setText(str(yMin))\n try:\n yMax = float(form.ySMax.text())\n except:\n yMax = plt.axes.get_ylim()[1]\n form.ySMax.setText(str(yMax))\n for ax in axesList:\n ax.set_ylim((yMin, yMax))\n plt.update()\n self.skip = False", "def SetUserScale(*args, **kwargs):\n return _gdi_.DC_SetUserScale(*args, **kwargs)", "def set_base_transforms(self):\n self._curr_xlim = self.axes.get_xlim()\n self._curr_ylim = self.axes.get_ylim()", "def reset_limits(self):\n self.autoscale = True\n self.camera.autoscale()", "def tweak_base(*args):\n\n up = utils.arrow_up\n down = utils.arrow_down\n left = utils.arrow_left\n right = utils.arrow_right\n shift_up = utils.shift_arrow_up\n shift_down = utils.shift_arrow_down\n plus = utils.plus\n minus = utils.minus\n scale = 0.1\n abs_status = '{}: {:.4f}'\n exp_status = '{}: {:.4e}'\n\n if len(args) == 1:\n move_keys = (left, right)\n scale_keys = (up, down, plus, minus, shift_up, shift_down)\n elif len(args) == 2:\n move_keys = (left, right, up, down)\n scale_keys = (plus, minus, shift_up, shift_down)\n\n def show_status():\n if scale >= 0.0001:\n template = abs_status\n else:\n template = exp_status\n text = [template.format(mot.name, mot.wm()) for mot in args]\n text.append(f'scale: {scale}')\n print('\\x1b[2K\\r' + ', '.join(text), end='')\n\n def usage():\n print() # Newline\n if len(args) == 1:\n print(\" Left: move x motor backward\")\n print(\" Right: move x motor forward\")\n print(\" Up or +: scale*2\")\n print(\" Down or -: scale/2\")\n else:\n print(\" Left: move x motor left\")\n print(\" Right: move x motor right\")\n print(\" Down: move y motor down\")\n print(\" Up: move y motor up\")\n print(\" + or Shift_Up: scale*2\")\n print(\" - or Shift_Down: scale/2\")\n print(\" Press q to quit.\"\n \" Press any other key to display this message.\")\n print() # Newline\n\n def 
edit_scale(scale, direction):\n \"\"\"Function used to change the scale.\"\"\"\n if direction in (up, shift_up, plus):\n scale = scale*2\n elif direction in (down, shift_down, minus):\n scale = scale/2\n return scale\n\n def movement(scale, direction):\n \"\"\"Function used to know when and the direction to move the motor.\"\"\"\n try:\n if direction == left:\n args[0].umvr(-scale, log=False, newline=False)\n elif direction == right:\n args[0].umvr(scale, log=False, newline=False)\n elif direction == up:\n args[1].umvr(scale, log=False, newline=False)\n elif direction == down:\n args[1].umvr(-scale, log=False, newline=False)\n except Exception as exc:\n logger.error('Error in tweak move: %s', exc)\n logger.debug('', exc_info=True)\n\n start_text = ['{} at {:.4f}'.format(mot.name, mot.wm()) for mot in args]\n logger.info('Started tweak of ' + ', '.join(start_text))\n\n # Loop takes in user key input and stops when 'q' is pressed\n is_input = True\n while is_input is True:\n show_status()\n inp = utils.get_input()\n if inp in ('q', None):\n is_input = False\n elif inp in move_keys:\n movement(scale, inp)\n elif inp in scale_keys:\n scale = edit_scale(scale, inp)\n else:\n usage()\n print()\n logger.info('Tweak complete')", "def autoscale(self, A):\n self.vmin = ma.min(A)\n self.vmax = ma.max(A)", "def change_zoom(self, b):\n\n x_mid = int(self.ff[0].info['xres'] / 2)\n y_mid = int(self.ff[0].info['yres'] / 2)\n\n x = x_mid - self.x_crop_slider.value\n\n if self.y_crop.value is True:\n y = y_mid - self.y_crop_slider.value\n else:\n y = y_mid - self.x_crop_slider.value\n\n x0 = x_mid - x\n x1 = x_mid + x\n y0 = y_mid - y\n y1 = y_mid + y\n\n self.x_range = [x0, x1]\n self.y_range = [y0, y1]\n\n self.ax.set_xlim([x0, x1])\n self.ax.set_ylim([y0, y1])", "def scale_invert(self):", "def set_hyper_parameters(self, x):\n self.set_scale(x[0])", "def __scale_constraint(c, v):\n if c.equality:\n c.set_value((c.lower * v, c.body * v))\n else:\n c.set_value(\n (__none_left_mult(c.lower, v), c.body * v, __none_left_mult(c.upper, v))\n )", "def testDefaultDataScaling(self):\n orig_scale = util.ScaleData\n util.ScaleData = self.FakeScale\n try:\n self.AddToChart(self.chart, [2, 3, 5, 7, 11])\n self.chart.auto_scale.buffer = 0\n # This causes scaling to happen & calls FakeScale.\n self.chart.display.Url(0, 0)\n self.assertEqual(2, self.min)\n self.assertEqual(11, self.max)\n finally:\n util.ScaleData = orig_scale", "def setScaledContents(self, scaled):\n self._scaled_contents = scaled\n self.update()", "def setScaleMode(self, mode):\n if mode != self.__scale_mode and mode in (self.ScaleModeGlobal, self.ScaleModeLocal):\n self.__scale_mode = mode\n self.__scaled_datasets = None\n self.__axisDomains = None\n self.dataChanged.emit()", "def scale(self, factor):\n self.b = factor * self.b", "def testExplicitDataScaling(self):\n orig_scale = util.ScaleData\n util.ScaleData = self.FakeScale\n try:\n self.AddToChart(self.chart, [2, 3, 5, 7, 11])\n self.chart.left.min = -7\n self.chart.left.max = 49\n # This causes scaling to happen & calls FakeScale.\n self.chart.display.Url(0, 0)\n self.assertEqual(-7, self.min)\n self.assertEqual(49, self.max)\n finally:\n util.ScaleData = orig_scale", "def updateSize(self, *args):\n width = self.width.get()\n height = self.height.get()\n self.initialXScale.config(to=width)\n self.initialYScale.config(to=height)\n # error check that state is not outside bounds\n for ball, state in self.ballStates.items():\n if state[0] > width:\n state[0] = width\n if state[1] > height:\n state[1] 
= height", "def scale(self, sx : float, sy : float, sz : float):\n answ = self.clone()\n for i in range(len(self._elements)):\n answ._elements[i]._element = self._elements[i].element.scale(sx, sy, sz)\n\n return answ", "def scale_it(val):\n return scale(val, 0, 1, bpm_range[0], bpm_range[1])", "def scale(self, x: float, y: float):\n # Scaling is done from (0, 0), so that figure won't move after scaling\n dx = np.min(self.points[:, 0])\n dy = np.min(self.points[:, 1])\n self.points[:, 0] -= dx\n self.points[:, 1] -= dy\n\n matrix = np.array([\n [x, 0, 0],\n [0, y, 0],\n [0, 0, 1]\n ])\n\n self.points = self.points @ matrix\n self.points[:, 0] += dx\n self.points[:, 1] += dy\n\n self.points = self.points.astype(int)\n self.start_points = self.points\n self.center_point = self.midpoint()\n self.recalculate_pivots()", "def __init__(self, size=800, scale=(3. / 4, 5. / 2)):\n assert isinstance(size, int)\n assert isinstance(scale, float) or isinstance(scale, tuple)\n self.size = size\n self.scale = scale if isinstance(scale, tuple) \\\n else (1 - scale, 1 + scale)", "def __init__(self, data, pixscale = 7.77/43):\n self.data = data\n self.pixscale = pixscale", "def xscale(value):\n impl.xscale(**locals())", "def Scale_Pick( self, event ):\r\n x = event.x - cb.xorigin\r\n y = event.y\r\n #Was the position within the scale?\r\n if x < 0 and x > -2:\r\n x = 0 #low adjust\r\n if x > cb.xtotal and x < cb.xtotal+2:\r\n x = cb.xtotal #high adjust\r\n if( x >= 0 and x <= cb.xtotal ):\r\n self.filter_distance = round((cb.xtotal - float(x))/cb.xtotal*cb.longx,3)\r\n self.Draw_Scale()\r\n return", "def GetScale(self):\n ...", "def action_set_zoom(self, value):\n if value >= 0 and value < len(self.zoom_levels) and value != self.cur_zoom:\n self.cur_zoom = value\n self.apply_zoom()", "def scale(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"scale\")", "def setAxisScaling(scalingtype='linear', axes='XYZ'):\n scalingdict = {'linear':'LIN', 'log':'LOG'} \n dislin.axsscl(scalingdict[scalingtype],axes)", "def ikHandleDisplayScale(*args, q=True, query=True, e=True, edit=True, **kwargs)->Union[float,\n Any]:\n pass", "def scale(self):\n return self._scale", "def verticalScaleIncrease(self):\n scaleFac = float(self.qline4.text())\n self.qline4.setText(str(scaleFac * 2))\n self.model.refreshScreen()", "def rescale(self):\n low = self.datasource.data[\"values\"].min()\n high = self.datasource.data[\"values\"].max()\n\n # force color to be at lower end of the colormap if\n # data is all equal\n if low == high:\n high += 1\n\n self.set_limits_minmax(low, high)", "def y0_scale_siby1024(self, y0_scale_siby1024):\n\n self._y0_scale_siby1024 = y0_scale_siby1024", "def zoomMap(self, scale, x=0, y=0):\n if self.zoomed:\n self.delete(self.zoomed)\n self.zoomed = self.im.zoom(scale, scale)\n zoomed_id = self.create_image(x, y, image=self.zoomed, anchor=NW)\n self.delete(self.original)\n self.scale = scale", "def setSurfaceColorScale(low,high):\n dislin.zscale(low,high)", "def normalize_plot(self):\n\n kwargs = dict(stretch = self.stretch,\n vmin = self.vmin_button.get_value(),\n vmax = self.vmax_button.get_value())\n norm = aplpy.normalize.APLpyNormalize(**kwargs)\n self.parent.aplpy_plot.image.set_norm(norm)\n self.parent.aplpy_plot.refresh()", "def is_scale_enabled(self) -> bool:\r\n ..." ]
[ "0.67285424", "0.6584716", "0.6431193", "0.6370215", "0.6340126", "0.6294655", "0.62531334", "0.6227982", "0.6212142", "0.6209266", "0.6207113", "0.6194933", "0.6148316", "0.61001164", "0.6055724", "0.60446364", "0.60115176", "0.6009035", "0.59821504", "0.59555095", "0.591816", "0.59128934", "0.58997947", "0.5884049", "0.58770686", "0.58687717", "0.58604145", "0.58489335", "0.5843493", "0.5843493", "0.5821154", "0.58137316", "0.5802023", "0.57983655", "0.57420534", "0.5736263", "0.57334167", "0.5723713", "0.5699829", "0.5693297", "0.5684227", "0.56700444", "0.5654699", "0.5654227", "0.5649397", "0.5645241", "0.56396204", "0.56244516", "0.5624152", "0.5618658", "0.561277", "0.5607489", "0.5585009", "0.5577123", "0.5543623", "0.5529559", "0.5527077", "0.5526218", "0.5519969", "0.548506", "0.548245", "0.5475481", "0.5473053", "0.5470296", "0.5467717", "0.54645115", "0.54494035", "0.5448959", "0.54421437", "0.54324573", "0.5431323", "0.54194313", "0.5417465", "0.5408142", "0.54014033", "0.53965676", "0.53932536", "0.53844714", "0.53807175", "0.53759426", "0.5371861", "0.53689134", "0.5353562", "0.5352591", "0.5342987", "0.533936", "0.5338226", "0.5320264", "0.53197175", "0.5307631", "0.53064924", "0.5305254", "0.5298939", "0.52958685", "0.52922535", "0.5291798", "0.5288045", "0.5276525", "0.5275012", "0.5272198" ]
0.67066544
1
Run probabilistic road map planning
def prm_planning(start_x, start_y, goal_x, goal_y,
                 obstacle_x_list, obstacle_y_list, robot_radius, *, rng=None):
    obstacle_kd_tree = KDTree(np.vstack((obstacle_x_list, obstacle_y_list)).T)

    sample_x, sample_y = sample_points(start_x, start_y, goal_x, goal_y,
                                       robot_radius,
                                       obstacle_x_list, obstacle_y_list,
                                       obstacle_kd_tree, rng)
    if show_animation:
        plt.plot(sample_x, sample_y, ".b")

    road_map = generate_road_map(sample_x, sample_y,
                                 robot_radius, obstacle_kd_tree)

    rx, ry = dijkstra_planning(
        start_x, start_y, goal_x, goal_y, road_map, sample_x, sample_y)

    return rx, ry
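A self-contained sketch of the collision-free sampling step that prm_planning delegates to sample_points above; the square obstacle boundary, sample count, and robot radius are made-up illustration values, and the helper name sample_free_points is not part of the original code.

import numpy as np
from scipy.spatial import KDTree

def sample_free_points(obstacle_x, obstacle_y, robot_radius, n_samples=200, rng=None):
    # Draw uniform random points and keep only those farther than robot_radius
    # from the nearest obstacle, using a KD-tree nearest-neighbour query.
    rng = np.random.default_rng(rng)
    obstacle_kd_tree = KDTree(np.vstack((obstacle_x, obstacle_y)).T)
    min_xy = np.array([min(obstacle_x), min(obstacle_y)])
    max_xy = np.array([max(obstacle_x), max(obstacle_y)])
    samples = []
    while len(samples) < n_samples:
        p = min_xy + rng.random(2) * (max_xy - min_xy)
        dist, _ = obstacle_kd_tree.query(p)
        if dist > robot_radius:  # collision-free sample
            samples.append(p)
    return np.array(samples)

# Square boundary of obstacle points (hypothetical map).
ox = [float(i) for i in range(61)] + [60.0] * 61 + [float(i) for i in range(61)] + [0.0] * 61
oy = [0.0] * 61 + [float(i) for i in range(61)] + [60.0] * 61 + [float(i) for i in range(61)]
points = sample_free_points(ox, oy, robot_radius=5.0, rng=0)
print(points.shape)  # (200, 2)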
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n for task in range(1, 6):\n # get map object for the current task\n map_obj = MapObj(task=task)\n # display map\n map_obj.show_map()\n # find cost optimal path using a-star\n node = search(\n map_obj=map_obj,\n heuristic=euclidian_distance,\n moving_goal=(task == 5)\n )\n # draw optimal path on map\n map_obj.draw_path(node)\n # display the map\n map_obj.show_map()", "def main():\n test_file = 'lib/pbk411.tsp'\n limit = 1000\n milestones = [10, 25, 100, 500, 1000]\n\n lx, ly = parse_data(test_file)\n algo = Genalgo(lx, ly)\n current = 0\n\n for i in range(0, limit):\n algo.evolve_new_pop(i)\n if i == milestones[current]:\n cities, distance = get_best(algo)\n cities.append(cities[0])\n make_plot_solved(lx, ly, cities)\n save_plot('solved-' + str(milestones[current]) + '.png')\n print(str(milestones[current]) + ': ' + str(distance))\n current += 1\n\n _, best_tuple = algo.get_best_tours(algo.tours)\n best = algo.tours[best_tuple[0]]\n cities = best.cities\n cities.append(best.cities[0])\n print('Best: ' + str(best.get_cost()))\n\n make_plot_original(lx, ly)\n save_plot('original.png')\n make_plot_solved(lx, ly, cities)\n save_plot('solved.png')", "def __init__(self, my_map, paths, starts, goals, agent_goals, predictions):\n\n self.my_map = my_map\n self.paths = paths\n self.starts = starts\n self.goals = goals\n self.agent_goals = agent_goals\n self.num_of_agents = len(starts)\n self.predictions = predictions\n\n self.CPU_time = 0\n\n # compute heuristics for the low-level search\n self.heuristics = []\n for goal in self.goals:\n self.heuristics.append(compute_heuristics(my_map, goal))", "def tryEverything(g, verbose, graphname):\r\n prio = ['rku', 'random', 'BIL', 'rkd', 'cluHPS', 'rkusd', 'rkuad']\r\n placement = ['eft', 'BIM*', 'OLB', 'MET', 'DL', 'GDL']\r\n costFunction = ['mean', 'median', 'maxmax', 'minmax', 'minmin', 'maxmin']\r\n desc = ['DLS/DC', None, 'DCP']\r\n useOfBIM = [False, True]\r\n insertion = [False, True]\r\n BSA = [False, True]\r\n res: Dict[str, List[float]] = {}\r\n cnt = 0\r\n\r\n for ip, p in enumerate(prio):\r\n for ipl, pl in enumerate(placement):\r\n for ic, c in enumerate(costFunction):\r\n if p != 'BIL' or c == 'mean' or pl in ['DL', 'GDL']:\r\n for idd, d in enumerate(desc):\r\n for iu, u in enumerate(useOfBIM):\r\n for ii, i in enumerate(insertion):\r\n for ib, b in enumerate(BSA):\r\n cnt += 1\r\n name = \";\".join(map(str, [ip, ic, ipl, idd, iu, ii, ib]))\r\n\r\n # dispName = \"-\".join(map(str, [p, pl, c, d, u, i, b]))\r\n # print(\"Heuristic n°\", cnt, \"-\", dispName)\r\n # print(\"Heuristic n°\", cnt, \"-\", name)\r\n\r\n startScheduling = timeit.default_timer()\r\n try:\r\n schedule = computeSchedule(g, strategyPrio=p, costFunction=c,\r\n strategyPlacement=pl,\r\n useOfBIM=u, desc=d,\r\n insertion=i, bsa=b, verbose=verbose)\r\n verifPrec(g, schedule, verbose)\r\n endScheduling = timeit.default_timer()\r\n # print(\"Ended in :\", 1000*(endScheduling - startScheduling), \"ms\")\r\n # print(\"Ended in :\", round(1000 * (endScheduling - startScheduling),2), \"ms\")\r\n timeS = round(1000 * (endScheduling - startScheduling), 2)\r\n # print(f\"timeS : {timeS}\")\r\n if verbose:\r\n print(f\"Time : {timeS}ms\")\r\n res[name] = [round(schedule[getExitTask(g)][2], 6), timeS]\r\n except Exception as _:\r\n\r\n print(\"Error for : \" + name + \" on file \" + graphname)\r\n file = open(\"error.log\", 'a')\r\n file.write(f\"Error for {name} on file {graphname}\\n\")\r\n file.close()\r\n raise _\r\n return res\r\n return res", "def 
test_planning():\n\n joints1 = [0.0, 2.9, 1.3, 4.2, 1.4, 0.0]\n joints2 = [4.80, 2.92, 1.00, 4.20, 1.45, 1.32]\n\n\n path_planner = PathPlanner(\"manipulator\")\n\n print path_planner.group.get_end_effector_link()\n\n while True:\n raw_input(\"Press Enter to move to position 1\")\n plan = path_planner.plan_to_config(joints1)\n path_planner.execute_path(plan)\n rospy.sleep(0.5)\n\n raw_input(\"Press Enter to move to position 2\")\n plan = path_planner.plan_to_config(joints2)\n path_planner.execute_path(plan)\n rospy.sleep(0.5)", "def test_PRP(initial):\n return plan_route((initial[0],initial[1]), initial[2],\n # Goals:\n [(2,3),(3,2)],\n # Allowed locations:\n [(0,0),(0,1),(0,2),(0,3),\n (1,0),(1,1),(1,2),(1,3),\n (2,0), (2,3),\n (3,0),(3,1),(3,2),(3,3)])", "def main():\n graph_alg_eq()\n graph_points()\n graph_smooth_from_pts()\n\n return GOOD_RET # success", "def main():\n\n print('Drones capacity = {}'.format(DRONES_CAPACITY))\n\n # Instantiate the data of the problem\n data = create_data_model(MAX_POINT_DEMAND, USE_CACHE)\n\n # Create the routing index manager\n manager = pywrapcp.RoutingIndexManager(len(data['distance_matrix']),\n data['num_vehicles'], data['depot'])\n\n # Create Routing Model\n routing = pywrapcp.RoutingModel(manager)\n\n # Defining weights of the edges\n def distance_callback(from_index, to_index):\n \"\"\"Returns the distance between the two nodes.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return data['distance_matrix'][from_node][to_node]\n\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n\n # Addding capacity constraints.\n def demand_callback(from_index):\n \"\"\"Returns the demand for tests of the node.\"\"\"\n from_node = manager.IndexToNode(from_index)\n return data['demands'][from_node]\n\n demand_callback_index = routing.RegisterUnaryTransitCallback(\n demand_callback)\n\n def counter_callback(from_index):\n \"\"\"Returns the number of stops done at the node.\"\"\"\n from_node = manager.IndexToNode(from_index)\n return data['counter'][from_node]\n\n counter_callback_index = routing.RegisterUnaryTransitCallback(\n counter_callback)\n\n # Limiting the number of tests each drone can carry\n routing.AddDimensionWithVehicleCapacity(\n demand_callback_index,\n 0, # null capacity slack\n data['vehicle_capacities'], # vehicle maximum capacities\n True, # start cumul to zero\n 'Capacity')\n\n # Limiting the overall number of nodes a drone can serve in one tour\n routing.AddDimensionWithVehicleCapacity(\n counter_callback_index,\n 0, # null capacity slack\n data['vehicle_max_number_of_stops'], # vehicle maximum capacities\n True, # start cumul to zero\n 'Counter')\n\n # Setting parameters of the solver\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.first_solution_strategy = (\n routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\n search_parameters.local_search_metaheuristic = (\n routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)\n search_parameters.time_limit.seconds = HEURISTIC_TIME_LIMIT\n search_parameters.log_search = True\n\n\n print('START SOLVING')\n assignment = routing.SolveWithParameters(search_parameters)\n\n if assignment:\n print_and_save_solution(data, manager, routing, assignment)", "def main():\n args = parameter_parser()\n tab_printer(args)\n trainer = GPNTrainer(args)\n # 
trainer.fit()\n \"\"\"\n Scoring on the prediction and learning ability.\n \"\"\"\n trainer.score()\n \"\"\"\n Scoring on the subgraph test set.\n \"\"\"\n # trainer.score2()\n \"\"\"\n Scoring on the generalization ability.\n \"\"\"\n # trainer.score3()\n \"\"\"\n Finetuning for downstream tasks.\n \"\"\"\n # model = finetune_GPN(args, trainer.number_of_labels)\n # model.finetune()", "def main():\n # https://github.com/caelan/pddlstream/blob/master/examples/motion/run.py\n # TODO: 3D work and CSpace\n # TODO: visualize just the tool frame of an end effector\n\n np.set_printoptions(precision=3)\n parser = argparse.ArgumentParser()\n parser.add_argument('-a', '--algorithm', default='rrt_connect',\n help='The algorithm seed to use.')\n parser.add_argument('-d', '--draw', action='store_true',\n help='When enabled, draws the roadmap')\n parser.add_argument('-r', '--restarts', default=0, type=int,\n help='The number of restarts.')\n parser.add_argument('-s', '--smooth', action='store_true',\n help='When enabled, smooths paths.')\n parser.add_argument('-t', '--time', default=1., type=float,\n help='The maximum runtime.')\n args = parser.parse_args()\n\n #########################\n\n obstacles = [\n create_box(center=(.35, .75), extents=(.25, .25)),\n create_box(center=(.75, .35), extents=(.225, .225)),\n create_box(center=(.5, .5), extents=(.225, .225)),\n ]\n\n # TODO: alternate sampling from a mix of regions\n regions = {\n 'env': create_box(center=(.5, .5), extents=(1., 1.)),\n 'green': create_box(center=(.8, .8), extents=(.1, .1)),\n }\n\n start = np.array([0., 0.])\n goal = 'green'\n if isinstance(goal, str) and (goal in regions):\n goal = get_box_center(regions[goal])\n else:\n goal = np.array([1., 1.])\n\n title = args.algorithm\n if args.smooth:\n title += '+shortcut'\n viewer = draw_environment(obstacles, regions, title=title)\n\n #########################\n\n #connected_test, roadmap = get_connected_test(obstacles)\n distance_fn = get_distance_fn(weights=[1, 1]) # distance_fn\n\n # samples = list(islice(region_gen('env'), 100))\n with profiler(field='cumtime'): # cumtime | tottime\n # TODO: cost bound & best cost\n for _ in range(args.restarts+1):\n start_time = time.time()\n collision_fn, cfree = get_collision_fn(obstacles)\n sample_fn, samples = get_sample_fn(regions['env'], obstacles=[]) # obstacles\n extend_fn, roadmap = get_extend_fn(obstacles=obstacles) # obstacles | []\n\n if args.algorithm == 'prm':\n path = prm(start, goal, distance_fn, sample_fn, extend_fn, collision_fn,\n num_samples=200)\n elif args.algorithm == 'lazy_prm':\n path = lazy_prm(start, goal, sample_fn, extend_fn, collision_fn,\n num_samples=200, max_time=args.time)[0]\n elif args.algorithm == 'rrt':\n path = rrt(start, goal, distance_fn, sample_fn, extend_fn, collision_fn,\n iterations=INF, max_time=args.time)\n elif args.algorithm == 'rrt_connect':\n path = rrt_connect(start, goal, distance_fn, sample_fn, extend_fn, collision_fn,\n max_time=args.time)\n elif args.algorithm == 'birrt':\n path = birrt(start, goal, distance_fn=distance_fn, sample_fn=sample_fn,\n extend_fn=extend_fn, collision_fn=collision_fn,\n max_time=args.time, smooth=100)\n elif args.algorithm == 'rrt_star':\n path = rrt_star(start, goal, distance_fn, sample_fn, extend_fn, collision_fn,\n radius=1, max_iterations=INF, max_time=args.time)\n elif args.algorithm == 'lattice':\n path = lattice(start, goal, extend_fn, collision_fn, distance_fn=distance_fn)\n else:\n raise NotImplementedError(args.algorithm)\n paths = [] if path is None else 
[path]\n\n #paths = random_restarts(rrt_connect, start, goal, distance_fn=distance_fn, sample_fn=sample_fn,\n # extend_fn=extend_fn, collision_fn=collision_fn, restarts=INF,\n # max_time=args.time, max_solutions=INF, smooth=100) #, smooth=1000, **kwargs)\n\n # paths = exhaustively_select_portfolio(paths, k=2)\n # print(score_portfolio(paths))\n\n #########################\n\n if args.draw:\n # roadmap = samples = cfree = []\n add_roadmap(viewer, roadmap, color='black')\n add_points(viewer, samples, color='red', radius=2)\n #add_points(viewer, cfree, color='blue', radius=2)\n\n print('Solutions ({}): {} | Time: {:.3f}'.format(len(paths), [(len(path), round(compute_path_cost(\n path, distance_fn), 3)) for path in paths], elapsed_time(start_time)))\n for path in paths:\n add_path(viewer, path, color='green')\n\n if args.smooth:\n for path in paths:\n extend_fn, roadmap = get_extend_fn(obstacles=obstacles) # obstacles | []\n smoothed = smooth_path(path, extend_fn, collision_fn, iterations=INF, max_time=args.time)\n print('Smoothed distance_fn: {:.3f}'.format(compute_path_cost(smoothed, distance_fn)))\n add_path(viewer, smoothed, color='red')\n user_input('Finish?')", "def main():\r\n # Instantiate the data problem.\r\n data = create_data_model()\r\n\r\n # Create the routing index manager.\r\n manager = pywrapcp.RoutingIndexManager(len(data['distance_matrix']), data['num_vehicles'], data['depot'])\r\n\r\n # Create Routing Model.\r\n routing = pywrapcp.RoutingModel(manager)\r\n\r\n\r\n # Create and register a transit callback.\r\n def distance_callback(from_index, to_index):\r\n \"\"\"Returns the distance between the two nodes.\"\"\"\r\n # Convert from routing variable Index to distance matrix NodeIndex.\r\n from_node = manager.IndexToNode(from_index)\r\n to_node = manager.IndexToNode(to_index)\r\n return data['distance_matrix'][from_node][to_node]\r\n\r\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\r\n\r\n # Define cost of each arc.\r\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\r\n\r\n\r\n # Add Capacity constraint.\r\n def demand_callback(from_index):\r\n \"\"\"Returns the demand of the node.\"\"\"\r\n # Convert from routing variable Index to demands NodeIndex.\r\n from_node = manager.IndexToNode(from_index)\r\n return data['demands'][from_node]\r\n\r\n demand_callback_index = routing.RegisterUnaryTransitCallback(\r\n demand_callback)\r\n routing.AddDimensionWithVehicleCapacity(\r\n demand_callback_index,\r\n 0, # null capacity slack\r\n data['vehicle_capacities'], # vehicle maximum capacities\r\n True, # start cumul to zero\r\n 'Capacity')\r\n\r\n # Setting first solution heuristic.\r\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\r\n search_parameters.first_solution_strategy = (\r\n routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\r\n\r\n\r\n # Solve the problem.\r\n assignment = routing.SolveWithParameters(search_parameters)\r\n\r\n # Print solution on console.\r\n if assignment:\r\n print_solution(data, manager, routing, assignment)", "def prioritized_planning(env:RailEnv):\r\n schedules = []\r\n occupancy_map=[[] for i in range(len(env.agents))]\r\n\r\n n_timesteps = np.array([])\r\n state_schedule =[]\r\n conv = StateConverter(env)\r\n # Compute the transition and valid action table\r\n model = convert_to_transition(env, conv)\r\n\r\n # Calculate the shortest dist from one state to another state\r\n shortest = all_pairs_shortest_paths(conv.num_states, model[0])\r\n print(\"Done\")\r\n l = 
list(range(len(env.agents)))\r\n # Create a random order\r\n random_order_agent = random.sample(l, len(l))\r\n print(\"Agent order: \",random_order_agent)\r\n\r\n\r\n for i in random_order_agent:\r\n # Compute occupancy map\r\n occupancy_map[i] = compute_map(i, random_order_agent, n_timesteps, state_schedule, conv)\r\n\r\n # Compute schedule,state for each agent based on the occupancy map\r\n each_schedule = a_star_search(SearchEnv(env,conv,model,shortest,i).get_root_node(),occupancy_map[i])\r\n schedules.append(each_schedule[0])\r\n state_schedule.append(each_schedule[1])\r\n n_timesteps = np.append(n_timesteps, [len(each_schedule[1])])\r\n\r\n\r\n # Combine separate actions into a list\r\n actions = combine(schedules,random_order_agent,int(np.max(n_timesteps)))\r\n\r\n return actions", "def dijkstras(occupancy_map,x_spacing,y_spacing,start,goal):\n ROWS, COLS = occupancy_map.shape\n #convert physical location to index in the grid\n startNode = locToIndex(start, x_spacing, y_spacing)\n startingNodeLoc = indexToLoc(startNode, x_spacing, y_spacing)\n initialcost = math.sqrt((startingNodeLoc[0] - start[0])**2 + (startingNodeLoc[1] - start[1])**2)\n goalNode = locToIndex(goal, x_spacing, y_spacing)\n \n freelist = np.where(occupancy_map == 0)\n if occupancy_map[startNode[0], startNode[1]] != 0:\n #raise ValueError(\"start : ({}, {}) invalid, is an obstacle\".format(startNode[0], startNode[1]))\n startNode = findValidNode(startNode, start, occupancy_map, x_spacing, y_spacing)\n if occupancy_map[goalNode[0], goalNode[1]] != 0:\n #raise ValueError(\"goal: ({}, {}) invalid, is an obstacle\".format(goalNode[0], goalNode[1]))\n goalNode = findValidNode(goalNode, goal, occupancy_map, x_spacing, y_spacing)\n candidate = [ [sys.float_info.max, \n i, (freelist[0][i], freelist[1][i])] for i in range(len(freelist[0]))] \n visited = set([])\n queue = PriorityQueue(candidate)\n paths = {}\n found = False\n\n #update initial cost\n queue.remove(startNode)\n queue.insert(startNode, initialcost)\n paths[startNode] = None\n updateInitial(occupancy_map, ROWS, COLS, start, startNode, 0, 1, queue, paths, x_spacing, y_spacing, initialcost)\n updateInitial(occupancy_map, ROWS, COLS, start, startNode, 0, -1, queue, paths, x_spacing, y_spacing, initialcost)\n updateInitial(occupancy_map, ROWS, COLS, start, startNode, 1, 0, queue, paths, x_spacing, y_spacing, initialcost)\n updateInitial(occupancy_map, ROWS, COLS, start, startNode, -1, 0, queue, paths, x_spacing, y_spacing, initialcost)\n while queue.size() > 0:\n priority, current = queue.pop()\n if current == goalNode:\n found = True\n break\n #not reaching goal node yet, for each of its neighbor, update the weight\n visited.add(current)\n update(occupancy_map, ROWS, COLS, current, 0, 1, priority, queue, paths, visited, x_spacing, y_spacing)\n update(occupancy_map, ROWS, COLS, current, 0, -1, priority, queue, paths, visited, x_spacing, y_spacing)\n update(occupancy_map, ROWS, COLS, current, 1, 0, priority, queue, paths, visited, x_spacing, y_spacing)\n update(occupancy_map, ROWS, COLS, current, -1, 0, priority, queue, paths, visited, x_spacing, y_spacing)\n \n if not found:\n raise ValueError(\"fail to find shortest path\")\n node = goalNode\n shortestpath = []\n while node is not None:\n shortestpath.append(node)\n node = paths[node]\n #shortestpath.append(startNode)\n #print (startNode)\n #print ('*', list(reversed(shortestpath)))\n #print (goalNode)\n p = list(reversed([ indexToLoc(n, x_spacing, y_spacing) for n in shortestpath]))\n #start and final position may 
not fall on center of the grid\n if abs(p[0][0] - start[0]) > 0.0005 or abs(p[0][1] - start[1]) > 0.0005:\n p.insert(0, [start[0][0], start[1][0]])\n if abs(p[-1][0] - goal[0]) > 0.0005 or abs(p[-1][1] - goal[1]) > 0.0005:\n p.append([goal[0][0], goal[1][0]])\n res = np.array(p)\n print (res)\n return res", "def main():\n # Parse command line arguments\n try:\n number_of_vehicles = int(sys.argv[1])\n number_of_packages = int(sys.argv[2])\n number_of_cities = int(sys.argv[3])\n except IndexError:\n print 'Could not parse arguments, expected:'\n print 'python a1.py <vehicles> <packages> <cities>'\n print 'NOTE: <cities> must be a perfect square'\n else:\n packages = list()\n vehicles = list()\n\n cities = generate_graph(number_of_cities)\n\n # initialize the start state\n start_state = ProblemState()\n\n # # randomly pick a city to be a garage\n start_state.garage_city = random.choice(cities)\n\n # # Initialize the vehicles\n for i in range(number_of_vehicles):\n new_vehicle = Vehicle()\n new_vehicle.name = i\n new_vehicle.current_city = start_state.garage_city\n vehicles.append(new_vehicle)\n\n # # Initialize the packages\n for _ in range(number_of_packages):\n new_package = Package()\n # # # Randomly pck a source for the package\n new_package.source = random.choice(cities)\n # # # Randomly pick a DIFFERENT city for the destination\n new_package.destination = random.choice([city for city in cities if city != new_package.source])\n packages.append(new_package)\n\n # # Assign packages to trucks\n vehicles = assign_packages(vehicles, packages)\n\n total_distances = []\n total_states = []\n total_time = 0\n for vehicle in vehicles:\n search_space = [start_state]\n start_state.vehicle = vehicle\n total_states_for_vehicle = 0\n # Goal state represented by truck having no packages, and being\n # at garage\n current_state = heapq.heappop(search_space)\n start_time = time.time()\n while not current_state.is_goal_state():\n total_states_for_vehicle+=1\n # if total_states_for_vehicle < 10 or total_states_for_vehicle % 100 == 0:\n print \"+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\"\n print current_state\n print \"-------------------\"\n successor_states = transition_operator(current_state)\n print \"Successors:\"\n for state in successor_states:\n # print state\n heapq.heappush(search_space, state)\n print \"+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\"\n current_state = heapq.heappop(search_space)\n search_time = time.time() - start_time\n print \"Search complete for a vehicle in {time:.2f}s\".format(\n time = search_time\n )\n total_time += search_time\n total_distances.append(current_state.distance_traveled)\n total_states.append(total_states_for_vehicle)\n print \"Total distance travelled for all trucks: %s\\n \" % sum(total_distances)\n # Total number of states needed is the max number of states needed for 1 truck\n print \"Total states needed: %s\" % max(total_states)\n print \"Time taken: {time:.2f}s\".format(\n time=total_time\n )", "def main():\n # Instantiate a mixed-integer solver.\n solver = pywraplp.Solver('SolveAssignmentProblemMIP',\n pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING)\n\n # Number of teams (h and i)\n n = 9\n # Number of rooms (j)\n r = 3\n # Number of timeslots (k)\n t = 4\n # Number of matches\n m = 4\n\n # List of teams\n teams = [i for i in range(9)]\n\n x = {}\n\n for h in range(n):\n for i in range(n):\n for j in range(r):\n for k in range(t):\n if (h == i):\n x[h, i, j, k] = solver.IntVar(0, 0, 'x[%i,%i,%i,%i]' % (h, 
i, j, k))\n else:\n x[h, i, j, k] = solver.IntVar(0, 1, 'x[%i,%i,%i,%i]' % (h, i, j, k))\n\n # # Objective\n # solver.Minimize(solver.Sum([cost[i][j] * x[i,j] for i in range(num_workers)\n # for j in range(num_tasks)]))\n\n # Constraints\n\n # 2 Ensures that the matrix is the same across the diagonal\n for h in range(n):\n for j in range(r):\n for k in range(t):\n solver.Add((x[h, i, j, k] == x[i, h, j, k]))\n\n # 3 No pair plays each other more than once\n for h in range(n - 1):\n for i in range(h + 1, n):\n solver.Add(solver.Sum([x[h, i, j, k] for j in range(r) for k in range(t)]) <= 1)\n\n # 4 No team can be in more than one place at a time\n for h in range(n):\n for k in range(t):\n solver.Add(solver.Sum([x[h, i, j, k] for i in range(n) for j in range(r)]) <= 2)\n\n # 5 Each team plays exactly m matches\n for i in range(n):\n solver.Add(solver.Sum([x[h, i, j, k] for j in range(r) for k in range(t) for h in range(n)]) == 2 * m)\n\n # 6 Need 3 teams in a room at each timeslot\n for j in range(r):\n for k in range(t - 1):\n solver.Add(solver.Sum([x[h, i, j, k] for i in range(n - 1) for h in range(i + 1, n)]) == 3)\n\n # Need 3 teams in a room at each timeslot\n for g in range(n - 2):\n for h in range(g + 1, n - 1):\n for i in range(h + 1, n):\n solver.Add(solver.Sum(\n [x[g, h, j, k] + x[h, i, j, k] + x[g, i, j, k] for j in range(r) for k in range(t)]) != 2)\n\n sol = solver.Solve()\n\n print('Total cost = ', solver.Objective().Value())\n print()\n for h in range(n):\n for i in range(n):\n for j in range(r):\n for k in range(t):\n if x[h, i, j, k].solution_value() > 0:\n print('teams %i,%i assigned to room %i at time %i.' % (h, i, j, k))\n\n print()\n print(\"Time = \", solver.WallTime(), \" milliseconds\")", "def run(self):\n print(' strategies...')\n matrix_file = ''\n matrix_s, matrix_c = None, None\n # run for all but the optimal version\n item2matrix = os.path.join(self.data_set.base_folder, 'item2matrix.txt')\n for rec_type in self.data_set.graphs:\n for graph in self.data_set.graphs[rec_type]:\n print(' ', graph)\n gt_graph = load_graph(graph)\n for strategy in Strategy.strategies:\n if strategy == 'optimal':\n continue\n print(' ', strategy)\n m_new = self.data_set.matrices[rec_type][graph][strategy][0]\n m_newc = self.data_set.matrices[rec_type][graph][strategy][1]\n debug(' ----', m_new)\n debug(' ----', m_newc)\n if not m_new:\n debug(' ---- not m_new')\n matrix_s, matrix_c, matrix_file = None, None, None\n elif matrix_file != m_new:\n matrix_s = SimilarityMatrix(item2matrix, m_new)\n matrix_c = SimilarityMatrix(item2matrix, m_newc)\n matrix_file = m_new\n debug(' ---- matrix_file != m_new')\n # for miss in self.data_set.missions[rec_type][graph][strategy]:\n for miss in Mission.missions:\n print(' ', miss)\n if 'Information Foraging' in miss or 'Berrypicking' in miss:\n matrix = matrix_c\n else:\n matrix = matrix_s\n for m in self.data_set.missions[rec_type][graph][strategy][miss]:\n for ti in xrange(len(m.targets_original)):\n start = m.path[-2] if m.path else m.start\n debug('++++' * 16, 'mission', ti, '/',\n len(m.targets_original))\n debug(m.targets_original[ti])\n self.navigate(gt_graph, strategy, m, start,\n None, matrix)\n if ti > 0 and len(m.targets_original[ti]) == len(m.targets[0]):\n # print('breaking...')\n m.reset()\n break\n if not (ti + 1) == len(m.targets_original):\n m.path.append(u'*')\n m.reset()\n\n # run the simulations for the optimal solution\n print(' optimal...')\n for rec_type in self.data_set.graphs:\n for graph in self.data_set.graphs[rec_type]:\n 
print(' ', graph)\n sp_file = graph.rsplit('.', 1)[0] + '.npy'\n with open(sp_file, 'rb') as infile:\n sp = pickle.load(infile)\n for miss in self.data_set.missions[rec_type][graph]['optimal']:\n for m in self.data_set.missions[rec_type][graph]['optimal'][miss]:\n for ti in xrange(len(m.targets_original)):\n start = m.path[-2] if m.path else m.start\n debug('++++' * 16, 'mission', ti, '/', len(m.targets_original))\n debug(m.targets_original[ti])\n self.optimal_path(m, start, sp)\n if not (ti + 1) == len(m.targets_original):\n m.path.append(u'*')\n m.reset()\n\n # # DEBUG\n # item2matrix = os.path.join(self.data_set.base_folder, 'item2matrix.txt')\n # for rec_type in ['rbar']:\n # for graph in self.data_set.graphs[rec_type]:\n # print(' ', graph)\n # gt_graph = load_graph(graph)\n # sp_file = graph.rsplit('.', 1)[0] + '.npy'\n # with open(sp_file, 'rb') as infile:\n # sp = pickle.load(infile)\n # m_newc = self.data_set.matrices[rec_type][graph]['title'][1]\n # matrix = SimilarityMatrix(item2matrix, m_newc)\n # sc = 'Berrypicking'\n # mc1 = self.data_set.missions[rec_type][graph]['title'][sc]\n # mc2 = self.data_set.missions[rec_type][graph]['optimal'][sc]\n # mc3 = self.data_set.missions[rec_type][graph]['random'][sc]\n # for m1, m2, m3 in zip(\n # mc1,\n # mc2,\n # mc3\n # ):\n # # evalute with title strategy\n # for ti in xrange(len(m1.targets_original)):\n # start = m1.path[-2] if m1.path else m1.start\n # debug('++++' * 16, 'mission', ti, '/', len(m1.targets_original))\n # # debug(m1.targets_original[ti])\n # self.navigate(gt_graph, 'title', m1, start, None, matrix)\n # # print(m1.path, ti, len(m1.targets_original[ti]), len(m1.targets[0]))\n # if ti > 0 and len(m1.targets_original[ti]) == len(m1.targets[0]):\n # # print('breaking...')\n # m1.reset()\n # break\n # if not (ti + 1) == len(m1.targets_original):\n # m1.path.append(u'*')\n # m1.reset()\n #\n # # evaluate with optimal strategy\n # for ti in xrange(len(m2.targets_original)):\n # start = m2.path[-2] if m2.path else m2.start\n # # debug('++++' * 16, 'mission', ti, '/', len(m2.targets_original))\n # # debug(m2.targets_original[ti])\n # self.optimal_path(m2, start, sp)\n # if not (ti + 1) == len(m2.targets_original):\n # m2.path.append(u'*')\n # m2.reset()\n # # pdb.set_trace()\n #\n # # if len(m1.path) < len(m2.path):\n # # print(len(m1.path), len(m2.path))\n # # pdb.set_trace()\n # # m1.compute_stats()\n # # m2.compute_stats()\n # # if m1.stats[-1] > m2.stats[-1]:\n # # print(m1.stats)\n # # print(m2.stats)\n # # pdb.set_trace()\n #\n # print('MISSION COLLECTION DONE')\n # mc1.compute_stats()\n # mc2.compute_stats()\n # print(mc1.stats[-1], mc2.stats[-1])\n # pdb.set_trace()\n\n # fname_5 = u'../data/bookcrossing/graphs/rbar_5.gt'\n # fname_20 = u'../data/bookcrossing/graphs/rbar_20.gt'\n # sp_file_5 = fname_5.rsplit('.', 1)[0] + '.npy'\n # sp_file_20 = fname_20.rsplit('.', 1)[0] + '.npy'\n # with open(sp_file_5, 'rb') as infile:\n # sp_5 = pickle.load(infile)\n # with open(sp_file_20, 'rb') as infile:\n # sp_20 = pickle.load(infile)\n # sc = 'Berrypicking'\n # mc_5 = self.data_set.missions['rbar'][fname_5]['optimal'][sc]\n # mc_52 = self.data_set.missions['rbar'][fname_5]['title'][sc]\n # mc_20 = self.data_set.missions['rbar'][fname_20]['optimal'][sc]\n # mc_202 = self.data_set.missions['rbar'][fname_20]['title'][sc]\n # for m5, m20, m52, m202 in zip(\n # mc_5,\n # mc_20,\n # mc_52,\n # mc_202\n # ):\n # # evaluate 5 with optimal strategy\n # for ti in xrange(len(m5.targets_original)):\n # start = m5.path[-2] if m5.path else 
m5.start\n # self.optimal_path(m5, start, sp_5)\n # if not (ti + 1) == len(m5.targets_original):\n # m5.path.append(u'*')\n # m5.reset()\n #\n # # evaluate 20 with optimal strategy\n # for ti in xrange(len(m20.targets_original)):\n # start = m20.path[-2] if m20.path else m20.start\n # self.optimal_path(m20, start, sp_20)\n # if not (ti + 1) == len(m20.targets_original):\n # m20.path.append(u'*')\n # m20.reset()\n #\n # # if len(m5.path) < len(m20.path) or \\\n # if m5.path.count('*') > m20.path.count('*'):\n # print(len(m5.path))\n # for part in ' '.join(m5.path[2:]).split('*'):\n # print(' ', part)\n # print(len(m20.path))\n # for part in ' '.join(m20.path[2:]).split('*'):\n # print(' ', part)\n # pdb.set_trace()\n #\n # print('MISSION COLLECTION DONE')\n # mc_5.compute_stats()\n # mc_20.compute_stats()\n # print(mc_5.stats[-1], mc_20.stats[-1])\n #\n # for m5, m20 in zip(mc_5.missions, mc_20.missions):\n # if m5.stats[-1] > m20.stats[-1]:\n # print(m5.stats)\n # print(m20.stats)\n # pdb.set_trace()\n # pdb.set_trace()\n\n # write the results to a file\n # self.write_paths()\n self.save()", "def main():\n # Instantiate the data problem.\n data = create_data_model()\n\n # NEW SPOT TO MAKE distance_matrix\n distance_matrix = compute_euclidean_distance_matrix(destinations_1)\n manager = pywrapcp.RoutingIndexManager(\n len(destinations_1), data['num_vehicles'], data['depot'])\n\n# # Create the routing index manager.\n# manager = pywrapcp.RoutingIndexManager(\n# len(data['locations']), data['num_vehicles'], data['depot'])\n\n # Create Routing Model.\n routing = pywrapcp.RoutingModel(manager)", "def solve_tsp(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix, params=[]):\n drop_off_dict = {}\n car_path = []\n home_map = {}\n home_indexes = convert_locations_to_indices(list_of_homes, list_of_locations)\n\n start = list_of_locations.index(starting_car_location)\n graph, msg = adjacency_matrix_to_graph(adjacency_matrix)\n all_paths = dict(nx.all_pairs_dijkstra(graph))\n\n start_in_home = start in home_indexes\n if start in home_indexes:\n home_indexes.remove(start)\n home_indexes.insert(0, start)\n home_count = 0;\n\n for home in home_indexes:\n #print(home, end = \" \")\n home_map[home_count] = home\n home_count += 1\n # Instantiate the data problem.\n #print(len(home_map))\n data = create_data_model(home_indexes, 0)\n\n # Create the routing index manager.\n manager = pywrapcp.RoutingIndexManager(len(data['locations']),\n data['num_vehicles'], data['depot'])\n\n #print(manager.NodeToIndex(15))\n # Create Routing Model.\n routing = pywrapcp.RoutingModel(manager)\n\n def distance_callback(from_index, to_index):\n \"\"\"Returns the distance between the two nodes.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n #print(home_map[to_index], end = \" \")\n from_index = manager.IndexToNode(from_index)\n to_index = manager.IndexToNode(to_index)\n dist_to = all_paths.get(home_map[from_index])[0][home_map[to_index]]\n #if from_index >= 25 or to_index >= 25:\n # print(\"from\" if from_index >= 25 else \"to\", end = \" \")\n #dist_to = all_paths[from_index][0][to_index]\n return dist_to\n\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n\n # Define cost of each arc.\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n\n # Setting first solution heuristic.\n \"\"\"\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.first_solution_strategy = (\n 
routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\n \"\"\"\n\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.local_search_metaheuristic = (\n routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)\n search_parameters.time_limit.seconds = 3\n #search_parameters.log_search = True\n\n # Solve the problem.\n assignment = routing.SolveWithParameters(search_parameters)\n\n # if assignment:\n # print_solution(manager, routing, assignment)\n # Print solution on console.\n\n if start in home_indexes:\n drop_off_dict[start] = [start]\n\n\n index = routing.Start(0)\n car_path.append(start)\n\n while not routing.IsEnd(index):\n previous_index = manager.IndexToNode(index)\n index = assignment.Value(routing.NextVar(index))\n\n car_path.pop();\n to_index = manager.IndexToNode(index)\n path_to = all_paths.get(home_map[previous_index])[1][home_map[to_index]]\n drop_off_dict[home_map[to_index]] = [home_map[to_index]]\n #print(to_index, end = ' ')\n car_path.extend(path_to)\n #route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)\n # for i in car_path:\n # print(i)\n if start in drop_off_dict.keys() and not start_in_home:\n drop_off_dict.pop(start, None)\n\n return car_path, drop_off_dict", "def solve(num_wizards, num_constraints, wizards, constraints):\n\n # print(num_wizards)\n # print(num_constraints)\n # print(wizards)\n # print(constraints)\n # node_set = set(wizards)\n \n\n\n def cost(sol,num_constraints,constraints):\n constraints_satisfied = 0\n constraints_failed = []\n output_ordering_map = {k: v for v, k in enumerate(sol)}\n for c in constraints:\n\n m = output_ordering_map # Creating an alias for easy reference\n\n wiz_a = m[c[0]]\n wiz_b = m[c[1]]\n wiz_mid = m[c[2]]\n\n if (wiz_a < wiz_mid < wiz_b) or (wiz_b < wiz_mid < wiz_a):\n constraints_failed.append(c)\n else:\n constraints_satisfied += 1\n return num_constraints - constraints_satisfied\n\n def neighbors(sol):\n wiz1 = random.randint(0,num_wizards-1)\n wiz2 = random.randint(0,num_wizards-1)\n\n new_sol = copy.copy(sol)\n temp = new_sol[wiz1]\n new_sol[wiz1] = new_sol[wiz2]\n new_sol[wiz2] = temp\n \n return new_sol\n\n def acceptance_probability(old_cost,new_cost,T):\n exponent = (old_cost - new_cost) / T\n \n try:\n ans = math.exp(exponent)\n except OverflowError:\n ans = float('inf')\n return ans\n\n\n def anneal(solution, num_constraints, constraints):\n old_cost = 0\n new_cost = 0\n old_cost = cost(solution,num_constraints,constraints)\n T = 1.0\n T_min = 0.000001\n alpha = 0.98\n while T > T_min:\n i = 1\n while i <= 1000:\n new_solution = neighbors(solution)\n new_cost = cost(new_solution,num_constraints,constraints)\n if new_cost == 0:\n return new_solution,new_cost\n ap = acceptance_probability(old_cost, new_cost, T)\n if ap > random.random():\n solution = new_solution\n old_cost = new_cost\n i += 1\n T = T*alpha\n return solution, old_cost\n\n s = copy.copy(wizards)\n random.shuffle(s)\n ret = anneal(s,num_constraints,constraints)\n \n for i in range(10):\n if ret[1] == 0:\n break\n random.shuffle(s)\n new_ret = anneal(s,num_constraints,constraints)\n print(i)\n if new_ret[1] < ret[1]:\n ret = new_ret\n print(\"constraints failed: {0}\".format(ret[1]))\n return ret[0]", "def greedy_MAP_assignment(theta,random_runs = 10,heur = 'first'):\r\n N = theta.shape[0]\r\n scipy.random.seed()\r\n max_p = -scipy.inf\r\n for k in range(random_runs):\r\n A = scipy.random.randint(2,size = N)\r\n improved = True\r\n p = A.dot( theta.dot(A) )\r\n while improved:\r\n improved 
= False\r\n if heur == 'first':\r\n p2 = -scipy.inf\r\n perm = scipy.random.permutation(N)\r\n for s in perm:\r\n #dp: change in p if A[i] bit is reversed\r\n dp = (1-2*A[s])*( A.dot(theta[s,:]+ theta[:,s]) ) + theta[s,s]\r\n if dp>0:\r\n p2 = dp\r\n break\r\n\r\n if heur == 'best':\r\n dp = (1-2*A)*( A.dot(theta + theta.T) ) + scipy.diag(theta)\r\n p2,s = dp.max(), dp.argmax()\r\n if p2 > 0:\r\n A[s] = 1-A[s]\r\n improved = True\r\n p += p2\r\n if p>max_p:\r\n greedy_A,max_p = A.copy(),p\r\n return greedy_A.astype(int),max_p", "def compute_map(current_agent_id,agent_order,number_of_timestep,state_schedules, conv :StateConverter):\r\n #Find the agent has the highest number of time steps\r\n highest_timestep = 0\r\n # Find the highest time step\r\n if len(number_of_timestep) >0:\r\n highest_timestep = np.max(number_of_timestep)\r\n occupancy_map = []\r\n # Since we don't know yet how many time step of the current id so\r\n # the number of time steps of the occupancy map == highest number of time step\r\n # of the current schedule\r\n for time_step in range(int(highest_timestep)):\r\n # Initialize the occupancy for current time step\r\n current_occupancy_map = np.zeros(conv.num_tiles)\r\n # We loop through schedule of each agent at current time step\r\n for i in range(len(state_schedules)):\r\n # Get the agent id of current schedule\r\n agent_of_schedule = agent_order[i]\r\n if time_step < len(state_schedules[i]):\r\n # The first case when the agent of current schedule is executed after the current agent\r\n if agent_of_schedule > current_agent_id:\r\n # Get the current state\r\n current_state = state_schedules[i][time_step]\r\n # Convert the current state to tile index\r\n current_tile = conv.state_to_tile(current_state)\r\n # Occupied the current tile in the occupancy map\r\n current_occupancy_map[current_tile] = 1\r\n if time_step + 1 < len(state_schedules[i]):\r\n # Get the next state\r\n next_state = state_schedules[i][time_step + 1]\r\n # Convert next state to next tile will be occupied\r\n next_tile_index = conv.state_to_tile(next_state)\r\n # Occupied the next tile in the occupancy map\r\n current_occupancy_map[next_tile_index] = 1\r\n # The second case when the agent of current schedule is executed before the current agent\r\n else:\r\n if time_step + 1 < len(state_schedules[i]):\r\n # Get the next state\r\n next_state = state_schedules[i][time_step + 1]\r\n # Convert next state to next tile will be occupied\r\n next_tile_index = conv.state_to_tile(next_state)\r\n # Occupied the next tile in the occupancy map\r\n current_occupancy_map[next_tile_index] = 1\r\n if time_step + 2 < len(state_schedules[i]):\r\n # Get the next 2 state\r\n next_2state = state_schedules[i][time_step+2]\r\n # Convert the current state to tile index\r\n next_2tile = conv.state_to_tile(next_2state)\r\n # Occupied the current tile in the occupancy map\r\n current_occupancy_map[next_2tile] = 1\r\n occupancy_map.append(current_occupancy_map)\r\n return occupancy_map", "def find_best_cycle(road_map):\n #Assume the best_cycle is the initial road map and calculate total distance \n best_cycle = road_map\n best_cycle_dist = compute_total_distance(road_map)\n best_attempts = [best_cycle_dist]\n # For each city in the road map\n for i in range(len(road_map)):\n for swaps in range(10000):\n #A random number between 0 and total number of cities is generated.\n number = int(len(road_map) * random.random())\n # Create a test tuple where the first field in the tuple is the road map,\n # with two cities swapped, and second 
field is total distance.\n # The type of swap depends on whether the random number is odd or even.\n # If even or if i is equal to number, the cities at index i and i+1 is swapped.\n # If odd and i is not equal to number, the cities at index i and number are swapped.\n # As a result, on each swap, there is\n # 50% chance of either type of swap being selected.\n if number % 2 == 1 and i != number:\n test = swap_cities(best_cycle,i,number)\n else:\n test = swap_adjacent_cities(best_cycle,i)\n # Compare the second field with current best cycle distance\n # If current best cycle distance is greater, then set best cycle\n # to the road map after swapping \n if best_cycle_dist > test[1]:\n best_cycle = test[0]\n best_cycle_dist = test[1]\n if best_attempts[len(best_attempts)-1] > best_cycle_dist:\n best_attempts.append(best_cycle_dist)\n return best_cycle, best_cycle_dist, best_attempts", "def main(self):\n\n self.nodelist = []\n\n self.probname = self.probpath.split('/')[-1].rstrip('.mps.lp.gz')\n\n model = Model(\"TreeD\")\n eventhdlr = LPstatEventhdlr()\n eventhdlr.nodelist = self.nodelist\n model.includeEventhdlr(eventhdlr, \"LPstat\", \"generate LP statistics after every LP event\")\n model.readProblem(self.probpath)\n model.setIntParam('presolving/maxrestarts', 0)\n\n for setting in self.scip_settings:\n model.setParam(setting[0], setting[1])\n\n model.optimize()\n\n self.scipversion = 'SCIP '+str(model.version())\n # self.scipversion = self.scipversion[:-1]+'.'+self.scipversion[-1]\n\n if model.getStatus() == 'optimal':\n self.optval = model.getObjVal()\n else:\n self.optval = None\n\n\n # print(\"performing Spatial Analysis on similarity of LP condition numbers\")\n # self.performSpatialAnalysis()\n\n columns = self.nodelist[0].keys()\n self.df = pd.DataFrame(self.nodelist, columns = columns)\n\n # merge solutions from cutting rounds into one node\n if not self.showcuts:\n self.df = self.df[self.df['first'] == False].drop_duplicates(subset='age', keep='last').reset_index()", "def main():\r\n # Instantiate the data problem.\r\n data = create_data_model()\r\n\r\n # Create the routing index manager.\r\n manager = pywrapcp.RoutingIndexManager(\r\n len(data['distance_matrix']), data['num_vehicles'], data['depot'])\r\n\r\n # Create Routing Model.\r\n routing = pywrapcp.RoutingModel(manager)\r\n\r\n# ADD THE DISTANCE CALLBACK\r\n # ADD THE DEMAND CALLBACK AND CAPACITY COSTRAINTS\r\n # In addition to the distance callback, the solver also requires a demand callback, \r\n # which returns the demand at each location, and a dimension for the capacity constraints.\r\n \r\n # Create and register a transit callback.\r\n def distance_callback(from_index, to_index):\r\n \"\"\"Returns the distance between the two nodes.\"\"\"\r\n # Convert from routing variable Index to distance matrix NodeIndex.\r\n from_node = manager.IndexToNode(from_index)\r\n to_node = manager.IndexToNode(to_index)\r\n return data['distance_matrix'][from_node][to_node]\r\n \r\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\r\n\r\n # Define cost of each arc.\r\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\r\n\r\n#!!! 
NB\r\n # Unlike the distance callback, which takes a pair of locations as inputs, \r\n # the demand callback only depends on the location (from_node) of the delivery.\r\n # The code also creates a dimension for capacities, we use the AddDimensionWithVehicleCapacity method, \r\n # which takes a vector of capacities.\r\n # Since all the vehicle capacities in this example are the same, you could use the the \r\n # AddDimension method, which takes a single upper bound for all vehicle quantities. \r\n # But AddDimensionWithVehicleCapacity handles the more general case in which different \r\n # vehicles have different capacities.\r\n \r\n # Add Capacity constraint.\r\n def demand_callback(from_index):\r\n \"\"\"Returns the demand of the node.\"\"\"\r\n # Convert from routing variable Index to demands NodeIndex.\r\n from_node = manager.IndexToNode(from_index)\r\n return data['demands'][from_node]\r\n\r\n demand_callback_index = routing.RegisterUnaryTransitCallback(\r\n demand_callback)\r\n routing.AddDimensionWithVehicleCapacity(\r\n demand_callback_index,\r\n 0, # null capacity slack, modify it if you accept unmet demand\r\n data['vehicle_capacities'], # vehicle maximum capacities set by the user\r\n True, # start cumul to zero\r\n 'Capacity')\r\n \r\n # you can find other research method here:\r\n # https://developers.google.com/optimization/routing/routing_options\r\n \r\n # Setting first solution heuristic:\r\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\r\n# search_parameters.first_solution_strategy = (\r\n# routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\r\n\r\n # Setting metaheuristic search method:\r\n search_parameters.local_search_metaheuristic = (\r\n routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)\r\n # Setting time limit to the method\r\n search_parameters.time_limit.seconds = 30\r\n \r\n # Solve the problem.\r\n assignment = routing.SolveWithParameters(search_parameters)\r\n\r\n # Search status \r\n print('\\n')\r\n solver_index = routing.status()\r\n description = ['ROUTING_NOT_SOLVED','ROUTING_SUCCESS','ROUTING_FAIL',\r\n 'ROUTING_FAIL_TIMEOUT','ROUTING_INVALID']\r\n print(\"Solver status:\",description[solver_index],'\\n')\r\n \r\n # Print solution on console.\r\n if assignment:\r\n print_solution(data, manager, routing, assignment)", "def main(self):\n \n\tsession = db.Session()\n\tspeed = -1\n\n\t#self.checkResponseStatus()\n\n\t#secondsBetween = random.uniform(config.MIN_SCAN_DELAY, config.MIN_SCAN_DELAY + 2)\n #time.sleep(secondsBetween)\n\t\n \tstartTime = time.time()\n#\tlogger.info(\"Starting scanning at: %s\", time.asctime( time.localtime(startTime) ) )\n\n\tself.minorFailCount = 0\n for i, point in enumerate(self.points):\n\t self.minorFailCount = 0\n\t self.performMapOperations(i, point, session)\n\n endTime = time.time()\n# logger.info(\"Stopped scanning at: %s\", time.asctime( time.localtime(endTime) ) )\n\ttimeElapsed = endTime - startTime\n\tminutes = timeElapsed/60\n\tminutesRounded = math.floor(minutes)\n\tseconds = math.floor(60*(minutes-minutesRounded))\n\tlogger.info(\"Time elapsed: %d:%d\", minutesRounded, seconds)\t \n logger.info(\"Total pokemon seen: %d (average per cycle: %f)\", self.seen_per_cycle, (self.seen_per_cycle/len(self.points))) \n \n session.close()\n if self.seen_per_cycle == 0:\n self.error_code = 'NO POKEMON'", "def frozen_lake_mdp_helper(grid_size=4, p=0.8, is_slippery=True):\n n_action = 4\n\n random_map = generate_random_map(size=grid_size, p=p)\n env = gym.make(\"FrozenLake-v0\", 
desc=random_map, is_slippery=is_slippery)\n env.reset()\n env.render()\n open_ai_p = env.P\n # print(env.P)\n\n transition_p = np.zeros((n_action, grid_size**2, grid_size**2))\n reward = np.zeros((n_action, grid_size**2, grid_size**2))\n\n for state, state_dict in open_ai_p.items():\n for action, prob_tuple_list in state_dict.items():\n for prob_tuple in prob_tuple_list:\n probability, next_state, r, done = prob_tuple\n\n transition_p[action][state][next_state] += probability\n reward[action][state][next_state] = r*100 - 1\n # # print(r)\n # if probability != 0:\n # print(\"Found\", state, action, probability, next_state, r, done)\n # print(transition_p)\n # # print(reward)\n\n return transition_p, reward, random_map", "def Optimizer(r_grasp,PAM_r, PAM_s, object_s, object_f, object_params, phi, r_max, walls, obstacles, obstacles_PAM, current_leg, n, n_p, v_max, force_max, legs, dt):\n global action_push_pull, PAM_goal, grasping_goal, object_path_planned, PAM_path_planned\n # assigning cost of changing from one leg to another based on the distance to the desired pose\n cost_ChangeLeg = 1\n dz_final = np.sqrt((object_s.x - object_f.x) ** 2 + (object_s.y - object_f.y) ** 2)\n if dz_final < 1:\n cost_ChangeLeg = 10\n elif dz_final < 2:\n cost_ChangeLeg = 20\n else:\n cost_ChangeLeg = 10\n\n # assigning weight for cost of predicted repositioning and cost of robot motion\n w_cost_reposition = 40\n w_cost_motion = 10\n\n # finding object's leg cordinates\n object_leg = find_corners(object_s.x, object_s.y, object_s.phi, object_params[7], object_params[8])\n\n # initialization (initializeing cost to infinity)\n cost = [float('inf'), float('inf'), float('inf'), float('inf')]\n cost_legchange = [0, 0, 0, 0]\n cost_PAM = [[0, 0],[0, 0],[0, 0],[0, 0]]\n cost_manipulation = [0, 0, 0, 0]\n cost_motion = [0, 0, 0, 0]\n force = [0, 0, 0, 0]\n path = [[[], []], [[], []], [[], []], [[], []]]\n planned_path_w = [[],[],[],[]]\n PAM_g = [[[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]]]\n command = [[], [], [], []]\n des = [[], [], [], [], []]\n PAM_goal = state()\n\n # find the nominal trajectory for manipulation\n theta = nominal_traj([object_s.x,object_s.y,object_s.phi], [object_f.x,object_f.y,object_f.phi], v_max, walls, obstacles, n, dt)\n\n # itterate through each leg to find the leg with minimum cost\n for leg in range(4):\n phi_linear = theta\n psi_linear = [theta[k] + phi[leg] for k in range(len(theta))]\n \t# find the cost and required force for manipulation for the leg\n force[leg], cost_manipulation[leg], planned_path_w[leg], command[leg], des= OptTraj([object_s.x, object_s.y, object_s.phi, object_s.xdot, object_s.ydot, object_s.phidot], [object_f.x, object_f.y, object_f.phi, object_f.xdot, object_f.ydot, object_f.phidot], v_max, walls, obstacles, object_params[0:4], object_params[4:7], phi_linear, psi_linear, force_max, r_max[leg], n, dt, object_leg[leg])\n \t# adding cost of changing leg\n if leg != current_leg:\n cost_legchange[leg] = cost_ChangeLeg\n # adding cost of PAM motion to PAM goal pose\n phi0 = np.arctan2(object_leg[leg][1]-object_s.y,object_leg[leg][0]-object_s.x)\n # finding the better option between pulling and pushing for each leg, with the same manipulation plan\n for push_pull in [0,1]:\n PAM_g[leg][push_pull] = [r_grasp * np.cos(phi0) * np.sign(push_pull * 2 - 1) + object_leg[leg][0], r_grasp * np.sin(phi0) * np.sign(push_pull * 2 - 1) + object_leg[leg][1], np.pi * push_pull + phi0]\n cost_PAM[leg][push_pull], path[leg][push_pull], 
command_pam, goal_orientation = OptPath([PAM_s.x, PAM_s.y, PAM_s.phi], PAM_g[leg][push_pull], walls, obstacles_PAM, n_p, dt)\n if cost_PAM[leg][push_pull]!= float(\"inf\"):\n PAM_s_sim = copy.deepcopy(PAM_s)\n PAM_s_sim.x, PAM_s_sim.y, PAM_s_sim.phi = [PAM_r * np.cos(phi0) * np.sign(push_pull * 2 - 1) + object_leg[leg][0], PAM_r * np.sin(phi0) * np.sign(push_pull * 2 - 1) + object_leg[leg][1], np.pi * push_pull + phi0]\n # adding cost of predicted re-positionings\n n_transition = traj_simulation(copy.deepcopy(PAM_s_sim), copy.deepcopy(object_s), force[leg], legs, leg, command[leg])\n # print(n_transition)\n cost_PAM[leg][push_pull] += w_cost_reposition*n_transition\n cost_motion[leg] += min(cost_PAM[leg])*w_cost_motion\n action_push_pull[leg] = np.argmin(cost_PAM[leg])\n else:\n phi0 = np.arctan2(force[leg][0][1], force[leg][0][0])\n for push_pull in [0,1]:\n PAM_g[leg][push_pull] = [r_grasp * np.cos(phi0) * np.sign(push_pull * 2 - 1) + object_leg[leg][0], r_grasp * np.sin(phi0) * np.sign(push_pull * 2 - 1) + object_leg[leg][1], np.pi * push_pull + phi0]\n cost = [cost_legchange[leg] + cost_motion[leg] + cost_manipulation[leg] for leg in range(4)]\n\n if min(cost) < float(\"inf\"):\n \t[min_index, min_value] = [np.argmin(cost), min(cost)]\n \t# Finding the grasping goal pose based on the selected plan\n \tphi0 = np.arctan2(object_leg[min_index][1]-object_s.y,object_leg[min_index][0]-object_s.x)\n \tgrasping_goal = [PAM_r * np.cos(phi0) * np.sign(action_push_pull[min_index] * 2 - 1) + object_leg[min_index][0], PAM_r * np.sin(phi0) * np.sign(action_push_pull[min_index] * 2 - 1) + object_leg[min_index][1], np.pi * action_push_pull[min_index] + phi0]\n \tPAM_goal = state()\n \tPAM_goal.x, PAM_goal.y, PAM_goal.phi = PAM_g[min_index][action_push_pull[min_index]]\n \tobject_path_planned = Path()\n \tobject_path_planned.header.frame_id = 'frame_0'\n \tfor i in range(len(planned_path_w[min_index])):\n \t\tpose = PoseStamped()\n \t\tpose.pose.position.x = planned_path_w[min_index][i][0]\n \t\tpose.pose.position.y = planned_path_w[min_index][i][1]\n \t\tpose.pose.position.z = 0\n \t\tobject_path_planned.poses.append(pose)\n\n \tPAM_path_planned = Path()\n \tPAM_path_planned.header.frame_id = 'frame_0'\n \tif min_index != current_leg:\n \t\tfor i in range(len(path[min_index][action_push_pull[min_index]])):\n \t\t\tpose = PoseStamped()\n \t\t\tpose.pose.position.x, pose.pose.position.y, pose.pose.orientation.z =path[min_index][action_push_pull[min_index]][i]\n \t\t\tPAM_path_planned.poses.append(pose)\n else:\n \tmin_index = 5\n \tmin_value = float(\"inf\")\n if 0 < min_index and min_index <= 4:\n force_d = force[min_index][0]\n else:\n force_d = [0,0,0]\n\n return cost ,min_index, force_d, PAM_goal, grasping_goal, object_path_planned, PAM_path_planned", "def consolidation_heuristics(to_print = False):\n # Instantiate the data problem.\n data = create_data_model()\n # Create the routing index manager.\n manager = pywrapcp.RoutingIndexManager(len(data['distance_matrix']),\n data['num_vehicles'], data['depot'])\n # Create Routing Model.\n routing = pywrapcp.RoutingModel(manager)\n # Create and register a transit callback.\n def distance_callback(from_index, to_index):\n \"\"\"Returns the distance between the two nodes.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return data['distance_matrix'][from_node][to_node]\n def pending_callback(from_index, to_index):\n \"\"\"Returns the 
distance between the two nodes.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return data['post'][to_node]\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n pending_callback_index = routing.RegisterTransitCallback(pending_callback)\n # Define cost of each arc.\n for i in range(data['num_vehicles']-1):\n routing.SetArcCostEvaluatorOfVehicle(transit_callback_index, i) #Transit cost\n routing.SetFixedCostOfVehicle(data['fixed_cost'], i) #Fixed cost\n routing.SetArcCostEvaluatorOfVehicle(pending_callback_index, data['num_vehicles']-1) #Postponement and/or NonService cost\n # Add Capacity constraint.\n def demand_callback(from_index): #\n \"\"\"Returns the demand of the node.\"\"\"\n # Convert from routing variable Index to demands NodeIndex.\n from_node = manager.IndexToNode(from_index) \n return data['demands'][from_node]\n demand_callback_index = routing.RegisterUnaryTransitCallback(\n demand_callback)\n routing.AddDimensionWithVehicleCapacity(\n demand_callback_index,\n 0, # null capacity slack\n data['vehicle_capacities'], # vehicle maximum capacities\n True, # start cumul to zero\n 'Capacity')\n # Add time constraint.\n def time_callback(from_index,to_index): #\n \"\"\"Returns the demand of the node.\"\"\"\n # Convert from routing variable Index to NodeIndex in time\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return time_matrix[from_node][to_node] \n time_callback_index = routing.RegisterTransitCallback(time_callback) \n routing.AddDimensionWithVehicleCapacity(\n time_callback_index,\n 0, # null capacity slack\n data['time_capacities'], # vehicle maximum capacities\n True, # start cumul to zero\n 'Time')\n # Setting solution heuristic-procedure.\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.local_search_metaheuristic = (\n routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)\n search_parameters.time_limit.seconds = 5 #10 # 60 #20 #3000\n search_parameters.log_search = True\n # Solve the problem.\n assignment = routing.SolveWithParameters(search_parameters)\n # Print solution on console.\n if assignment:\n sent, sol_results, routes_results = print_solution(data, manager, routing, assignment) \n return sent, sol_results, routes_results", "def main():\n create_sets()\n optimal_weights = genetic_algorithm()\n obtain_best_model(optimal_weights)", "def decision(grid):\n child = Maximize((grid,0),-999999999,999999999)[0]\n Child = child.map\n g = grid.clone()\n for M in range(4):\n if g.move(M):\n if g.map == Child:\n # global prune\n # global pruneLog\n # pruneLog.append(prune)\n # print(prune)\n # print(sum(pruneLog)/len(pruneLog))\n return M\n g = grid.clone()", "def plan_path(self, start_point, end_point, map_obj):\n # STUFF FOR TESTING \n if self.enable_vis:\n marker = Marker()\n marker.header.frame_id = \"/map\"\n marker.type = marker.POINTS\n marker.action = marker.ADD\n \n marker.scale.x = 0.1\n marker.scale.y = 0.1\n self.vis_pub.publish(marker)\n \n exploration_bias = 1.0 - self.goal_bias\n final_node = None\n num_existing_path_points_added = 0\n \n self.rrt_star = RRTStar(Node(start_point))\n self.max_iterations = self.rrt_star.max_size\n while self.rrt_star.size <= self.max_iterations:\n p = np.random.uniform()\n if p < exploration_bias:\n \n x_rand = self.map.sample_free_space()\n else:\n if final_node is None:\n x_rand = end_point\n else:\n 
x_rand = self.branched_from_existing_path(\n final_node,\n depth_underestimate=num_existing_path_points_added\n )\n num_existing_path_points_added += 1\n\n x_nearest = self.rrt_star.nearest(x_rand) # Find the nearest node to x_rand\n\n path = self.map.generate_line_path(x_nearest.value, x_rand, eta=self.eta)\n if path is not None: # no obstacles between x_nearest and x_rand\n x_new = path[-1]\n X_nearby_connectable = self.find_nearby_connectable(x_nearest, x_new)\n\n cost_min, node_min = self.find_best_parent(X_nearby_connectable, x_new)\n\n X_nearby_connectable.remove(node_min) # Remove x_new's parent node from the list of nearby nodes so it is not considered for rewiring\n \n # Create the new node at x_new!\n node_new = self.rrt_star.add_config(node_min, x_new)\n \n if self.enable_vis:\n # FOR TESTING ONLY #\n # Code to publish marker for new node\n ###########################################################################################\n TEMP = Point()\n TEMP.x = x_new[0]\n TEMP.y = x_new[1]\n TEMP.z = .05\n marker.points.append(TEMP)\n \n TEMP = ColorRGBA()\n TEMP.r = 1\n TEMP.g = 0\n TEMP.b = 0\n TEMP.a = 1\n \n marker.colors.append(TEMP)\n \n self.vis_pub.publish(marker)\n ###########################################################################################\n\n self.rewire(cost_min, node_new, X_nearby_connectable)\n \n if np.allclose(node_new.value, end_point, .05, 0) and (final_node is None):#np.array_equal(node_new.value, end_point):\n final_node = node_new\n # reduce exploration bias so that we reinforce the existing path\n exploration_bias = .5\n if VERBOSE:\n print(\"Path found!!!!\")\n print(final_node.cost)\n if rospy.get_time() - self.start_time > self.time_thresh:\n if VERBOSE:\n print(self.rrt_star.size)\n break\n\n \n if final_node is not None:\n if self.enable_vis:\n marker = Marker()\n marker.header.frame_id = \"/map\"\n marker.type = marker.POINTS\n marker.action = marker.ADD\n \n marker.scale.x = 0.1\n marker.scale.y = 0.1\n marker.points = []\n marker.colors = []\n def recur(node):\n if self.enable_vis:\n TEMP = Point()\n TEMP.x = node.value[0]\n TEMP.y = node.value[1]\n TEMP.z = .05\n marker.points.append(TEMP)\n \n TEMP = ColorRGBA()\n TEMP.r = 1\n TEMP.g = 0\n TEMP.b = 0\n TEMP.a = 1\n \n marker.colors.append(TEMP)\n \n \n self.trajectory.points.append([node.value[0], node.value[1]])\n parent = node.parent\n if parent is not None:\n recur(parent)\n recur(final_node)\n self.trajectory.points.reverse()\n if self.enable_vis:\n self.vis_pub.publish(marker)\n if VERBOSE:\n print (final_node.depth)\n else:\n if VERBOSE:\n print(\"No path found! 
Please try again.\")\n \n \n \n # publish trajectory\n self.traj_pub.publish(self.trajectory.toPoseArray())\n\n # visualize trajectory Markers\n self.trajectory.publish_viz()", "def run_RL_sync(mapname,n_trials = int, seed = int,alpha = 0.15, beta = 0.2, tau = 5, gamma = 0.9, max_steps = 1000, reward_size = 100):\n\n # Softmax can't be from external file, because multiprocessing messes up the seed values\n np.random.seed(seed)\n def softmax_action(action_weights = [], tau = int):\n action_indices = list(range(len(action_weights)))\n f = np.exp((action_weights - np.max(action_weights))/tau) # shift values\n action_prob = f / f.sum(axis=0)\n action_index = np.random.choice(action_indices, 1, p=action_prob)\n return action_index[0]\n\n srate = 500 #sample rate\n \n total_time = int(1.5*srate) #total timesteps or \"time the agent gets to think about moving\"\n\n time0 = time.perf_counter()\n\n print(\"Running the RL model but with sync !\")\n srate = 500 #sample rate\n \n total_time = int(1.5*srate) #total timesteps or \"time the agent gets to think about moving\"\n\n # Learning Parameters\n parameters = {\"alpha\": alpha\n ,\"beta\": beta\n ,\"gamma\": gamma\n ,\"tau\": tau}\n n_steps = max_steps\n n_trials = n_trials\n \n sub_reward_size = 0 # no subgoals!\n # # # # # # # # # # # # # #\n # # Setting up the map # #\n # # # # # # # # # # # # # #\n \"\"\" The agent begins in a walled grid and has to find \n the goal to obtain a reward.\"\"\"\n # Grid #\n states = create_grid_from_file(map_file=mapname,goal_location = [10,3],reward_size=reward_size,sub_reward_size=sub_reward_size)\n state_set = list(range(int(states.shape[0]*states.shape[1]))) #index of states\n\n #set of actions\n move_name=[\"UP\", \"R-UP\", \"RIGHT\",\"R-DOWN\",\"DOWN\",\"L-DOWN\", \"LEFT\" ,\"LEFT-UP\"] \n moves = [[-1, 0],[-1, 1], [0, 1], [1, 1], [1, 0],[1, -1], [0, -1], [-1, -1]]\n action_set = list(range(len(moves))) #index list\n\n # # # # # # # # # # # # # # # # # # # # # # # #\n # # Setting up the synchronization modules # #\n # # # # # # # # # # # # # # # # # # # # # # # #\n\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n Processing module\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\n # Initial variables #\n\n r2_max = 1 #maximum amplitude of nodes\n drift = .8 #rate of drift between coupling parameters\n\n cg_1 = (30/srate)*np.pi #gamma band coupling parameter for input information\n cg_2 = cg_1 + (drift/srate)*2*np.pi #gamma band coupling parameter for actions\n \n damp = 0.3 #damping parameter\n decay = 0.9 #decay parameter\n noise = 0.5 #noise parameter\n\n # Initial matrices #\n\n n_states = len(state_set)\n n_actions= len(action_set)\n\n #Setting up phase code neurons across entire task\n S_Phase = np.zeros((2,states.shape[0],states.shape[1],total_time)) #State phase code units\n A_Phase = np.zeros((2,n_actions,total_time)) #Action phase code units\n\n #Setting up rate code neurons across entire task\n S_Rate = np.zeros((states.shape[0],states.shape[1],total_time)) #State rate code units\n A_Rate = np.zeros((n_actions,total_time)) #Action rate code units\n #State-Action Weight Matrix\n W = np.zeros((states.shape[0],states.shape[1],n_actions))#*0.001 #initial state-action weights\n V = np.zeros((states.shape[0],states.shape[1]))#*0.001 #initial state weights\n\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n Control module\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\n # MFC #\n # Initial variables \n r2_MFC = 0.7 #maximum amplitude MFC node\n damp_MFC = 0.03 # damping parameter MFC\n acc_slope = 10 # MFC slope 
parameter ---> steepness of burst probability distribution\n ct = (5/srate)*2*np.pi #theta band coupling parameter for MFC\n\n #Setting up phase code nodes for the MFC\n MFC = np.zeros((2,total_time))\n #Setting up phase code neuron for MFC -> Bernoulli rate code\n Be = 0 \n \"\"\"When the be value as the rate code of MFC\n reaches certain threshold the MFC will send a burst to coupled neurons\"\"\"\n\n # LFC #\n #Module indicating which states should be initiate action-state synchronization\n LFC = np.zeros((states.shape[0],states.shape[1],n_steps))\n\n #Module that gives the right indices to synchronize\n LFC_sync = 0\n\n\n\n # # # # # # # # # # # # # #\n # # Simulation # #\n # # # # # # # # # # # # # #\n\n # Logging dependent variables\n Hit = np.zeros((total_time,n_steps,n_trials)) #log when there is a burst from the MFC\n # Goal_reach = np.zeros((n_steps,n_trials)) #record if goal is reached \n # Move = np.zeros((n_steps,n_trials)) #record move\n # Bernoulli = np.zeros((total_time,n_steps,n_trials)) #Logging the bernoulli process variables (should be in between -.8 and .8)\n # pred_err = np.zeros((states.shape[0],states.shape[1],n_steps,n_trials)) #logging the prediction error\n trial_length = np.zeros((n_trials))\n\n # Recording sync\n sync = np.zeros((n_states,n_actions,n_steps,n_trials)) \n\n \"\"\" L O O P \"\"\"\n\n exploration = 0\n exploration_intent =0\n sync_fail=0\n greedy=0\n for trial in range(n_trials):\n \"\"\"A trial is considered as each journey the actor makes until the goal\n or until it runs out of steps\"\"\"\n at_goal = False\n start_loc = [1,int(states.shape[1]-2)] #start in the top left\n step = 0 \n S_Phase[:,:,:,0] = (2*np.random.random_sample((2,states.shape[0],states.shape[1])))-1 # random starting points processing module\n A_Phase[:,:,0] = (2*np.random.random_sample((2,n_actions)))-1 # idem\n while not at_goal:\n #starting location at first trial\n if step == 0:\n current_loc = start_loc\n else:\n S_Phase[:,:,:,0] = S_Phase[:,:,:,total_time-1] # random starting points processing module\n A_Phase[:,:,0] = A_Phase[:,:,total_time-1] # idem\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n Synchronization\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\n \n #phase reset\n MFC[:,0]=np.ones((2))*r2_MFC \n\n\n # LFC setting instruction per step: each state is an input\n LFC[current_loc[0],current_loc[1],step] = 1\n\n # What we want is the lfc to indicate the state and then have the LFC sync pro actively select an action based on state action value maps\n \n action_to_sync = softmax_action(action_weights=W[current_loc[0],current_loc[1],:],tau=10)\n if action_to_sync in np.where(W[current_loc[0],current_loc[1],:]== max(W[current_loc[0],current_loc[1],:]))[0]:\n greedy+=0\n else:\n exploration_intent+=1\n \n \n #Which action does LFC sync to current state\n LFC_sync = int(action_to_sync)\n LFC_desync = list(range(len(moves)))\n LFC_desync.pop(LFC_sync) \n\n # The actor makes the move #\n for t in range(total_time-1):\n\n \n #Update phase code neurons for actions and states in processing module\n #State phase code neurons \n S_Phase[:,:,:,t+1] = update_phase(nodes=S_Phase[:,:,:,t], grid = True, radius=r2_max, damp = damp, coupling = cg_1,multiple=True )\n \n #Action phase code neurons\n A_Phase[:,:,t+1] = update_phase(nodes=A_Phase[:,:,t], grid = False, radius=r2_max, damp = damp, coupling = cg_2,multiple=True )\n\n #Update phase code untis of MFC\n MFC[:,t+1] = update_phase(nodes=MFC[:,t], grid = False, radius=r2_MFC, damp=damp_MFC, coupling=ct,multiple=False)\n\n #MFC 
rate code neuron-> Bernoulli process\n\n Be = 1/(1 + np.exp(-acc_slope*(MFC[0,t]-1))) # Bernoulli process \n #Bernoulli[time,step,trial] = Be # logging Be value\n\n p = random.random()\n\n if p < Be:\n\n Gaussian = np.random.normal(size = [1,2]) #noise factor as normal distribution\n #Hit[tijd,step,trial] = 1\n \n \n x, y = current_loc[1], current_loc[0]\n\n #the LFC decides which state is paired with which actions\n\n if LFC[y,x,step]:\n #The state the actor is in receives a burst because it is the only input\n S_Phase[:,y,x,t+1] = decay*S_Phase[:,y,x,t] + Gaussian\n\n # and all the actions that are to be synchronized to that state receive a burst\n if type(LFC_sync) is int:\n A_Phase[:,LFC_sync,t+1] = decay*A_Phase[:,LFC_sync,t] + Gaussian\n \n # Desynchronize all other actions !\n for node in LFC_desync:\n A_Phase[:,int(node),t+1] = decay*A_Phase[:,int(node),t] - Gaussian*noise\n\n #Updating rate code units\n #Only the rate code neuron of a single state is updated because the actor can only be in one place at the same time\n S_Rate[current_loc[0],current_loc[1],t]= (1/(1+np.exp(-5*S_Phase[0,current_loc[0],current_loc[1],t]-0.6)))\n A_Rate[:,t]=(S_Rate[current_loc[0],current_loc[1],t]*(W[current_loc[0],current_loc[1],:]+1))*(1/(1+np.exp(-5*A_Phase[0,:,t]-0.6)))\n #A_Rate[:,t]=(S_Rate[current_loc[0],current_loc[1],t])*(1/(1+np.exp(-5*A_Phase[0,:,t]-0.6)))\n \n # select action\n action_index = int(np.argmax(np.sum(A_Rate[:,:],axis=1)))\n if action_index in np.where(W[current_loc[0],current_loc[1],:] == max(W[current_loc[0],current_loc[1],:]))[0]:\n greedy+=1\n else:\n exploration+=1\n\n if action_index != LFC_sync:\n sync_fail+=1\n \n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n Learning\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\n #update location\n new_loc= update_location(grid = states, loc=current_loc,move = moves[action_index])\n\n #log coordinates for weight matrices\n coordinates = [current_loc[0], current_loc[1], new_loc[0], new_loc[1], action_index] #location coordinates\n\n #update weights according to TD-learning\n V, W, delta, at_goal = update_weights(param=parameters, index=coordinates, V=V, W=W, states=states, reward_size = reward_size)\n\n\n #update_location\n current_loc = new_loc\n step+=1\n if step ==n_steps:\n #print(\"Agent did not reach goal\")\n break\n \n trial_length[trial] = step \n \n print(\"I took {0} exploratory steps and {1} greedy steps this simulation\".format(exploration,greedy))\n print(\"I intended to explore {} times\".format(exploration_intent))\n print(\"Sync of correct action failed {} times\".format(sync_fail))\n print(\"In this sim I took a total {} steps\".format(np.sum(trial_length)))\n \n time1 = time.perf_counter()\n print(\"For the second model I took {} minutes\".format((time1-time0)/60))\n return trial_length, V", "def doit(inputDir, outputDir=\"./\"):\n\n os.makedirs(outputDir)\n\n postprocessing.draw_map(\n inputDir, os.path.join(outputDir, \"initial_map.pdf\"),\n filePaths, state=\"initial\")\n postprocessing.draw_map(\n inputDir, os.path.join(outputDir, \"final_map.pdf\"),\n filePaths, state=\"final\")\n postprocessing.draw_map(\n inputDir, os.path.join(outputDir, \"hardware_map.pdf\"),\n filePaths, state=None)\n\n # Draw relaxation data\n opsData = pd.read_csv(os.path.join(inputDir, filePaths[\"anneal_ops\"]))\n determinedData = opsData[opsData[\"Determination\"] == 1]\n numberOfPoints = 100\n sliceStep = math.ceil((len(determinedData) - 1) / (numberOfPoints - 1))\n figure, axes = postprocessing.plot_fitness(\n list(determinedData[::sliceStep].index),\n 
list(determinedData[::sliceStep][\"Transformed Clustering Fitness\"]),\n list(determinedData[::sliceStep][\"Transformed Locality Fitness\"]))\n del determinedData\n figure.savefig(os.path.join(outputDir, \"fitness.pdf\"))\n\n # Draw determination data\n figure, axes = postprocessing.plot_determination_histogram(\n opsData[opsData[\"Determination\"] == 0].index, opsData.index.max())\n figure.savefig(os.path.join(outputDir, \"determination.pdf\"))\n\n # Draw hardware node loading data\n loadingData = pd.read_csv(os.path.join(inputDir,\n filePaths[\"h_node_loading\"]))\n figure, axes = postprocessing.plot_loading_histogram(\n loadingData[\"Number of contained application nodes\"])\n figure.savefig(os.path.join(outputDir, \"loading.pdf\"))", "def beam_search(X, u, w, b, relLabels):\n\n candidate_paths = [[] for _ in range(10)] # contains the candidate label sets\n candidate_vals =[[] for _ in range(10)] # contains the label values (-1/1) for each candidate set\n candidate_scores = [0. for _ in range(10)]\n min_score = -1000\n\n iter = 0\n start = 0\n while True:\n # print(\"Iter: \", iter)\n intermediate_paths = {}\n # intermediate_paths_val = []\n interim_scores = []\n hash_table = {}\n\n cnt_paths = 0\n for cp in range(5):\n labels_curr = candidate_paths[cp]\n labels_val_curr = candidate_vals[cp]\n scores_curr = candidate_scores[cp]\n Y = -np.ones((10, 1))\n for lv in range(len(labels_val_curr)):\n Y[labels_curr[lv]] = labels_val_curr[lv]\n\n for l in range(10):\n candidate_interim = labels_curr[:]\n candidate_vals_interim = labels_val_curr[:]\n # if l in labels_curr:\n # continue\n\n temp_relLabels = []\n for lc in range(len(labels_curr)):\n temp_relLabels.extend(relLabels[labels_curr[lc]])\n\n # temp_relLabels = np.array(list(set(temp_relLabels)))\n temp_relLabels = np.array(list(set(relLabels[l]).intersection(set(labels_curr))))\n model_pos = returnModelVal(X, Y, 1.0, u[l], u[l], b[l][0], np.array(temp_relLabels))\n candidate_interim.append(l)\n\n if model_pos < 0:\n # print('hello')\n candidate_vals_interim.append(-1)\n interim_scores.append(-model_pos)\n else:\n candidate_vals_interim.append(1)\n interim_scores.append(model_pos)\n\n hash_table[cnt_paths] = candidate_interim\n intermediate_paths[cnt_paths] = candidate_vals_interim\n cnt_paths += 1\n # For the first iteration, just iterate once - all labels in one iteration\n if start == 0:\n start = 1\n break\n\n temp_paths = intermediate_paths\n interim_zip = zip(intermediate_paths, interim_scores)\n sorted_scores = sorted(interim_zip, key=lambda x: x[1], reverse=True)[:5]\n intermediate_paths, scores = zip(*sorted_scores)\n\n temp_cand = []\n temp_val = []\n for i in range(len(intermediate_paths)):\n temp_cand.append(hash_table[intermediate_paths[i]])\n temp_val.append(temp_paths[intermediate_paths[i]])\n # candidate_scores[i] += scores[i]\n\n candidate_paths = temp_cand\n candidate_vals = temp_val\n print(candidate_paths)\n print(candidate_vals)\n # print(scores)\n # candidate_scores = scores\n\n # Exit condition from loop\n # if max(interim_scores) < min_score:\n # break\n #\n # min_score = min(interim_scores)\n\n iter += 1\n if iter > 5:\n break\n\n candidate_dict = {}\n for i in range(5):\n for c in range(len(candidate_paths[i])):\n if candidate_paths[i][c] not in candidate_dict:\n candidate_dict[candidate_paths[i][c]] = candidate_vals[i][c]\n elif candidate_dict[candidate_paths[i][c]] != 2:\n if candidate_dict[candidate_paths[i][c]] != candidate_vals[i][c]:\n candidate_dict[candidate_paths[i][c]] = 2.\n\n print(candidate_dict)\n 
exit()\n return candidate_dict", "def test_for_grader():\n test_map1 = np.array([\n [1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 0, 1, 0, 0, 0, 1, 0, 1],\n [1, 0, 1, 0, 1, 0, 1, 0, 1],\n [1, 0, 1, 0, 1, 0, 1, 0, 1],\n [1, 0, 1, 0, 1, 0, 1, 0, 1],\n [1, 0, 1, 0, 1, 0, 1, 0, 1],\n [1, 0, 1, 0, 1, 0, 1, 0, 1],\n [1, 0, 1, 0, 1, 0, 1, 0, 1],\n [1, 0, 0, 0, 1, 0, 0, 0, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1]])\n x_spacing1 = 1\n y_spacing1 = 1\n start1 = np.array([[1.5], [1.5], [0]])\n goal1 = np.array([[7.5], [1], [0]])\n path1 = dijkstras(test_map1,x_spacing1,y_spacing1,start1,goal1)\n s = 0\n for i in range(len(path1)-1):\n s += np.sqrt((path1[i][0]-path1[i+1][0])**2 + (path1[i][1]-path1[i+1][1])**2)\n print(\"Path 1 length:\")\n print(s)\n\n\n test_map2 = np.array([\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 1],\n [1, 1, 1, 1, 1, 1, 1, 1]])\n start2 = np.array([[0.4], [0.4], [1.5707963267948966]])\n goal2 = np.array([[0.4], [1.8], [-1.5707963267948966]])\n x_spacing2 = 0.2\n y_spacing2 = 0.2\n path2 = dijkstras(test_map2,x_spacing2,y_spacing2,start2,goal2)\n s = 0\n for i in range(len(path2)-1):\n s += np.sqrt((path2[i][0]-path2[i+1][0])**2 + (path2[i][1]-path2[i+1][1])**2)\n print(\"Path 2 length:\")\n print(s)", "def calc_prior_path_prob(self, output_filenm=\"\"):\n logger.info(\"Calculating prior map\")\n programs_map = {}\n unique_cluster_ids = set() # have to do this since the assigned cluster ids doesnt seems to be contiguous or start from 0 or end at K-1\n for c in self.args.cluster_assignments:\n unique_cluster_ids.add(c)\n for c in unique_cluster_ids:\n for _, ((e1, r), e2_list) in enumerate(tqdm((self.train_map.items()))):\n if self.args.cluster_assignments[self.entity_vocab[e1]] != c:\n # if this entity does not belong to this cluster, don't consider.\n continue\n if c not in programs_map:\n programs_map[c] = {}\n if r not in programs_map[c]:\n programs_map[c][r] = {}\n all_paths_around_e1 = self.all_paths[e1]\n nn_answers = e2_list\n for nn_ans in nn_answers:\n programs = self.get_programs(e1, nn_ans, all_paths_around_e1)\n for p in programs:\n p = tuple(p)\n if len(p) == 1:\n if p[0] == r: # don't store query relation\n continue\n if p not in programs_map[c][r]:\n programs_map[c][r][p] = 0\n programs_map[c][r][p] += 1\n for c, r in programs_map.items():\n for r, path_counts in programs_map[c].items():\n sum_path_counts = 0\n for p, p_c in path_counts.items():\n sum_path_counts += p_c\n for p, p_c in path_counts.items():\n programs_map[c][r][p] = p_c / sum_path_counts\n\n if not output_filenm:\n dir_name = os.path.join(args.data_dir, \"data\", self.args.dataset_name, \"linkage={}\".format(self.args.linkage))\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n output_filenm = os.path.join(dir_name, \"path_prior_map.pkl\")\n\n logger.info(\"Dumping path prior pickle at {}\".format(output_filenm))\n with open(output_filenm, \"wb\") as fout:\n pickle.dump(programs_map, fout)", "def find_best_cycle(road_map):\n \n best_cycle = road_map\n best_dist = compute_total_distance(best_cycle)\n i = 0\n \n try: \n while i < 10000:\n attempt_map = shift_cities(best_cycle)\n rand_idx1 = random.randint(0, len(best_cycle)-1)\n rand_idx2 = random.randint(0, len(best_cycle)-1)\n (new_map, distance) = swap_cities(attempt_map, rand_idx1, rand_idx2)\n if distance < best_dist:\n best_dist = distance\n 
best_cycle = new_map\n i += 1\n except Exception as e:\n print('Error with find_best_cycle function: '+str(e))\n \n return best_cycle", "def runtrials_withmap(name, configs, test, N=20, gui=False, rosbag=True):\n print \"*** %s ***\" % name\n scriptdir = \"%s/%s\" % (os.getcwd(), name)\n if os.path.exists(scriptdir):\n print \"WARNING: directory %s already exists, skipping script\" % scriptdir\n return False\n os.mkdir(scriptdir)\n evman = init(scriptdir)\n evman.initBaseSystem(gui)\n evman.initRviz(gui)\n evman.initMapping(scriptdir)\n # --- INITIAL MAP ---\n mapdir = \"%s/map\" % os.getcwd()\n if os.path.exists(mapdir):\n print \"Found map directory, skipping map creation\"\n else:\n os.mkdir(mapdir)\n evman.initSearchMan(dl=0, log=scriptdir)\n evman.runTest()\n evman.stopSearchMan()\n evman.call(\"rosservice call /next_best_view_node/write_map %s/\" % mapdir)\n for n in range(N):\n psd = \"%s/psd\" % scriptdir\n os.mkdir(psd)\n xypose = None\n for cname, search_kwargs in configs:\n print \"# Evaluating n=%i %s\" % (n, cname)\n logdir = \"%s/%s_%i\" % (scriptdir, cname, n)\n os.mkdir(logdir)\n evman.resetGzworld()\n xypose = evman.setCamera(xypose)\n evman.call(\"rosservice call /next_best_view_node/load_map %s/\" % mapdir)\n evman.initSearchMan(psd=psd, log=logdir, **search_kwargs)\n if rosbag:\n evman.recordRosbag(logdir)\n evman.runTest(**test)\n evman.stopRosbag()\n evman.stopSearchMan()\n # --- CLEANUP ---\n try:\n shutil.rmtree(psd)\n except OSError:\n print \"WARNING: Expected to see persistent_samples, but there were none!\"\n evman.shutdown()\n return True", "def _run_test_case(radio, lines):\n calc_reachable_surface_and_people(radio, lines)", "def solve(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix, params=[]):\n locations = student_utils.convert_locations_to_indices(list_of_locations, list_of_locations)\n homes = student_utils.convert_locations_to_indices(list_of_homes, list_of_locations)\n start = list_of_locations.index(starting_car_location)\n\n start_time = time.time()\n\n if params[0] == 'naive':\n car_path, drop_off = naive_solver(locations, homes, start, adjacency_matrix)\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return car_path, drop_off\n elif params[0] == 'greedy':\n car_path, drop_off = greedy_solver(locations, homes, start, adjacency_matrix)\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return car_path, drop_off\n elif params[0] == 'three_opt':\n car_path, drop_off = three_opt_solver(locations, homes, start, adjacency_matrix)\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return car_path, drop_off\n elif params[0] == 'ant_colony':\n car_path, drop_off = ant_colony(locations, homes, start, adjacency_matrix)\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return car_path, drop_off\n elif params[0] == 'greedy_clustering_three_opt':\n car_path, drop_off = greedy_clustering_three_opt(locations, homes, start, adjacency_matrix, int(params[1]))\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return car_path, drop_off\n elif params[0] == 'mst':\n car_path, drop_off = mst_solver(locations, homes, start, adjacency_matrix)\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return car_path, drop_off\n elif params[0] == 'two_opt':\n car_path, drop_off = two_opt_solver(locations, homes, start, adjacency_matrix)\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return car_path, drop_off\n elif params[0] == 'greedy_clustering_two_opt':\n car_path, 
drop_off = greedy_clustering_two_opt(locations, homes, start, adjacency_matrix, int(params[1]))\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return car_path, drop_off\n else:\n pass", "def optimizedRoutePossibilities(routes,cities):\n\tgraph = createOptimizedGraph(routes)\n\tfor couple in permutationsFromOrigin(cities):\n\t\tif couple is not None:\n\t\t\t#yield find_all_paths2(graph,couple[0],couple[1])[0]\n\t\t\tprint(find_all_paths2(graph,couple[0],couple[1])[0])", "def __init__(self, constraints=[], infeasiblePoints=[], feasiblePoints=[], optimalPoint=None, costVector=None, zoom=1.0, frameTime=0.0): \n super(PacmanPlotLP, self).__init__(zoom, frameTime)\n\n xmin = 100000\n ymin = 100000\n xmax = -100000\n ymax = -100000\n\n for point in feasiblePoints:\n if point[0] < xmin:\n xmin = point[0]\n if point[0] > xmax:\n xmax = point[0]\n if point[1] < ymin:\n ymin = point[1]\n if point[1] > ymax:\n ymax = point[1]\n\n if len(feasiblePoints) == 0:\n for point in infeasiblePoints:\n if point[0] < xmin:\n xmin = point[0]\n if point[0] > xmax:\n xmax = point[0]\n if point[1] < ymin:\n ymin = point[1]\n if point[1] > ymax:\n ymax = point[1]\n\n xmin = int(math.floor(xmin)) - 3\n ymin = int(math.floor(ymin)) - 3\n xmax = int(math.ceil(xmax)) + 3\n ymax = int(math.ceil(ymax)) + 3\n width = xmax-xmin+1\n height = ymax-ymin+1\n\n# p = feasiblePoints[2]\n# print(\"p={}\".format(p))\n# print(\"feasible={}\".format(self.pointFeasible(p, constraints)))\n# g = self.cartesianToLayout(xmin, ymin, xmax, ymax, p)\n# print(\"g={}\".format(g))\n# gr = (int(round(g[0])), int(round(g[1])))\n# p2 = self.layoutToCartesian(xmin, ymin, xmax, ymax, gr)\n# print(\"p2={}\".format(p2))\n# print(\"p2 feasible={}\".format(self.pointFeasible(p2, constraints)))\n\n layoutLists = self.blankLayoutLists(width, height)\n\n self.addInfeasibleGhosts(layoutLists, constraints, xmin, ymin, xmax, ymax)\n\n layoutLists = self.changeBorderGhostsToWall(layoutLists)\n \n for point in infeasiblePoints:\n self.addCartesianPointToLayout(layoutLists, point, '.', xmin, ymin, xmax, ymax)\n\n for point in feasiblePoints:\n self.addCartesianPointToLayout(layoutLists, point, 'o', xmin, ymin, xmax, ymax)\n\n if optimalPoint is not None:\n self.addCartesianPointToLayout(layoutLists, optimalPoint, 'P', xmin, ymin, xmax, ymax)\n\n if graphicsUtils._canvas is not None:\n graphicsUtils.clear_screen()\n \n # Initialize GameStateData with blank board with axes \n self.width = width\n self.height = height\n\n self.zoom = min(30.0/self.width, 20.0/self.height)\n self.gridSize = graphicsDisplay.DEFAULT_GRID_SIZE * self.zoom\n\n maxNumGhosts = 10000\n layout = Layout(layoutLists)\n self.blankGameState = GameStateData()\n self.blankGameState.initialize(layout, maxNumGhosts)\n self.initialize(self.blankGameState)\n title = 'Pacman Plot LP'\n graphicsUtils.changeText(self.infoPane.scoreText, title)\n graphicsUtils.refresh()\n graphicsUtils.sleep(1)\n\n if costVector is not None:\n self.shadeCost(layoutLists, constraints, costVector, feasiblePoints, xmin, ymin, xmax, ymax)", "def a_star(my_map, start_locs, goal_locs, h_values, agent, constraints):\n\n ##############################\n # Task 1.1: Extend the A* search to search in the space-time domain\n # rather than space domain, only.\n # Build constraint table if there are constraints\n\n constraint_table = build_constraint_table(constraints, agent)\n\n open_list = []\n closed_list = dict()\n nodes_opened = 0\n max_opened = 500\n start_loc = start_locs[0]\n goal_loc = goal_locs[0]\n if 
len(start_locs) > 1: # If there is more than 1 start location then this is a multi-cell agent\n multi = True\n else:\n multi = False\n\n # determine when the last constraint is on the goal node (or any of the goal node cells in the case of multi-cell)\n earliest_goal_timestep = 0\n if len(constraint_table) != 0:\n for time in [item for item in sorted(list(constraint_table.keys()), reverse=True)]:\n flat_list = [item for sublist in constraint_table[time] for item in sublist]\n if(goal_locs[0] in flat_list):\n earliest_goal_timestep = time\n break\n elif(multi): # if multi cell check if any of the agents goal cells are constrained \n if(goal_locs[1] in flat_list): \n earliest_goal_timestep = time\n break\n\n h_value = h_values[start_loc]\n goal_orientation = orientation(goal_locs)\n\n root = {'loc': start_loc,'orientation': orientation(start_locs), 'g_val': 0, 'h_val': h_value, 'time': 0, 'parent': None}\n push_node(open_list, root)\n closed_list[(root['loc'], root['time'], root['orientation'])] = root\n\n while len(open_list ) > 0 and nodes_opened < max_opened:\n curr = pop_node(open_list)\n nodes_opened = nodes_opened + 1\n \n if curr['loc'] == goal_loc and curr['orientation'] == goal_orientation and curr['time'] >= earliest_goal_timestep:\n return get_path(curr)\n ############################\n child_orient = curr['orientation']\n for dir in range(7):\n if dir < 5:\n child_loc = move(curr['loc'], dir)\n elif not multi: \n continue\n\n if dir == 5:\n # clockwise rotation \n child_orient = curr['orientation'] - 1\n if child_orient < 1:\n child_orient = 4\n if dir == 6:\n # counter-clockwise rotation \n child_orient = curr['orientation'] + 1\n if child_orient > 4:\n child_orient = 1\n \n if test_map(my_map, child_loc[0], child_loc[1], child_orient, dir):\n continue\n \n # check if the head location is constrained \n if is_constrained(curr['loc'], child_loc, child_orient, dir, curr['time'] + 1, constraint_table):\n continue\n\n # if this is a multi cell agent check if the tail is constrained \n if multi:\n # check the next tail location \n row_t, col_t, _, _ = find_tail_positions(curr['loc'][0], curr['loc'][1], curr['orientation'], dir)\n next_row_t, next_col_t, next_row_t_inter, next_col_t_inter = find_tail_positions(child_loc[0], child_loc[1], child_orient, dir)\n\n if is_constrained((row_t,col_t), (next_row_t, next_col_t), child_orient, dir, curr['time'] + 1, constraint_table):\n continue\n\n # if the agent is rotating check if the intermediate location is constrained\n if dir == 5 or dir == 6: \n if is_constrained((row_t,col_t), (next_row_t_inter, next_col_t_inter), child_orient, dir, curr['time'] + 1, constraint_table):\n continue\n\n child = {'loc': child_loc,\n 'orientation': child_orient,\n 'g_val': curr['g_val'] + 1,\n 'h_val': h_values[child_loc] + orient_cost(child_orient, goal_orientation),\n 'time': curr['time'] + 1,\n 'parent': curr}\n\n if (child['loc'], child['time'], child['orientation']) in closed_list:\n existing_node = closed_list[(child['loc'], child['time'], child['orientation'])]\n \n if compare_nodes(child, existing_node):\n closed_list[(child['loc'], child['time'], child['orientation'])] = child\n push_node(open_list, child)\n else:\n closed_list[(child['loc'], child['time'], child['orientation'])] = child\n push_node(open_list, child)\n \n return None # Failed to find solutions", "def mainSchedule():\n\timport time\n\tc1 = Content(1,5,20)\n\tc2 = Content(2,6,30)\n\tc3 = Content(3,5,25)\n\tc1_ = Content(1,1,20)\n\tc5 = Content(5,3,29)\n\tc6 = Content(6,11,50)\n\tc7 = 
Content(7,7,34)\n\tc1__ = Content(1,3,20)\n\tc8 = Content(8,6,10)\n\ta1 = Area('a1',1.0)\n\ta2 = Area('a2',0.5)\n\ta3 = Area('a3',0.8)\n\tcontents = [c1,c2,c3,c1_,c5,c6,c7,c1__,c8]\n\tareas = [a1,a2,a3]\n\tsol_schedule = Schedule_solution()\n\tprint \"random sampling schedule:\\n\"\n\ttime_r = time.time()\n\tschedule_sols = sol_schedule.schedule_randomSampling(contents,areas)\n\tprint \"running time,\",time.time()-time_r\n\tprint \"local search schedule:\"\n\ttime_l = time.time()\n\tschedule_sols_local = sol_schedule.schedule_localSearch(contents,areas)\n\tprint \"running time,\",time.time()-time_l\n\tsol_selection = Selection_solution()\n\tsol_selection.select_bruteforce(4,*schedule_sols) #argument unpacking", "def __init__(self, \n nd = 2, \n goal = np.array([1.0,1.0]),\n state_bound = [[0,1],[0,1]],\n nA = 4,\n action_list = [[0,1],[0,-1],[1,0],[-1,0]],\n<<<<<<< HEAD:archive-code/puddleworld.py\n ngrid = [10.0,10.0],\n maxStep = 40):\n ngrid = [40, 40]\n x_vec = np.linspace(0,1,ngrid[0])\n y_vec = np.linspace(0,1,ngrid[1])\n for x in x_vec:\n for y in y_vec:\n if ~self.inPuddle([x,y]):\n puddle.append([x,y])\n # puddle is a closed loop \n outpuddlepts = np.asarray(puddle)\n \"\"\"\n\n\n # Horizontal wing of puddle consists of \n # 1) rectangle area xch1<= x <=xc2 && ych1-radius <= y <=ych2+radius\n # (xchi,ychi) is the center points (h ==> horizantal)\n # x, y = state[0], state[1]\n xch1, ych1 = 0.3, 0.7\n xch2, ych2 = 0.65, ych1\n radius = 0.1\n\n\n #Vertical wing of puddle consists of \n # 1) rectangle area xcv1-radius<= x <=xcv2+radius && ycv1 <= y <= ycv2\n # where (xcvi,ycvi) is the center points (v ==> vertical)\n xcv1 = 0.45; ycv1=0.4;\n xcv2 = xcv1; ycv2 = 0.8;\n\n # % 2) two half-circle at end edges of rectangle\n \n # POINTS ON HORIZANTAL LINES OF PUDDLE BOUNDARY\n for x in np.arange(xch1,xcv1-radius,self.meshsize[0]/2):\n puddle.append([x,ych1-radius])\n puddle.append([xcv1-radius,ych1-radius])\n \n for x in np.arange(xcv1+radius,xch2,self.meshsize[0]/2):\n puddle.append([x,ych1-radius])\n \n for x in np.arange(xch1,xcv1-radius,self.meshsize[0]/2):\n puddle.append([x,ych1+radius])\n \n puddle.append([xcv1-radius,ych1+radius])\n\n\n for x in np.arange(xcv1+radius,xch2,self.meshsize[0]/2):\n puddle.append([x,ych1+radius])\n\n # POINTS ON VERTICAL LINES OF PUDDLE BOUNDARY\n for y in np.arange(ycv1,ych1-radius,self.meshsize[1]/2):\n puddle.append([xcv1-radius,y])\n \n for y in np.arange(ycv1,ych1-radius,self.meshsize[1]/2):\n puddle.append([xcv1+radius,y])\n \"\"\"\n for y in np.arrange():\n puddle.append([])\n \n for y in np.arrange():\n puddle.append([])\n \"\"\"\n\n # HALF CIRCLES\n ngridTheta = 10\n thetaVec = np.linspace(0,pi,ngridTheta)\n\n for t in thetaVec:\n puddle.append([xch1+radius*np.cos(pi/2+t),ych1+radius*np.sin(pi/2+t)])\n\n for t in thetaVec:\n puddle.append([xch2+radius*np.cos(-pi/2+t),ych2+radius*np.sin(-pi/2+t)])\n\n for t in thetaVec:\n puddle.append([xcv1+radius*np.cos(pi+t),ycv1+radius*np.sin(pi+t)])\n\n for t in thetaVec:\n puddle.append([xcv2+radius*np.cos(t),ycv2+radius*np.sin(t)])\n\n \n outpuddlepts = np.asarray(puddle)\n return outpuddlepts", "def main(input_scenario):\n\n print(\"Program running for demand scenario \" + str(input_scenario))\n\n policy_f = open(\"./Policy/policy_result\" + str(input_scenario) + \".csv\", \"w\", newline='')\n policy_writer = csv.writer(policy_f)\n\n # Read case data\n ds, orig_fl = get_data_case(input_scenario, htime, zone)\n\n # Initialize operator location\n location = initialize_location(zone, vec)\n\n # Initialize 
cluster\n assignment, centre = initialize_cluster(cluster, zone, mindis)\n\n # Run\n # Timestep 0\n ds = update_ds(zone, cp, ds, mindis, orig_fl[0])\n\n # Timestep 1:\n for timestep in range(1, htime):\n\n print(\"Running for timestep \" + str(timestep))\n\n # Extract demand flows in current timestep\n train_fl = [fl[ts][timestep] for ts in range(trainsample)]\n\n # Compute total demand flows in and out of each station\n train_fl_out = [[0 for s in range(zone)] for ts in range(trainsample)]\n train_fl_in = [[0 for s in range(zone)] for ts in range(trainsample)]\n\n for ts in range(trainsample):\n for s in range(zone):\n train_fl_out[ts][s] = sum(train_fl[ts][s])\n for z in range(zone):\n train_fl_in[ts][s] += train_fl[ts][z][s]\n\n # Modify cluster based on demand flows\n assignment = modify_cluster(cluster, assignment, centre, zone, cp, ds, mindis, trainsample, train_fl)\n print(\"Clusters generated.\")\n\n # Initialize global intra-cluster rebalancing result\n yp_total = [0.0 for s in range(zone)]\n yn_total = [0.0 for s in range(zone)]\n bn = [[0 for s in range(zone)] for v in range(vec)]\n\n # Intra-cluster rebalancing\n for c in range(cluster):\n\n # Extract local data\n local_list = [s for s in range(zone) if assignment[s] == c]\n\n local_zone = len(local_list)\n\n local_cp = [cp[s] for s in local_list]\n local_ds = [ds[s] for s in local_list]\n\n local_vec_list = []\n local_vec = 0\n local_cap = []\n local_location = []\n\n for v in range(vec):\n if location[v].index(1) in local_list:\n local_vec_list.append(v)\n local_vec += 1\n local_cap.append(cap[v])\n local_location.append([1 if (s == location[v].index(1)) else 0 for s in local_list])\n \n local_dis = [[dis[s][z] for z in local_list] for s in local_list]\n\n local_train_fl_out = [[train_fl_out[ts][s] for s in local_list] for ts in range(trainsample)]\n local_train_fl_in = [[train_fl_in[ts][s] for s in local_list] for ts in range(trainsample)]\n\n # Apply local rebalance\n status, local_yp_total, local_yn_total, local_bn = intra_redeployment(local_zone, local_cp, local_ds, local_vec, local_cap, local_location, local_dis, trainsample, local_train_fl_out, local_train_fl_in, dismaxpick, dismaxdrop)\n\n # Make error report if no solution\n if status == 1:\n error_f = open(\"./error_report.csv\", \"a\", newline='')\n error_writer = csv.writer(error_f)\n error_writer.writerow([str(cluster), str(input_scenario), str(timestep), str(c)])\n error_f.close()\n\n # Translate to global indexing\n for s in range(local_zone):\n yp_total[local_list[s]] = local_yp_total[s]\n yn_total[local_list[s]] = local_yn_total[s]\n \n for v in range(local_vec):\n for s in range(local_zone):\n bn[local_vec_list[v]][local_list[s]] = local_bn[v][s]\n \n print(\"Cluster done is: \" + str(c))\n \n for s in range(zone):\n policy_writer.writerow([str(timestep), str(s), str(yp_total[s]), str(yn_total[s])])\n \n # Update ds\n update_ds(zone, cp, ds, mindis, orig_fl[timestep], yp_total, yn_total)\n\n # Update operator location\n for v in range(vec):\n for s in range(zone):\n if round(bn[v][s]) == 1:\n location[v][s] = 1\n else:\n location[v][s] = 0\n \n print(\"Timestep done is: \" + str(timestep))\n \n policy_f.close()", "def main():\r\n test = TesterNeighbour()\r\n test.setUp()\r\n test.test_result_n()\r\n print(\"result_of_algorithm_test - passed\")", "def postal_problem(grading=False):\n if grading:\n print \"n_neighbors = 1, weights = 'distance':\\t0.903\"\n print \"n_neighbors = 1, weights = 'uniform' :\\t0.903\"\n print \"n_neighbors = 4, weights = 
'distance':\\t0.906\"\n print \"n_neighbors = 4, weights = 'uniform' :\\t0.89\"\n print \"n_neighbors = 10, weights = 'distance':\\t0.912 (best)\"\n print \"n_neighbors = 10, weights = 'uniform' :\\t0.903\"\n return\n\n labels, points, testlabels, testpoints = np.load('PostalData.npz').items()\n\n def trial(n, w):\n nbrs = neighbors.KNeighborsClassifier(n_neighbors=n, weights=w, p=2)\n nbrs.fit(points[1], labels[1])\n prediction = nbrs.predict(testpoints[1])\n return np.average(prediction/testlabels[1])\n\n print \"n_neighbors = 1, weights = 'distance':\\t\", trial(1, 'distance')\n print \"n_neighbors = 1, weights = 'uniform' :\\t\", trial(1, 'uniform')\n print \"n_neighbors = 4, weights = 'distance':\\t\", trial(4, 'distance')\n print \"n_neighbors = 4, weights = 'uniform' :\\t\", trial(4, 'uniform')\n print \"n_neighbors = 10, weights = 'distance':\\t\", trial(10, 'distance')\n print \"n_neighbors = 10, weights = 'uniform' :\\t\", trial(10, 'uniform')", "def main(ft_setups, ft_strategies):\n\n num_procs = 16\n\n # initialize level parameters\n level_params = dict()\n level_params['restol'] = 1e-09\n\n # initialize step parameters\n step_params = dict()\n step_params['maxiter'] = 50\n\n # initialize space transfer parameters\n space_transfer_params = dict()\n space_transfer_params['finter'] = True\n space_transfer_params['rorder'] = 2\n space_transfer_params['iorder'] = 6\n\n # initialize sweeper parameters\n sweeper_params = dict()\n sweeper_params['quad_type'] = 'RADAU-RIGHT'\n sweeper_params['num_nodes'] = [3]\n\n # initialize controller parameters\n controller_params = dict()\n controller_params['logger_level'] = 30\n\n for setup in ft_setups:\n if setup == 'HEAT':\n # initialize problem parameters\n problem_params = dict()\n problem_params['nu'] = 0.5\n problem_params['freq'] = 1\n problem_params['nvars'] = [255, 127]\n problem_params['bc'] = 'dirichlet-zero'\n\n level_params['dt'] = 0.5\n\n space_transfer_params['periodic'] = False\n\n # fill description dictionary for easy step instantiation\n description = dict()\n description['problem_class'] = heatNd_forced # pass problem class\n description['problem_params'] = problem_params # pass problem parameters\n description['sweeper_class'] = imex_1st_order # pass sweeper (see part B)\n description['sweeper_params'] = sweeper_params # pass sweeper parameters\n description['level_params'] = level_params # pass level parameters\n description['step_params'] = step_params # pass step parameters\n description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class\n description['space_transfer_params'] = space_transfer_params # pass paramters for spatial transfer\n\n # setup parameters \"in time\"\n t0 = 0.0\n Tend = 8.0\n\n elif setup == 'ADVECTION':\n # initialize problem parameters\n problem_params = dict()\n problem_params['c'] = 1.0\n problem_params['nvars'] = [256, 128]\n problem_params['freq'] = 2\n problem_params['order'] = 2\n problem_params['bc'] = 'periodic' # boundary conditions\n\n level_params['dt'] = 0.125\n\n space_transfer_params['periodic'] = True\n\n # fill description dictionary for easy step instantiation\n description = dict()\n description['problem_class'] = advectionNd # pass problem class\n description['problem_params'] = problem_params # pass problem parameters\n description['sweeper_class'] = generic_implicit # pass sweeper (see part B)\n description['sweeper_params'] = sweeper_params # pass sweeper parameters\n description['level_params'] = level_params # pass level parameters\n 
description['step_params'] = step_params # pass step parameters\n description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class\n description['space_transfer_params'] = space_transfer_params # pass paramters for spatial transfer\n\n # setup parameters \"in time\"\n t0 = 0.0\n Tend = 2.0\n\n else:\n raise NotImplementedError('setup not implemented')\n\n # do a reference run without any faults to see how things would look like (and to get maxiter/ref_niter)\n ft.strategy = 'NOFAULT'\n\n controller = controller_nonMPI_hard_faults(\n num_procs=num_procs, controller_params=controller_params, description=description\n )\n\n # get initial values on finest level\n P = controller.MS[0].levels[0].prob\n uinit = P.u_exact(t0)\n\n # call main function to get things done...\n uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)\n\n # stats magic: get iteration counts to find maxiter/niter\n sortedlist_stats = get_sorted(stats, level=-1, type='niter', sortby='process')\n ref_niter = max([item[1] for item in sortedlist_stats])\n\n print('Will sweep over %i steps and %i iterations now...' % (num_procs, ref_niter))\n\n # loop over all strategies\n for strategy in ft_strategies:\n ft_iter = range(1, ref_niter + 1)\n ft_step = range(0, num_procs)\n\n print('------------------------------------------ working on strategy ', strategy)\n\n iter_count = np.zeros((len(ft_step), len(ft_iter)))\n\n # loop over all steps\n xcnt = -1\n for step in ft_step:\n xcnt += 1\n\n # loop over all iterations\n ycnt = -1\n for iter in ft_iter:\n ycnt += 1\n\n ft.hard_step = step\n ft.hard_iter = iter\n ft.strategy = strategy\n\n # call main function to get things done...\n uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)\n\n # stats magic: get iteration counts to find maxiter/niter\n sortedlist_stats = get_sorted(stats, level=-1, type='niter', sortby='process')\n niter = max([item[1] for item in sortedlist_stats])\n iter_count[xcnt, ycnt] = niter\n\n print(iter_count)\n\n np.savez(\n 'data/' + setup + '_results_hf_' + strategy,\n iter_count=iter_count,\n description=description,\n ft_step=ft_step,\n ft_iter=ft_iter,\n )", "def a_star_planning(start_x, start_y, goal_x, goal_y, id):\n # extract the index of start node, goal node and obstacles\n start = Point(round(start_x/grid_size), round(start_y/grid_size), 0.0, -1, [0,0,0])\n goal = Point(round(goal_x/grid_size), round(goal_y/grid_size), 0.0, -1, [0,0,0])\n if not_legal(goal, id):\n print ('not a legal goal')\n return False\n \n # time.sleep(10)\n\n # create the open list and close list to store nodes\n openset, closeset = deque(), deque()\n openset.append(start)\n\n while True:\n # find out the min f node to explore\n\n current_node = min(openset,\n key=lambda node: node.g + calculate_heuristic(node,goal))\n\n # pltplt.plot(current_node.x, current_node.y, \"b*\")\n if len(closeset) % 10 == 0:\n plt.pause(0.001)\n\n if current_node.x == goal.x and current_node.y == goal.y:\n print(\"Congratulations! 
You have found the goal!\")\n goal.parent = current_node\n break\n\n # Remove it from the open list\n openset.remove(current_node)\n # Add it to the close list\n closeset.append(current_node)\n\n # Explore the neighbour\n for motion in motions:\n if motion == current_node.parent_motion:\n turn_cost = 0\n elif (motion[0] == -1 * current_node.parent_motion[0]) and (motion[1] == -1 * current_node.parent_motion[1]):\n turn_cost = 1.5\n else:\n turn_cost = 1\n\n node = Point(current_node.x + motion[0],\n current_node.y + motion[1],\n current_node.g + motion[2] + turn_cost,\n current_node,\n motion,\n )\n\n # ignore it if it is in the close list\n flag = False\n for item in closeset:\n if item.x == node.x and item.y == node.y:\n flag = True\n break\n if flag:\n continue\n # ignore it if it is obstacle\n\n if not_legal(node, id):\n continue\n # update its parent if it is the open list\n flag = True\n for item in openset:\n if item.x == node.x and item.y == node.y:\n flag = False\n # if closer, update the parent\n if node.g <= item.g:\n item.g = node.g\n item.parent = node.parent\n item.parent_motion = node.parent_motion\n break\n # add to the open list if it is not in the open list\n if flag:\n openset.append(node)\n\n # generate the final path\n while True:\n route = deque()\n route.append(goal)\n plt.plot(goal.x, goal.y, \"rx\")\n if goal.parent == -1:\n break\n else:\n goal = goal.parent\n route.appendleft(goal)\n # return route\n # return False\n if NEED_DRAW:\n # draw map\n for i in range(map.gridwidth):\n for j in range(map.gridheight):\n if map.grid[1,i,j] >0:\n plt.plot(i, j, \"xc\")\n\n plt.plot(start.x, start.y, \"ro\")\n plt.plot(goal.x, goal.y, \"go\")\n\n for goal in route:\n plt.plot(goal.x, goal.y, \"rx\")\n plt.show()", "def ant_colony(map, alpha=3, beta=4, m=10, rho=0.2, q=1, its_max=20):\n n = len(map)\n tau = np.ones((n, n))\n eta = 1/map.D\n for i in range(n):\n eta[i, i] = 0\n paths_array = np.zeros((m, n), int)\n its = 0\n path_best = np.zeros((its_max, n), int)\n distance_best = np.zeros(its_max)\n\n while its < its_max:\n paths_length = np.zeros(m)\n for i in range(m):\n source = np.random.randint(n)\n visited = []\n unvisited = list(range(n))\n node_now = source\n node_next = -1\n paths_array[i, 0] = source\n\n for j in range(1, n):\n visited.append(node_now)\n unvisited.remove(node_now)\n prob_roulette = np.array([0]*n, dtype=float)\n for k in unvisited:\n prob_roulette[k] = (pow(tau[node_now, k], alpha)\n * pow(eta[node_now, k], beta))\n prob_roulette = prob_roulette/sum(prob_roulette)\n cum_roulette = prob_roulette.cumsum()\n cum_roulette -= np.random.uniform(0, 1)\n node_next = list(cum_roulette >= 0).index(True)\n paths_array[i, j] = node_next\n paths_length[i] += map.D[node_now, node_next]\n node_now = node_next\n paths_length[i] += map.D[node_now, source]\n\n if its == 0:\n distance_best[its] = paths_length.min()\n path_best[its] = paths_array[paths_length.argmin()].copy()\n else:\n if distance_best[its-1] < paths_length.min():\n distance_best[its] = distance_best[its-1]\n path_best[its] = path_best[its-1].copy()\n else:\n distance_best[its] = paths_length.min()\n path_best[its] = paths_array[paths_length.argmin()].copy()\n\n add_tau = np.zeros((n, n))\n\n for i in range(m):\n for j in range(n):\n row = paths_array[i, j]\n col = paths_array[i, (j+1) % n]\n add_tau[row][col] += q/paths_length[i]\n\n tau = (1 - rho)*tau + add_tau\n\n its += 1\n\n return Hamiltonian(path_best[-1], map)", "def main():\n\n config = read_json_file(CONFIG_FILE)\n posititve_path = (\n 
config[\"main\"][\"DATASET_BASE_PATH_DIR\"]\n + config[\"main\"][\"POSITIVE_FILENAME\"]\n )\n negative_path = (\n config[\"main\"][\"DATASET_BASE_PATH_DIR\"]\n + config[\"main\"][\"NEGATIVE_FILENAME\"]\n )\n complexity_factor = config[\"main\"][\"COMPLEXITY_FACTOR\"]\n max_sequences_to_fit_pos = config[\"main\"][\"MAX_SEQUENCES_TO_FIT_POS\"]\n max_sequences_to_fit_neg = config[\"main\"][\"MAX_SEQUENCES_TO_FIT_NEG\"]\n\n input_organisms_path = config[\"main\"][\"INPUT_FILENAME\"]\n mean_nodes = 3.0\n mean_fitness = 150\n positive_dataset = read_fasta_file(posititve_path)\n positive_dataset.sort()\n negative_dataset = read_fasta_file(negative_path)\n print(\"{} {}\".format(len(positive_dataset), len(negative_dataset)))\n\n organism_factory = OrganismFactory(\n config[\"organism\"],\n config[\"organismFactory\"],\n config[\"connector\"],\n config[\"pssm\"],\n )\n\n a_organisms = organism_factory.import_organisms(input_organisms_path)\n # random.shuffle(negativeDataset)\n\n for org in a_organisms:\n\n # org.print()\n nodes = org.count_nodes()\n\n p_1 = org.get_seq_set_fitness(\n positive_dataset[:max_sequences_to_fit_pos]\n )\n n_1 = org.get_seq_set_fitness(\n negative_dataset[:max_sequences_to_fit_neg]\n )\n # p1 = 20\n # n1 = org.getSeqSetFitness(negativeDataset[31:32])\n c_1 = org.get_complexity(mean_nodes, mean_fitness)\n\n # Score\n fitness = p_1 - n_1\n effective_fitness = fitness - complexity_factor * c_1\n print(\n (\n \"ORG {} N: {:.2f} P: {:.2f} N: {:.2f} C: {:.2f} F: {:.2f}\"\n + \" EF: {:.2f}\\n\"\n ).format(org._id, nodes, p_1, n_1, c_1, fitness, effective_fitness)\n )\n\n export_organism(\n org,\n positive_dataset,\n \"{}positive_{}\".format(\n config[\"main\"][\"RESULT_TEST_BASE_PATH_DIR\"], org._id\n ),\n organism_factory,\n )\n # exportOrganism(\n # org,\n # negativeDataset[31:32],\n # \"{}negative_{}\".format(config[\"main\"][\"RESULT_TEST_BASE_PATH_DIR\"], org.ID),\n # organismFactory,\n # )\n\n export_organism(\n org,\n negative_dataset[:50],\n \"{}negative_{}\".format(\n config[\"main\"][\"RESULT_TEST_BASE_PATH_DIR\"], org._id\n ),\n organism_factory,\n )", "def map(initial_aerosol_pops, params):\n from parcel_model.parcel import ParcelModelError\n import time\n\n ## Pull model settings from params\n T0, S0, P0 = [params[s] for s in ('T0', 'S0', 'P0')]\n z_top, dt, max_steps = params['z_top'], params['dt'], params['max_steps']\n\n ## Helper method for re-submitting jobs which fail.\n def resubmit(ps, initial_aerosols, dt, max_steps):\n x = time.time()\n\n alpha, gamma, V = ps\n\n while dt >= 0.001:\n ## Try to run the model\n activation_results = RunParcelModels.simulation_pair(ps, initial_aerosols, V, T0, S0, P0, z_top, dt, max_steps)\n ## If it didn't work, report his and cut the timestep in half\n if not activation_results:\n print \"resubmitting %r with dt=%1.2e\" % (ps, dt/2.,)\n dt = dt/2.\n max_steps = int(max_steps*3.)\n ## If it did work, we're done\n else:\n break\n ## If we still don't have a good result after cutting dt several times,\n ## then report this.\n elapsed = time.time() - x\n if not activation_results:\n print \"FAILED (%1.2e seconds) %r\" % (elapsed, ps)\n else:\n print \"SUCCESS (%1.2e seconds) %r\" % (elapsed, ps)\n return activation_results\n\n results = []\n n, initial_aerosol_pops = initial_aerosol_pops\n n_runs = len(initial_aerosol_pops)\n for i, (initial_aerosols, ps) in enumerate(initial_aerosol_pops):\n print \"EXECUTING RUN %d/%d\" % (i+1, n_runs)\n component_results = {}\n\n ## INDIVIDUAL SPECIES\n param_fail = False\n for aerosol in 
initial_aerosols:\n species = aerosol.species\n\n #activation_results = RunParcelModels.simulation_pair(ps, [aerosol, ], V, T0, S0, P0, z_top, dt, max_steps)\n activation_results = resubmit(ps, [aerosol, ], dt, max_steps)\n\n if not activation_results:\n results.append((ps, None))\n param_fail = True\n break\n else:\n component_results[species] = activation_results\n\n if not param_fail:\n ## FULL MIXTURE\n #activation_results = RunParcelModels.simulation_pair(ps, initial_aerosols, V, T0, S0, P0, z_top, dt, max_steps)\n activation_results = resubmit(ps, initial_aerosols, dt, max_steps)\n\n if not activation_results:\n results.append((ps, None))\n continue\n\n component_results['mixture'] = activation_results\n results.append((ps, component_results))\n yield (n, results)", "def run_and_evaluate():\n tsp_problems = read_all_problems()\n # Empty list of metrics\n results = []\n for problem in tqdm.tqdm(tsp_problems):\n # As random factors are involved repeat experiments a couple of times\n best_routes_base = []\n best_routes_af = []\n best_routes_ms = []\n base_times = []\n af_times = []\n ms_times = []\n for i in range(10):\n # Base solution\n start_time = timeit.default_timer()\n best_route_base = solve_tsp_basic(problem)\n base_time = timeit.default_timer() - start_time\n best_routes_base.append(Fitness(route=best_route_base).route_distance())\n base_times.append(base_time)\n\n # AF clustering solution\n start_time = timeit.default_timer()\n best_route_af = solve_tsp_affinity_propagation(problem)\n af_time = timeit.default_timer() - start_time\n best_routes_af.append(Fitness(route=best_route_af).route_distance())\n af_times.append(af_time)\n\n # MS solution\n start_time = timeit.default_timer()\n best_route_ms = solve_mean_shift(problem)\n ms_time = timeit.default_timer() - start_time\n best_routes_ms.append(Fitness(route=best_route_ms).route_distance())\n ms_times.append(ms_time)\n\n results.append(\n {\n \"problem name\": problem.name,\n \"optimal solution\": find_route_optimal_route_length(problem),\n \"baseline tour length\": mean(best_routes_base),\n \"af clustering tour length\": mean(best_routes_af),\n \"ms clustering tour length\": mean(best_routes_ms),\n \"baseline algorithm time\": mean(base_times),\n \"af clustering algorithm time\": mean(af_times),\n \"ms clustering algorithm time\": mean(ms_times),\n }\n )\n # Create dataframe and safe results\n df = pd.DataFrame(results)\n df.to_csv(\"results.csv\", index=False)\n return df", "def main():\n\n ocp = prepare_ocp(\n biorbd_model_path=\"models/cube_and_line.bioMod\",\n n_shooting=30,\n final_time=2,\n initialize_near_solution=True,\n )\n\n # --- Solve the program --- #\n sol = ocp.solve(Solver.IPOPT(show_online_optim=platform.system() == \"Linux\"))\n\n # --- Show results --- #\n sol.animate()", "def main_script(input_vertices, input_num, input_prob, input_run=0):\n\n\tinput_vert_map = choose_values(input_vertices, input_run)\n\tinput_Q = coupler(input_vert_map, input_num)\n\tinput_results = solve_on_isakov(input_Q)\n\tinput_run_first_group, input_run_second_group = obtain_groups(input_results, input_vertices)\n\tinput_num_edges = count_edges(input_run_first_group, input_run_second_group, input_prob)\n\treturn input_num_edges, input_run_first_group, input_run_second_group", "def test():\n test_map1 = np.array([\n [1, 1, 1, 1, 1, 1, 1, 1],\n [1, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 
1],\n [1, 0, 0, 0, 0, 0, 0, 1],\n [1, 1, 1, 1, 1, 1, 1, 1]])\n x_spacing1 = 0.13\n y_spacing1 = 0.2\n start1 = np.array([[0.3], [0.3], [0]])\n goal1 = np.array([[0.6], [1], [0]])\n path1 = dijkstras(test_map1,x_spacing1,y_spacing1,start1,goal1)\n true_path1 = np.array([\n [ 0.3 , 0.3 ],\n [ 0.325, 0.3 ],\n [ 0.325, 0.5 ],\n [ 0.325, 0.7 ],\n [ 0.455, 0.7 ],\n [ 0.455, 0.9 ],\n [ 0.585, 0.9 ],\n [ 0.600, 1.0 ]\n ])\n if np.array_equal(path1,true_path1):\n print(\"Path 1 passes\")\n\n test_map2 = np.array([\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 0, 0, 1],\n [1, 0, 0, 1, 1, 0, 0, 1],\n [1, 0, 0, 1, 1, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 1],\n [1, 1, 1, 1, 1, 1, 1, 1]])\n start2 = np.array([[0.5], [1.0], [1.5707963267948966]])\n goal2 = np.array([[1.1], [0.9], [-1.5707963267948966]])\n x_spacing2 = 0.2\n y_spacing2 = 0.2\n path2 = dijkstras(test_map2,x_spacing2,y_spacing2,start2,goal2)\n true_path2 = np.array([[ 0.5, 1.0],\n [ 0.5, 1.1],\n [ 0.5, 1.3],\n [ 0.5, 1.5],\n [ 0.7, 1.5],\n [ 0.9, 1.5],\n [ 1.1, 1.5],\n [ 1.1, 1.3],\n [ 1.1, 1.1],\n [ 1.1, 0.9]])\n if np.array_equal(path2,true_path2):\n print(\"Path 2 passes\")", "def main():\n\n ocp = prepare_ocp(biorbd_model_path=\"models/pendulum.bioMod\", final_time=1, n_shooting=100)\n\n # --- Solve the program --- #\n solver = Solver.ACADOS()\n solver.set_maximum_iterations(500)\n sol = ocp.solve(solver=solver)\n\n # --- Show results --- #\n sol.print_cost()\n sol.graphs()\n sol.animate()", "def find_roads(\n probability_map,\n *,\n input_threshold=0.3,\n max_roads=None,\n min_strength=0.17, #0.2,\n num_angles=720,\n roads_min_angle=np.pi/8,\n roads_min_distance=50,\n debugimage=None, # for debugging ...\n debugprint=None): # for debugging ...\n\n # shorthand\n im = probability_map\n\n # the angles to be used in the Hough transform\n theta = np.linspace(-np.pi/2, np.pi/2, num_angles)\n\n # normalize almost anything to grayscale\n if im.ndim == 3:\n if im.shape[2] == 4:\n im = im[:,:,:3] # throw away alpha\n im = im.mean(axis=2) # convert RGB to grayscale\n\n if debugimage: debugimage('original', im, 0, 1, 'jet')\n\n assert im.ndim == 2\n\n if debugimage:\n hspace, _, _ = hough_line(im, theta)\n debugimage('original_hough_hspace', hspace)\n\n # create monochrome/binary input map\n im[im >= input_threshold] = 1\n im[im < input_threshold] = 0\n\n if debugimage: debugimage('threshold_applied', im)\n\n # Hough transform\n hspace, angles, distances = hough_line(im, theta)\n\n hspace = np.asarray(hspace, dtype=np.float32)\n hspace /= hspace.max() # normalize\n\n if debugimage: debugimage('hough_hspace', hspace)\n\n # convolution filters, rectangular, tuned for widths of 12, 32, 48 pixels\n w12 = np.concatenate([-np.ones((6)), np.ones((12)), -np.ones((6))])\n w32 = np.concatenate([-np.ones((16)), np.ones((32)), -np.ones((16))])\n w48 = np.concatenate([-np.ones((24)), np.ones((48)), -np.ones((24))])\n\n # convolve\n im12 = ndi.filters.convolve1d(hspace, w12, axis=0)\n im32 = ndi.filters.convolve1d(hspace, w32, axis=0)\n im48 = ndi.filters.convolve1d(hspace, w48, axis=0)\n\n # normalize signal strengths for different road widths\n im12 /= 12\n im32 /= 32\n im48 /= 48\n\n ca = (None, None, 'jet',)\n if debugimage: debugimage('hough_hspace_conv12', im12, *ca)\n if debugimage: debugimage('hough_hspace_conv32', im32, *ca)\n if debugimage: debugimage('hough_hspace_conv48', im48, *ca)\n if debugimage:\n debugimage('hough_hspace_combined',\n 
np.hstack([im12, im32, im48]), *ca)\n\n # compute possible roads of all widths, sorted by signal strength\n seq = np.stack((im12, im32, im48)).flatten()\n sor = np.argsort(seq)\n roads = np.column_stack((\n seq,\n np.tile(np.tile(angles, distances.shape[0]), 3),\n np.tile(np.repeat(distances, angles.shape[0]), 3),\n np.repeat([12, 32, 48], distances.shape[0] * angles.shape[0])\n ))[sor][::-1]\n\n # columns: strength, angle, distance, width\n found_roads = np.asarray([]).reshape(0, 4)\n\n # find as many as strong roads as desired, while dropping roads that are too\n # similar to roads already found (non-max suppression)\n for i in range(roads.shape[0]):\n if roads[i,0] < min_strength:\n break\n a = roads[i,1]\n d = roads[i,2]\n close = (\n np.logical_or(\n np.logical_and(\n np.abs(found_roads[:,1]-a) < roads_min_angle,\n np.abs(found_roads[:,2]-d) < roads_min_distance),\n np.logical_and(\n np.pi - np.abs(found_roads[:,1]-a) < roads_min_angle,\n np.abs(found_roads[:,2]+d) < roads_min_distance)))\n if not np.any(close):\n found_roads = np.vstack((found_roads, roads[i]))\n if max_roads is not None and found_roads.shape[0] >= max_roads:\n break\n\n return found_roads, im.shape", "def solve(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix, params=[]):\n\n loc_map = {}\n drop_off_dict = {}\n num_home_visited = 0\n\n \"\"\"\n for i in range(len(list_of_locations)):\n loc_map[i] = list_of_locations[0]\n \"\"\"\n\n home_indexes = convert_locations_to_indices(list_of_homes, list_of_locations)\n start = list_of_locations.index(starting_car_location)\n graph, msg = adjacency_matrix_to_graph(adjacency_matrix)\n num_homes = len(list_of_homes)\n\n car_path = []\n all_paths = dict(nx.all_pairs_dijkstra(graph))\n visited = set()\n\n #print(start)\n car_path.append(start)\n current_node = start\n\n if start in home_indexes:\n visited.add(start)\n drop_off_dict[start] = [start]\n num_home_visited += 1\n\n while num_home_visited < num_homes:\n dist_dict = all_paths.get(current_node)[0]\n paths_dict = all_paths.get(current_node)[1]\n\n dist_dict = {k:v for (k,v) in dist_dict.items() if k not in visited and k in home_indexes}\n min_dist = min(dist_dict.values())\n min_list = [k for k in dist_dict.keys() if dist_dict[k] <= min_dist]\n #print(dist_dict.values())\n target = min_list[0]\n drop_off_dict[target] = [target]\n #print(target+1)\n #print(target)\n car_path.pop()\n car_path.extend(paths_dict[target])\n\n visited.add(target)\n current_node = target\n num_home_visited += 1\n\n paths_dict = all_paths.get(current_node)[1]\n car_path.pop()\n car_path.extend(paths_dict[start])\n #print((drop_off_dict.keys()))\n #car_path = [start, ...., start]\n #drop_off_dict = {drop_off_loc: [home1, home2, ...] 
}\n\n return car_path, drop_off_dict", "def find_trajectory(self):\n\n translation,_ = self.trans_listener.lookupTransform(\"/map\", \"/base_footprint\", rospy.Time(0))\n self.x = translation[0]\n self.y = translation[1]\n \n cell_x = int(np.floor(self.x / self.metadata.resolution) + self.w / 2) - self.x_offset\n cell_y = int(np.floor(self.y / self.metadata.resolution) + self.h / 2) - self.y_offset\n\n visited = np.zeros(self.costmap.shape)\n visited[cell_y,cell_x] = 1\n\n to_explore = self.add_neighbors(visited, Node(cell_x,cell_y,0,None))\n to_explore.sort(key=operator.attrgetter('cost'))\n\n # Run modified Dijkstra algorithm\n while to_explore: \n next_node = to_explore.pop(0)\n if next_node.cost == -1:\n print(\"Found goal!\")\n\t\tself.send_final_pose(next_node)\n self.number_of_fails = 0\n self.get_trajectory(next_node)\n return\n \n to_explore = to_explore + self.add_neighbors(visited, next_node)\n to_explore.sort(key=operator.attrgetter('cost'))\n\n self.number_of_fails += 1\n print(\"Failed: %d times % self.number_of_fails\")\n\n if self.number_of_fails >= NUMBER_OF_FAILS:\n print(\"Exiting!\")\n msg = Bool()\n msg.data = True\n self.exp_complete_pub.publish(msg)", "def main():\n try_again = 'y'\n road_map = read_cities(\"city-data.txt\")\n while try_again in {'y', 'Y', 'yes', 'Yes'}:\n print(\"City data\")\n print(\"=========\")\n print_cities(road_map)\n print('\\n')\n print('Searching for best solution...')\n print('\\n')\n best_cycle, best_cycle_dist, best_attempts = find_best_cycle(road_map)\n print(\"Total distance for best attempts\")\n print(\"================================\")\n print(best_attempts)\n print('\\n')\n print(\"Best cycle\")\n print(\"==========\")\n print_map(best_cycle)\n print(\"Total distance = \",round(best_cycle_dist,2))\n print('\\n')\n road_map = best_cycle\n try_again = input(\"Do you want to try again using best cycle so far?\")", "def evaluate_tracking(self) -> Dict[str, Any]:\n eval_scenes = create_splits_scenes(verbose=False)[self.eval_set]\n for scene in tqdm(eval_scenes, disable=not self.verbose):\n scene = self.nusc.get('scene', self.scene_name2tok[scene])\n cur_token, last_token = scene['first_sample_token'], scene['last_sample_token']\n pred_sem, pred_inst, label_sem, label_inst = [None], [None], [None], [None]\n\n while True:\n cur_sample = self.nusc.get('sample', cur_token)\n sd_token = cur_sample['data']['LIDAR_TOP']\n\n # Load the ground truth labels for the point cloud, filter evaluation classes.\n gt_label_file = os.path.join(self.nusc.dataroot, self.nusc.get('panoptic', sd_token)['filename'])\n panoptic_label = load_bin_file(gt_label_file, type='panoptic')\n label_sem.append(self.mapper.convert_label(panoptic_label // 1000))\n label_sem = label_sem[-2:]\n label_inst.append(panoptic_label)\n label_inst = label_inst[-2:]\n\n # Load predictions for the point cloud, filter evaluation classes.\n pred_file = os.path.join(self.results_folder, 'panoptic', self.eval_set, sd_token + '_panoptic.npz')\n panoptic_pred = load_bin_file(pred_file, type='panoptic')\n pred_sem.append(panoptic_pred // 1000)\n pred_sem = pred_sem[-2:]\n pred_inst.append(panoptic_pred)\n pred_inst = pred_inst[-2:]\n\n # Get the confusion matrix between the ground truth and predictions. 
Update the confusion matrix for\n # the sample data into the confusion matrix for the eval set.\n self.evaluator['tracking'].add_batch(scene['name'], pred_sem, pred_inst, label_sem, label_inst)\n if cur_token == last_token:\n break\n cur_token = cur_sample['next']\n\n pat, mean_pq, mean_tq = self.evaluator['tracking'].get_pat()\n mean_ptq, class_all_ptq, mean_sptq, class_all_sptq = self.evaluator['tracking'].get_ptq()\n mean_iou, class_all_iou = self.evaluator['tracking'].getSemIoU()\n lstq, s_assoc = self.evaluator['tracking'].get_lstq()\n mean_motsa, mean_s_motsa, mean_motsp = self.evaluator['tracking'].get_motsa()\n\n results = self.wrap_result_mopt(pat=pat,\n mean_pq=mean_pq,\n mean_tq=mean_tq,\n mean_ptq=mean_ptq,\n class_all_ptq=class_all_ptq,\n mean_sptq=mean_sptq,\n class_all_sptq=class_all_sptq,\n mean_iou=mean_iou,\n class_all_iou=class_all_iou,\n lstq=lstq,\n s_assoc=s_assoc,\n mean_motsa=mean_motsa,\n mean_s_motsa=mean_s_motsa,\n mean_motsp=mean_motsp)\n\n return results", "def main():\r\n start = input(\"Enter the starting state: \").replace(\" \", \"\") # Remove whitespace from states\r\n goal = input(\"Enter the goal state: \").replace(\" \", \"\")\r\n if validate_input(start, goal) == False:\r\n return print(\"Invalid input.\")\r\n\r\n start_list = [] # Convert into list for efficieny\r\n for i in start:\r\n start_list.append(int(i))\r\n\r\n goal_list = [] # Convert into list for efficieny\r\n for i in goal:\r\n goal_list.append(int(i))\r\n\r\n state_inversions = inversions(start_list)\r\n print(\"\\nStart state inversions:\", state_inversions)\r\n goal_inversions = inversions(goal_list)\r\n print(\"Goal state inversions:\", goal_inversions)\r\n\r\n if state_inversions % 2 != goal_inversions % 2: # If polarity is uneven, not solvable\r\n print(\"Unsolvable configurations.\\nPolarities do not match.\")\r\n exit(1)\r\n\r\n heuristic = -1 # Set heuristic option for A* search\r\n while heuristic != 0 and heuristic != 1:\r\n try:\r\n heuristic = int(input(\"\\nEnter heuristic function: \\n0 for Manhattan\\n1 for Eucledian\\n= \"))\r\n except (EOFError, KeyboardInterrupt):\r\n print('Terminating.')\r\n exit()\r\n except (KeyError, ValueError):\r\n print('Invalid option.')\r\n\r\n start_time = time.time() # Start search time\r\n nodes_searched = astar_search(start_list, goal_list, heuristic)\r\n end_time = time.time() # Stop search timer\r\n\r\n print(\"\\nSolved in: {:.2f}\".format(end_time-start_time), \"seconds\")\r\n print(\"Nodes explored:\", nodes_searched)\r\n exit()", "def solve(self):\n\n tracks_copy = self.tracks.copy()\n vehicles_sorted = sorted(self.vehicles, key=lambda x: x.departure_time)\n\n vehicles_added = 0\n while len(vehicles_sorted) != 0:\n best_ratio = - sys.maxsize - 1\n best_track = None\n best_vehicle = None\n shuffle(tracks_copy)\n\n for vehicle in vehicles_sorted:\n for track in tracks_copy:\n if track.add_vehicle(vehicle, self.tracks):\n self.grader.reinitialize_grader()\n goal1 = self.grader.calculate_first_global_goal()\n goal2 = self.grader.calculate_second_global_goal()\n ratio = goal2 / goal1\n if ratio > best_ratio:\n best_ratio = ratio\n best_track = track\n best_vehicle = vehicle\n\n track.remove_last()\n\n if best_vehicle is not None and best_track is not None:\n vehicles_added += 1\n best_track.add_vehicle(best_vehicle, self.tracks)\n vehicles_sorted.remove(best_vehicle)\n else:\n self.grader.reinitialize_grader()\n goal1 = self.grader.calculate_first_global_goal()\n goal2 = self.grader.calculate_second_global_goal()\n if goal1 < 
self.optimal_gg1 and goal2 > self.optimal_gg2:\n self.optimal_gg1 = goal1\n self.optimal_gg2 = goal2\n self.optimal_tracks = self.tracks\n return False\n\n self.grader.reinitialize_grader()\n goal1 = self.grader.calculate_first_global_goal()\n goal2 = self.grader.calculate_second_global_goal()\n print(\"Success:\", goal1, goal2)\n if goal1 < self.best_gg1 and goal2 > self.best_gg2:\n self.best_gg1 = goal1\n self.best_gg2 = goal2\n self.best_tracks = self.tracks\n\n return True", "def main():\n target = 'Coding:Level1'\n output_root = f'problem_5_output/{target.replace(\":\", \"_\")}'\n if not os.path.exists(output_root):\n os.makedirs(output_root, exist_ok=True)\n\n # dictionary of parameter grids, one for each process\n param_grids = {\n 'early_stopping': ParameterGrid([\n {\n 'patience': [15], # , 20, 40]\n },\n ]),\n 'fit': ParameterGrid([\n {\n 'batch_size': [128], # , 64, 128, 256],\n 'epochs': [16], # 20, 50],\n },\n ]),\n 'model_preprocessor': ParameterGrid([\n {\n 'num_unique_words': [5000], # 4000, 1000, 6000, 10000],\n 'max_sequence_length': [150], # 50, 75, 100, 125, 150, 200],\n },\n ]),\n 'model': ParameterGrid([\n # {\n # Dense single hidden layer model hyperparameters:\n # 'name': ['dense_h1'],\n # 'embedded_dims': [8], # , 16, 32, 64, 128, 256],\n # 'num_units_h1': [8], # , 16, 32, 64, 128, 256],\n # 'drop_h1': [None], # , 0.1, 0.2, 0.25, 0.5, 0.75],\n # 'optimizer': ['nadam', 'adam'],\n # 'learning_rate': [None], # , 0.01, 0.001],\n # 'activation': ['relu', 'tanh'],\n # },\n # {\n # Dense double hidden layer model hyperparameters:\n # 'name': ['dense_h2'],\n # 'embedded_dims': [64],\n # 'num_units_h1': [128],\n # 'num_units_h2': [128],\n # 'drop_h1': [None],\n # 'drop_h2': [0.5],\n # 'optimizer': ['nadam'],\n # 'activation': ['relu'],\n # 'learning_rate': [0.01],\n # },\n # {\n # CNN single hidden layer model hyperparameters\n # 'name': ['conv_h1'],\n # 'embedded_dims': [64],\n # 'num_units_h1': [32], # , 64, 256],\n # 'k_conv_h1': [2], # , 3, 4],\n # 'drop_embed': [0.2], # , 0.5],\n # 'activation': ['relu', 'tanh'],\n # 'optimizer': ['adam', 'nadam']\n # },\n # {\n # CNN double hidden layer model hyperparameters\n # 'name': ['conv_h2'],\n # 'embedded_dims': [128], # , 64, 32, 16, 8],\n # 'num_units_h1': [32], # , 64, 128],\n # 'drop_h2': [0.5], # , 0.75, 0.25, 0.1],\n # 'k_conv_h1': [2], # , 3, 4],\n # 'num_units_h2': [128], # , 64, 32, 16, 8],\n # 'drop_embed': [0.2], # , 0.50],\n # 'activation': ['relu'],\n # 'optimizer': ['adam'], # , 'nadam'],\n # },\n # {\n # CNN double hidden layer model hyperparameters\n # 'name': ['conv_h2.1'],\n # 'embedded_dims': [64],\n # 'num_units_h1': [32], # , 64, 128],\n # 'k_conv_h1': [2], # , 3, 4],\n # 'drop_embed': [0.2], # , 0.5],\n # 'activation': ['relu'],\n # 'optimizer': ['adam'], # , 'nadam']\n # },\n # {\n # RNN single hidden layer model hyperparameters\n # 'name': ['rnn_h1'],\n # 'embedded_dims': [64],\n # 'drop_embed': [0.2],\n # 'num_units_h1': [128],\n # 'optimizer': ['nadam'],\n # 'learning_rate': [0.01]\n # },\n # {\n # # LSTM double hidden layer (second layer dense FC) model hyperparameters\n # 'name': ['lstm_h1'],\n # 'embedded_dims': [64],\n # 'drop_embed': [0.2],\n # 'drop_h1': [0.5],\n # 'num_units_h1': [128],\n # 'optimizer': ['nadam'],\n # },\n # {\n # LSTM double hidden layer (second layer dense FC) model hyperparameters\n # 'name': ['lstm_h2'],\n # 'embedded_dims': [64],\n # 'drop_embed': [0.2],\n # 'num_units_h1': [128],\n # 'drop_h1': [0.5],\n # 'num_units_h2': [128],\n # 'optimizer': ['nadam'],\n # 
'activation': ['relu']\n # },\n # {\n # # Bi-directional LSTM single hidden layer model hyperparameters\n # 'name': ['bi_lstm_h1'],\n # 'embedded_dims': [32], # , 64, 128],\n # 'drop_embed': [0.2], # , 0.25, 0.5],\n # 'num_units_h1': [32], # , 64, 128],\n # 'drop_h1': [0.2], # , 0.25, 0.5],\n # 'optimizer': ['nadam', 'adam']\n # },\n # {\n # Bi-directional LSTM double hidden layer (second layer Bi-LSTM) model hyperparameters\n # 'name': ['bi_lstm_h2'],\n # 'embedded_dims': [32], # , 64, 128],\n # 'num_units_h1': [32], # , 64, 128],\n # 'num_units_h2': [32], # , 64, 128],\n # 'drop_h1': [0.25, 0.5],\n # 'drop_h2': [0.25, 0.5],\n # 'optimizer': ['nadam', 'adam']\n # },\n {\n # Multi Convolutional model hyperparameters\n 'name': ['multi_conv_h3_s2'],\n 'drop_embed': [0.5], # , 0.3],\n 'embedded_dims': [128], # , 64, 128, 256],\n 'num_units_h1': [128], # , 64, 128, 256],\n 'num_units_h2': [128], # , 64, 128, 256],\n 'num_units_h3': [128], # , 64, 128, 256],\n 'num_units_h4': [128], # , 64, 128, 256],\n 'k_conv_h1': [3],\n 'k_conv_h2': [2],\n 'activation': ['relu'], # , 'tanh'],\n 'drop_h3': [0.2], # , 0.2, 0.25, 0.5, 0.75],\n 'optimizer': ['adam'], # 'nadam']\n },\n # {\n # # Multi Convolutional model hyperparameters\n # 'name': ['multi_conv_h3_s3'],\n # 'drop_embed': [0.5], # , 0.3],\n # 'embedded_dims': [32], # , 64, 128, 256],\n # 'num_units_h1': [32], # , 64, 128, 256],\n # 'num_units_h2': [32], # , 64, 128, 256],\n # 'num_units_h3': [32], # , 64, 128, 256],\n # 'num_units_h4': [32], # , 64, 128, 256],\n # 'k_conv_h1': [3],\n # 'k_conv_h2': [2],\n # 'k_conv_h3': [4],\n # 'k_conv_h4': [4],\n # 'activation': ['relu', 'tanh'],\n # 'drop_h4': [0.1], # , 0.2, 0.25, 0.5, 0.75],\n # 'optimizer': ['adam', 'nadam']\n # },\n ]),\n 'preprocessor': ParameterGrid([\n # {\n # 'do_clean': [False],\n # 'pad_type': ['pre', 'post'],\n # 'trunc_type': ['pre', 'post'],\n # },\n {\n 'do_clean': [True],\n 'pad_type': ['post'], # , 'post'],\n 'trunc_type': ['post'], # 'post'],\n 'omit_stopwords': [False],\n 'ignore_urls': [False],\n 'fix_contractions': [True],\n 'stem': [True],\n 'remove_foreign_characters': [False], # , False],\n 'lower': [True], # , False],\n 'remove_punctuation': [True], # , False],\n 'bigrams': [True], # , False]\n },\n ])\n }\n\n def prod(a):\n if len(a) == 0:\n return 1\n return a[0] * prod(a[1:])\n\n num_models = prod([len(pg) for pg in param_grids.values()])\n\n param_grid_names = sorted(list(param_grids.keys()))\n param_grid_list = [param_grids[k] for k in param_grid_names]\n\n all_params, best_params = assemble_results(output_root)\n\n if CHECK_ONLY:\n for i, params in enumerate(itertools.product(*param_grid_list[3:5])):\n params = {k: v for k, v in zip(param_grid_names[3:5], params)}\n print(i, params)\n Preprocessor(**params['preprocessor'], **params['model_preprocessor'])\n\n for i, params in enumerate(itertools.product(*param_grid_list[2:4])):\n params = {k: v for k, v in zip(param_grid_names[2:4], params)}\n print(i, params)\n build_fn(num_classes=3, **params['model'], **params['model_preprocessor'])\n return\n\n for i, params in enumerate(itertools.product(*param_grid_list)):\n mem = psutil.virtual_memory()\n percent_used = 1 - mem.available / mem.total\n print(f'{percent_used:.2%} memory used')\n if percent_used > 0.80:\n # exit failure\n print('Exiting (-1)')\n exit(-1)\n\n params = {k: v for k, v in zip(param_grid_names, params)}\n print(f'\\n{i + 1}/{num_models}: {params}\\n')\n\n if params in all_params:\n # skip this one because we already ran it.\n continue\n\n if 
best_params is not None:\n # print best performance so far\n print(f'best params: {best_params}')\n print(f'best val loss: {best_params[\"results\"][\"valid\"][\"loss\"]:.6f}')\n print(f'best val acc: {best_params[\"results\"][\"valid\"][\"accuracy\"]:.4%}')\n\n # create a new output directory with path to model file.\n date = datetime.datetime.utcnow().strftime(\"%Y-%m-%d-%H.%M.%S.%f\")\n output_dir = os.path.join(output_root, date)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n model_file = os.path.join(output_dir, 'model.h5')\n\n # get the preprocessed training and validation data\n preprocess_time = time.time()\n classes, data_sets, set_names = get_xy(Preprocessor(**params['preprocessor'], **params['model_preprocessor']),\n target=target)\n ((x_train, y_train), (x_valid, y_valid)) = data_sets\n preprocess_time -= time.time()\n\n # build and compile model\n model = build_fn(num_classes=len(classes), **params['model'], **params['model_preprocessor'])\n\n # setup callbacks\n early_stopping = EarlyStopping(monitor='val_loss', verbose=1, **params['early_stopping'])\n model_checkpoint = ModelCheckpoint(\n filepath=model_file,\n save_weights_only=False, save_freq='epoch',\n save_best_only=True, monitor='val_loss', verbose=1)\n callbacks = [early_stopping, model_checkpoint]\n\n # Use sample weights to treat classes equally in loss and accuracy.\n sample_weight = get_sample_weight(y_train)\n sample_weight_valid = get_sample_weight(y_valid)\n\n # fit the model\n train_time = time.time()\n model.fit(x=x_train, y=y_train, sample_weight=sample_weight, verbose=1,\n validation_data=(x_valid, y_valid, sample_weight_valid), callbacks=callbacks, **params['fit'])\n train_time -= time.time()\n\n # load the best model (last one saved)\n model = load_model(model_file, compile=True)\n\n # compute results\n results = get_performance(model, data_sets, set_names)\n results['time'] = {'train': train_time, 'preprocess': preprocess_time}\n\n print(pd.DataFrame(data=results).T)\n params['results'] = results\n\n # save params and results\n with open(os.path.join(output_dir, 'params.json'), 'w') as fp:\n json.dump(params, fp)\n\n # save a copy of *this* Python file.\n shutil.copyfile(__file__, os.path.join(output_dir, 'roatan.py'))\n\n # for convenience, show the validation loss and accuracy in a file name in the same directory.\n result_file_name = f'{params[\"results\"][\"valid\"][\"loss\"]:.6f}_{params[\"results\"][\"valid\"][\"accuracy\"]:.4f}.out'\n with open(os.path.join(output_dir, result_file_name), 'w'):\n pass\n\n # check_model(output_dir)\n\n if best_params is None or (params['results']['valid']['loss'] < best_params['results']['valid']['loss']):\n best_params = params\n\n # assemble results from all runs into one CSV file in output root.\n assemble_results(output_root)", "def MPC(self, prediction_times, predicted_demand, current_supply, incoming_supply):\n # save the data for experimentation\n\n source_data = {'prediction_times': prediction_times,\n 'predicted_demand': predicted_demand,\n 'current_supply': current_supply,\n 'incoming_supply': incoming_supply}\n # # save as pickle\n # global c_counter\n # with open(f'data_{c_counter}.pickle', 'wb') as handle:\n # pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)\n # c_counter += 1\n\n print(\"Running the Gurobi code\")\n reb_cost = gb.tupledict()\n pic_cost = gb.tupledict()\n den_cost = gb.tupledict()\n # TODO Ideally these should be generate within the class definition, but how to pass the prediction times?\n for i in 
ZONE_IDS:\n for j in ZONE_IDS:\n for t in prediction_times:\n ds = my_dist_class.return_distance(i, j)\n reb_cost[(i, j, t)] = self.rebalancing_cost * my_dist_class.return_distance(i,\n j) # ds * FUEL_COST # # distance * fuel\n pic_cost[(i, j, t)] = self.pickup_revenue\n den_cost[(i, j, t)] = self.denied_cost\n\n if 'm' in globals():\n del m\n m = gb.Model(\"rebalancer\")\n rebal = m.addVars(self.ODs, prediction_times, name=\"rebalancing_flow\", vtype=GRB.INTEGER)\n pickup = m.addVars(self.ODs, prediction_times, name=\"pickup_flow\", vtype=GRB.INTEGER)\n denied = m.addVars(self.ODs, prediction_times, name=\"denied_flow\", vtype=GRB.INTEGER)\n # define the objective function\n obj = gb.LinExpr()\n obj.add(rebal.prod(reb_cost))\n obj.add(pickup.prod(pic_cost))\n obj.add(denied.prod(den_cost))\n\n m.addConstrs(\n (pickup[(i, j, t)] + denied[(i, j, t)] == predicted_demand[(i, j, t)]\n for i, j, t in rebal.keys()), \"conservation of pax\"\n )\n\n supply = gb.tupledict()\n for i in ZONE_IDS:\n for idx, t in enumerate(prediction_times):\n if idx == 0:\n supply[(i, t)] = current_supply[i] + incoming_supply[(i, t)]\n else:\n supply[(i, t)] = incoming_supply[(i, t)]\n # print(\"max slack value is \", np.max([v for k, v in supply.items()]))\n # print(\"min slack value is \", np.min([v for k, v in supply.items()]))\n # construct veh_to_be_available list\n pickup_to_be_avail = {}\n for t_end in prediction_times:\n for zone in ZONE_IDS:\n add_ct = False\n pickup_to_be_avail[(zone, t_end)] = 0 # initialize\n ct = gb.LinExpr()\n for origin, destination, pickup_time in pickup.keys():\n if (pickup_time + (\n my_travel_time_class.return_travel_time_15_min_bin(origin, destination)) == t_end) \\\n and (destination == zone):\n # bingo\n ct.add(pickup[(origin, destination, pickup_time)])\n add_ct = True\n # ct.add(rebal[(j, i, t)])\n for origin, destination, move_time in rebal.keys():\n if (move_time + (my_travel_time_class.return_travel_time_15_min_bin(origin, destination)) == t_end) \\\n and (destination == zone):\n # bingo\n ct.add(rebal[(origin, destination, move_time)])\n add_ct = True\n if add_ct:\n pickup_to_be_avail[(zone, t_end)] = ct\n\n m.addConstrs(\n (pickup.sum(i, '*', t) + rebal.sum(i, '*', t) - pickup_to_be_avail[(i, t)] == supply[(i, t)]\n for i in ZONE_IDS for t in prediction_times), \"conservation of incoming flows\")\n\n # self.logger.info(f\"total demand is {sum(predicted_demand.values())}\")\n # self.logger.info(f\"total supply is {np.sum(supply.values())}\")\n # self.logger.info(f\"current supply is {sum(current_supply.values())}\")\n # self.logger.info(f\"incoming supply is {sum(incoming_supply.values())}\")\n\n m.setParam('OutputFlag', 0) # Also dual_subproblem.params.outputflag = 0\n # print(obj.size())\n m.setObjective(obj, GRB.MAXIMIZE)\n m.update()\n # print(m)\n m.optimize()\n if m.status == 2:\n print(f\"obj value is {m.objVal}\")\n # self.logger.info(f\"obj value is {m.objVal}\")\n\n sol_p = m.getAttr(\"x\", pickup)\n sol_d = m.getAttr(\"x\", denied)\n sol_r = m.getAttr(\"x\", rebal)\n # print(\"total non-empty assignment solutions: \", len([v for k, v in sol_p.items() if v > 0]))\n # print(\"total non-empty denied solution: \", len([v for k, v in sol_d.items() if v > 0]))\n # print(\"total non-empty rebal solution: \", len([v for k, v in sol_r.items() if v > 0]))\n # print(\"total assignment revenue: \", np.sum([v * self.pickup_revenue for k, v in sol_p.items() if v > 0]))\n # print(\"total rebal loss: \", np.sum([v * self.rebalancing_cost for k, v in sol_r.items() if v > 0]))\n 
# print(\"total denied loss: \", np.sum([v * self.denied_cost for k, v in sol_d.items() if v > 0]))\n #\n # self.logger.info(f\"total assignment revenue: {np.sum([v * self.pickup_revenue for k, v in sol_p.items() if v > 0])}\")\n # self.logger.info(f\"total rebal loss: {np.sum([v * self.rebalancing_cost for k, v in sol_r.items() if v > 0])}\")\n # self.logger.info(f\"total denied loss: {np.sum([v * self.denied_cost for k, v in sol_d.items() if v > 0])}\")\n # self.logger.info(\"*\" * 10)\n return sol_p, sol_d, sol_r, m.objVal, source_data\n else:\n print(\"Gurobi's status is NOT 2, instead is \", m.status)\n return None, None, None, None, None", "def main():\n\n # initialize a random 3x3 TileGame problem\n tg = TileGame(3)\n # print(TileGame.board_to_pretty_string(tg.get_start_state()))\n # compute path using dfs\n path1 = id_astar(tg, tilegame_heuristic)\n path = ids(tg)\n print(tg.get_start_state())\n # display path\n print('ids')\n # TileGame.print_pretty_path(path)\n print('astar')\n TileGame.print_pretty_path(path1)\n print((time.time() - start_time))\n\n # initialize a small DGraph\n small_dgraph = DGraph([[None, 1], [1, None]], {1})\n # print the path using ids\n # print(ids(small_dgraph))", "def run_all(prob: str='', algo: str='', timelimit: int=3600):\n if prob:\n prob = prob.upper()\n if algo:\n algo = algo.lower()\n probs = (prob,) if prob in PROBS else PROBS\n algos = (algo,) if algo in ALGOS else ALGOS\n\n if not os.path.exists(DAT_PATH):\n raise Exception(\"{} directory does not exist.\".format(DAT_PATH))\n\n if not os.path.exists(RES_PATH):\n os.makedirs(RES_PATH)\n\n for prob in probs:\n path = DAT_PATH + prob\n inss = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]\n inss.sort()\n for algo in algos:\n wfile = \"{}rst_{}_{}.csv\".format(RES_PATH, prob, algo)\n with open (wfile, 'w') as f:\n f.write(\"instance,objective value,time to find best soltuion\")\n if algo == 'bc':\n f.write(\",lower bound\")\n if algo[:3] == 'ids':\n f.write(\",number of total iterations,number of iterations to find best soltuion\")\n f.write(\"\\n\")\n for ins in inss:\n mod = Model(problem=prob, filename=os.path.join(path, ins))\n mod.solve(algorithm=algo, timelimit=timelimit)\n with open (wfile, 'a') as f:\n f.write(\"{},{},{:.2f}\".format(ins,mod.objval, mod.ttb))\n if algo == 'bc':\n f.write(\",{}\".format(mod.lb))\n if algo[:3] == 'ids':\n f.write(\",{},{}\".format(mod.num_ite, mod.bes_ite))\n f.write(\"\\n\")", "def solve(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix, params=[]):\n\n\n\n\n path = [starting_car_location]\n dict = {}\n index = 0\n for i in range(len(list_of_locations)):\n if list_of_locations[i] == starting_car_location:\n index = i\n\n path = [index]\n\n G, m = adjacency_matrix_to_graph(adjacency_matrix)\n\n home_indexes = []\n\n for home in list_of_homes:\n for i in range(len(list_of_locations)):\n if list_of_locations[i] == home:\n home_indexes.append(i)\n break\n\n new_adjacency = [[\"x\" for i in range(len(list_of_locations))] for j in range(len(list_of_locations))]\n\n # for sake of figuring out where to walk\n for home in home_indexes:\n di_path = nx.dijkstra_path(G, index, home)\n for i in range(len(di_path) - 1):\n new_adjacency[di_path[i]][di_path[i + 1]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n new_adjacency[di_path[i + 1]][di_path[i]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n\n\n for home1 in home_indexes:\n for home2 in home_indexes:\n if not home1 == home2:\n di_path = nx.dijkstra_path(G, home1, 
home2)\n for i in range(len(di_path) - 1):\n new_adjacency[di_path[i]][di_path[i + 1]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n new_adjacency[di_path[i + 1]][di_path[i]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n\n\n\n\n G2, m = adjacency_matrix_to_graph(new_adjacency)\n\n all_driving_path = list(nx.dfs_edges(G2))\n\n\n\n\n walking_to = []\n walking_from = {}\n\n for i in range(len(new_adjacency)):\n if i in home_indexes:\n count = 0\n edge_to = 0\n for j in range(len(new_adjacency)):\n if new_adjacency[i][j] != \"x\":\n count += 1\n edge_to = j\n\n #must ensure that this is not a home that we are already dropping someone off at, otherwise it will cut off a line of two homes\n if count == 1 and i != index and i not in walking_from.keys():\n new_adjacency[i][edge_to] = \"x\"\n new_adjacency[edge_to][i] = \"x\"\n walking_to.append(i)\n if edge_to in walking_from:\n walking_from[edge_to] = walking_from[edge_to] + [i]\n else:\n walking_from[edge_to] = [i]\n\n #\n # for i in range(len(all_driving_path) - 1):\n # #if first vertex in edge is the same, we should walk\n # if all_driving_path[i][0] == all_driving_path[i + 1][0]:\n # print(all_driving_path[i][0])\n # print(all_driving_path[i][1])\n # #get rid of only edge connected to this home\n # new_adjacency[all_driving_path[i][0]][all_driving_path[i][1]] = \"x\"\n # new_adjacency[all_driving_path[i][1]][all_driving_path[i][0]] = \"x\"\n # walking_to.append(all_driving_path[i][1])\n # if all_driving_path[i][0] in walking_from:\n # walking_from[all_driving_path[i][0]] = walking_from[all_driving_path[i][0]] + [all_driving_path[i][1]]\n # else:\n # walking_from[all_driving_path[i][0]] = [all_driving_path[i][1]]\n\n\n\n dropoff_locations = list(walking_from.keys())\n for loc in dropoff_locations:\n if loc in home_indexes:\n dropoff_locations.remove(loc)\n\n\n for loc in dropoff_locations:\n di_path = nx.dijkstra_path(G, loc, home)\n for i in range(len(di_path) - 1):\n new_adjacency[di_path[i]][di_path[i + 1]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n new_adjacency[di_path[i + 1]][di_path[i]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n\n for loc in dropoff_locations:\n for home in home_indexes:\n di_path = nx.dijkstra_path(G, loc, home)\n for i in range(len(di_path) - 1):\n new_adjacency[di_path[i]][di_path[i + 1]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n new_adjacency[di_path[i + 1]][di_path[i]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n\n\n G2, m = adjacency_matrix_to_graph(new_adjacency)\n # G = G2\n # pos=nx.spring_layout(G2)\n # nx.draw_networkx_nodes(G2,pos)\n # nx.draw_networkx_labels(G2, pos)\n # nx.draw_networkx_edges(G2,pos,width=1.0,alpha=0.5)\n #\n # plt.draw()\n # plt.show()\n\n # condensed shortest paths to edges - use G3 for real\n\n new_adjacency2 = [[\"x\" for i in range(len(list_of_locations))] for j in range(len(list_of_locations))]\n\n for home in home_indexes:\n if home not in walking_to:\n di_path = nx.dijkstra_path(G2, index, home)\n start = di_path[0]\n end = di_path[len(di_path) - 1]\n new_adjacency2[start][end] = 0\n new_adjacency2[end][start] = 0\n for i in range(len(di_path) - 1):\n new_adjacency2[start][end] += new_adjacency[di_path[i]][di_path[i + 1]]\n new_adjacency2[end][start] += new_adjacency[di_path[i]][di_path[i + 1]]\n\n\n for home1 in home_indexes:\n for home2 in home_indexes:\n if not home1 == home2 and home1 not in walking_to and home2 not in walking_to:\n di_path = nx.dijkstra_path(G2, home1, home2)\n start = di_path[0]\n end = di_path[len(di_path) - 1]\n 
new_adjacency2[start][end] = 0\n new_adjacency2[end][start] = 0\n for i in range(len(di_path) - 1):\n new_adjacency2[start][end] += new_adjacency[di_path[i]][di_path[i + 1]]\n new_adjacency2[end][start] += new_adjacency[di_path[i]][di_path[i + 1]]\n\n for loc in dropoff_locations:\n di_path = nx.dijkstra_path(G2, index, loc)\n start = di_path[0]\n end = di_path[len(di_path) - 1]\n new_adjacency2[start][end] = 0\n new_adjacency2[end][start] = 0\n for i in range(len(di_path) - 1):\n new_adjacency2[start][end] += new_adjacency[di_path[i]][di_path[i + 1]]\n new_adjacency2[end][start] += new_adjacency[di_path[i]][di_path[i + 1]]\n\n for loc in dropoff_locations:\n for home in home_indexes:\n di_path = nx.dijkstra_path(G2, loc, home)\n start = di_path[0]\n end = di_path[len(di_path) - 1]\n new_adjacency2[start][end] = 0\n new_adjacency2[end][start] = 0\n for i in range(len(di_path) - 1):\n new_adjacency2[start][end] += new_adjacency[di_path[i]][di_path[i + 1]]\n new_adjacency2[end][start] += new_adjacency[di_path[i]][di_path[i + 1]]\n\n\n\n\n final_G, m = adjacency_matrix_to_graph(new_adjacency2)\n drive_path = list(nx.dfs_edges(final_G, source=index))\n drive_path.append(index)\n\n mst = nx.minimum_spanning_tree(final_G)\n\n\n\n new_mst = nx.MultiGraph(mst)\n for edge in mst.edges():\n new_mst.add_edge(edge[0], edge[1])\n\n\n if new_mst.degree[index] != 0:\n to_remove = []\n for node in new_mst:\n if (new_mst.degree[node] == 0):\n to_remove.append(node)\n new_mst.remove_nodes_from(to_remove)\n\n eulerian = list(nx.eulerian_circuit(new_mst, index))\n\n path = []\n for edge in eulerian:\n path.append(edge[0])\n\n path.append(eulerian[len(eulerian) - 1][1])\n\n already_seen = []\n to_remove = []\n for i in range(len(path) - 1):\n if path[i] in already_seen:\n to_remove.append(i)\n else:\n already_seen.append(path[i])\n\n new_path = []\n for i in range(len(path) - 1):\n if i not in to_remove:\n new_path.append(path[i])\n path = new_path\n print(eulerian)\n else:\n path = [index]\n print(path)\n\n\n\n\n\n\n\n # print(path)\n final_path = []\n for node in path:\n if node == index:\n final_path.append(node)\n # print(\"Index: \", node)\n elif node in home_indexes and node not in walking_to:\n final_path.append(node)\n # print(\"Home but not walking: \", node)\n elif node in dropoff_locations:\n final_path.append(node)\n # print(\"Dropoff loc: \", node)\n final_path.append(index)\n #print(walking_from)\n # print(final_path)\n # nx.draw(mst)\n # plt.draw()\n # plt.show()\n for node in final_path:\n if node in walking_from and node in home_indexes:\n dict[node] = [node] + walking_from[node]\n elif node in home_indexes:\n dict[node] = [node]\n elif node in walking_from:\n dict[node] = walking_from[node]\n\n very_final_path = []\n for i in range(len(final_path) - 1):\n condensed_path = nx.dijkstra_path(G2, final_path[i], final_path[i+1])\n for j in range(len(condensed_path) - 1):\n if condensed_path[j] != condensed_path[j + 1]:\n very_final_path.append(condensed_path[j])\n\n if len(very_final_path) >= 1 and [len(very_final_path) - 1] != index:\n very_final_path.append(index)\n\n if len(very_final_path) == 0:\n very_final_path = [index]\n\n print(very_final_path)\n print(dict)\n\n\n path2 = list(nx.dfs_preorder_nodes(mst, index))\n\n final_path2 = []\n for node in path2:\n if node == index:\n final_path2.append(node)\n # print(\"Index: \", node)\n elif node in home_indexes and node not in walking_to:\n final_path2.append(node)\n # print(\"Home but not walking: \", node)\n elif node in dropoff_locations:\n 
final_path2.append(node)\n # print(\"Dropoff loc: \", node)\n final_path2.append(index)\n\n\n for node in final_path2:\n if node in walking_from and node in home_indexes:\n dict[node] = [node] + walking_from[node]\n elif node in home_indexes:\n dict[node] = [node]\n elif node in walking_from:\n dict[node] = walking_from[node]\n\n very_final_path2 = []\n for i in range(len(final_path2) - 1):\n condensed_path = nx.dijkstra_path(G2, final_path2[i], final_path2[i+1])\n for j in range(len(condensed_path) - 1):\n if condensed_path[j] != condensed_path[j + 1]:\n very_final_path2.append(condensed_path[j])\n\n if len(very_final_path2) >= 1 and [len(very_final_path2) - 1] != index:\n very_final_path2.append(index)\n\n if len(very_final_path2) == 0:\n very_final_path2 = [index]\n\n opt1 = cost_of_solution(G, very_final_path, dict)\n opt2 = cost_of_solution(G, very_final_path2, dict)\n\n ultra_final_path = []\n if (opt1 <= opt2):\n ultra_final_path = very_final_path\n else:\n ultra_final_path = very_final_path2\n\n return ultra_final_path, dict\n\n pass", "def _prefit(self,costfunc,sample):\n # Here we obtain 10 initial candidates to best fitting\n #opt=100.\n if self.long_fitting[sample]:\n l1=np.linspace(-20,0,16)\n l2=np.linspace(-10,-0.3,16)\n l3=np.linspace(-0.5,0.5,16)\n l4=np.linspace(-0.015,0.015,16)\n else:\n l1=np.linspace(-20,0,5)\n l2=np.linspace(-10,-0.3,5)\n l3=np.linspace(-0.5,0.5,5)\n l4=np.linspace(-0.015,0.015,5)\n l1,l2,l3,l4=np.meshgrid(l1,l2,l3,l4)\n l1,l2,l3,l4=l1.ravel(),l2.ravel(),l3.ravel(),l4.ravel()\n #self._message(\"(%s) Prefitting data (grid search) - %d tests.\"%(sample,l1.size))\n costs=np.ones_like(l1)\n #optpar=np.array((l1[8],l2[8],l3[8],l4[8]))\n for j in range(l1.size):\n ll=np.array((l1[j],l2[j],l3[j],l4[j]))\n costs[j]=costfunc(ll)\n idx=np.argsort(costs)\n opt=costs[idx]\n optpars=np.column_stack((l1[idx],l2[idx],l3[idx],l4[idx]))\n #self._message(str(optpars.shape))\n #self._message(\"(%s) Prefitting done. Error achieved after prefitting: %s.\"%(sample,str(opt)))\n #self._message(\"(%s) Prefitting done. 
Error achieved after prefitting: %g.\"%(sample,opt[0]))\n if opt[0]>=1:\n self._message(\"Warning: (%s) Prefitting did not find a good initial condition (%g).\"%(sample,opt[0]))\n return optpars,opt", "def parallel_run():\n from IPython.parallel import Client\n\n c = Client() # here is where the client establishes the connection\n lv = c.load_balanced_view() # this object represents the engines (workers)\n\n\n rays = []\n maxs=25\n bounding = AABA(xmin=0, ymin=0, zmin=0, xmax=maxs, ymax=maxs, zmax=maxs,)\n gridd = np.zeros((maxs,maxs,maxs))\n # spectrum for red to nir leaves\n red_nir_leaves = spectrum(np.array([0.5, 0.85]), np.array([0.1, 0.6]), np.array([0.5, 0.1]))\n # spectrum for soil\n red_nir_soil = spectrum(np.array([0.5, 0.85]), np.array([0.3, 0.4]), np.array([0.0, 0.0]))\n\n\n # scattering setup\n scatt = BRDSF(red_nir_leaves, 0.0)\n lf = leaf(55.0, 0.8) # leaf angle distribution and leaf area density\n\n\n tasks = []\n for x in xrange(maxs):\n for y in xrange(maxs):\n tasks.append(lv.apply(prun, x,y, maxs, gridd, scatt, red_nir_soil, bounding, lf))\n\n result = [task.get() for task in tasks] # blocks until all results are back\n\n return results", "def search_for_plans(start, exits, pig_neighbours, moves, state, actions):\n goals = exits + pig_neighbours\n paths, _ = GamePlanner.astar_multi_search(start=start,\n goals=goals,\n state=state,\n actions=actions)\n plans = GamePlanner.paths_to_plans(paths=paths,\n exits=exits,\n pig_neighbours=pig_neighbours,\n moves=moves)\n return plans", "def run():\n if am_i_root():\n\n print(\"*** initializing...\")\n\n # Print parameters\n print(\"N_DIMS = \" + str(N_DIMS))\n print(\"LAMBDA_OVER_DX = \" + str(LAMBDA_OVER_DX))\n print(\"R_DT = \" + str(R_DT))\n print(\"MU0_POISSON = \" + str(MU0_POISSON))\n print(\"NORM_POISSON = \" + NORM_POISSON)\n print(\"N_GRID = \" + str(N_GRID))\n print(\"N_HITS = \" + str(N_HITS))\n print(\"POLICY = \" + str(POLICY))\n if POLICY == -1:\n print(\"MODEL_PATH = \" + str(MODEL_PATH))\n else:\n print(\"STEPS_AHEAD = \" + str(STEPS_AHEAD))\n print(\"EPSILON = \" + str(EPSILON))\n print(\"STOP_t = \" + str(STOP_t))\n print(\"STOP_p = \" + str(STOP_p))\n print(\"N_PARALLEL = \" + str(N_PARALLEL))\n print(\"WITH_MPI = \" + str(WITH_MPI))\n print(\"ADAPTIVE_N_RUNS = \" + str(ADAPTIVE_N_RUNS))\n print(\"REL_TOL = \" + str(REL_TOL))\n print(\"MAX_N_RUNS = \" + str(MAX_N_RUNS))\n print(\"N_RUNS(input) = \" + str(N_RUNS))\n sys.stdout.flush()\n\n # Perform runs\n if am_i_root():\n print(\"*** generating episodes...\")\n\n N_runs = N_RUNS\n if ADAPTIVE_N_RUNS or WITH_MPI:\n N_runs = int(N_PARALLEL * (np.ceil(N_runs / N_PARALLEL))) # make it multiple of N_PARALLEL\n if am_i_root():\n print(\"N_RUNS(current) = \" + str(N_runs))\n sys.stdout.flush()\n\n N_runso = 0\n\n if WITH_MPI:\n cdf_t_tot_loc = np.zeros(LEN_CDF_T, dtype=float)\n cdf_h_tot_loc = np.zeros(LEN_CDF_H, dtype=float)\n mean_t_loc = np.nan * np.ones(MAX_N_RUNS // N_PARALLEL, dtype=float)\n failed_loc = - np.ones(MAX_N_RUNS // N_PARALLEL, dtype=float)\n else:\n cdf_t_tot = np.zeros(LEN_CDF_T, dtype=float)\n cdf_h_tot = np.zeros(LEN_CDF_H, dtype=float)\n mean_t_episodes = np.nan * np.ones(MAX_N_RUNS, dtype=float)\n failed_episodes = - np.ones(MAX_N_RUNS, dtype=float)\n\n while True:\n if WITH_MPI: # MPI\n if N_runs % N_PARALLEL != 0:\n raise Exception(\"N_runs must be multiple of N_PARALLEL with MPI\")\n COMM.Barrier()\n # Decomposition\n Nepisodes = N_runs // N_PARALLEL\n episode_list = range(N_runso + ME, N_runs, N_PARALLEL)\n # Run episodes and reduce 
locally\n ind = N_runso // N_PARALLEL\n for episode in episode_list:\n cdf_t, cdf_h, mean_t_loc[ind], failed_loc[ind] = Worker(episode)\n cdf_t_tot_loc += cdf_t\n cdf_h_tot_loc += cdf_h\n ind += 1\n\n # Reduce globally the mean_t and failed\n mean_t_episodes = np.empty([N_runs], dtype=float)\n failed_episodes = np.empty([N_runs], dtype=float)\n COMM.Barrier()\n COMM.Allgather([mean_t_loc[:ind], Nepisodes, MPI.DOUBLE], [mean_t_episodes, Nepisodes, MPI.DOUBLE])\n COMM.Allgather([failed_loc[:ind], Nepisodes, MPI.DOUBLE], [failed_episodes, Nepisodes, MPI.DOUBLE])\n COMM.Barrier()\n elif N_PARALLEL > 1: # multiprocessing\n # Run episodes in parallel\n pool = multiprocessing.Pool(N_PARALLEL)\n result = pool.map(Worker, range(N_runso, N_runs))\n pool.close()\n pool.join()\n # Reduce\n ind = N_runso\n for cdf_t, cdf_h, mean_t, failed in result:\n cdf_t_tot += cdf_t\n cdf_h_tot += cdf_h\n mean_t_episodes[ind] = mean_t\n failed_episodes[ind] = failed\n ind += 1\n elif N_PARALLEL == 1: # sequential\n ind = N_runso\n for episode in range(N_runso, N_runs):\n cdf_t, cdf_h, mean_t, failed = Worker(episode)\n cdf_t_tot += cdf_t\n cdf_h_tot += cdf_h\n mean_t_episodes[ind] = mean_t\n failed_episodes[ind] = failed\n ind += 1\n else:\n raise Exception(\"Problem with N_PARALLEL: must be an int >= 1\")\n\n # estimate of the error\n mean_ep = np.mean(mean_t_episodes[:N_runs])\n sigma_ep = np.std(mean_t_episodes[:N_runs])\n std_error_mean = sigma_ep / np.sqrt(N_runs)\n rel_std_error_mean = std_error_mean / mean_ep\n\n # break clause\n if not ADAPTIVE_N_RUNS:\n break\n else:\n if rel_std_error_mean < REL_TOL:\n break\n elif N_runs >= MAX_N_RUNS:\n break\n else:\n N_runso = N_runs\n N_runs = int(np.ceil(1.05 * (sigma_ep / mean_ep / REL_TOL) ** 2))\n N_runs = min(N_runs, MAX_N_RUNS)\n N_runs = int(N_PARALLEL * (np.ceil(N_runs / N_PARALLEL))) # make it multiple of N_PARALLEL\n if am_i_root():\n print(\"N_RUNS(current) = \" + str(N_runs))\n sys.stdout.flush()\n\n if am_i_root():\n print(\"N_RUNS(performed) = \" + str(N_runs))\n sys.stdout.flush()\n\n # Reduce\n if am_i_root():\n print(\"*** post-processing...\")\n if WITH_MPI:\n # locally\n cdf_t_tot_loc /= N_runs\n cdf_h_tot_loc /= N_runs\n # Reduce globally\n cdf_t_tot = np.empty([LEN_CDF_T], dtype=float)\n cdf_h_tot = np.empty([LEN_CDF_H], dtype=float)\n COMM.Barrier()\n COMM.Allreduce(cdf_t_tot_loc, cdf_t_tot, op=MPI.SUM)\n COMM.Allreduce(cdf_h_tot_loc, cdf_h_tot, op=MPI.SUM)\n COMM.Barrier()\n else:\n cdf_t_tot /= N_runs\n cdf_h_tot /= N_runs\n mean_t_episodes = mean_t_episodes[:N_runs]\n failed_episodes = failed_episodes[:N_runs]\n\n # Further post-processing, save and plot\n if am_i_root():\n\n # from cdf to pdf\n pdf_t_tot = cdf_to_pdf(cdf_t_tot)\n pdf_h_tot = cdf_to_pdf(cdf_h_tot)\n\n # compute stats of number of steps and number of hits\n t_bins = np.arange(BIN_START_T, BIN_END_T, BIN_SIZE_T) + 0.5 * BIN_SIZE_T\n mean_t, sigma_t, skew_t, kurt_t, p_found = stats_from_pdf(t_bins, pdf_t_tot)\n p25_t, p50_t, p75_t, p90_t, p95_t, p99_t, _ = stats_from_cdf(t_bins, cdf_t_tot)\n\n h_bins = np.arange(BIN_START_H, BIN_END_H, BIN_SIZE_H) + 0.5 * BIN_SIZE_H\n mean_h, sigma_h, skew_h, kurt_h, _ = stats_from_pdf(h_bins, pdf_h_tot)\n p25_h, p50_h, p75_h, p90_h, p95_h, p99_h, _ = stats_from_cdf(h_bins, cdf_h_tot)\n\n print(\"probability that the source is never found : %.10f\" % (1.0 - p_found, ))\n print(\"mean number of steps to find the source : %.3f +/- %.3f\" % (mean_t, 1.96 * std_error_mean))\n print(\"number of steps to find the source with 50 %% probability: %.3f\" 
% p50_t)\n print(\"number of steps to find the source with 99 %% probability: %.3f\" % p99_t)\n nb_failed = np.sum(failed_episodes)\n if np.any(failed_episodes < 0):\n nb_failed = -1\n print(\"problem while recording failures\")\n else:\n print(\"number of failed episodes : %d / %d (%f %%)\"\n % (nb_failed, N_runs, nb_failed / N_runs * 100))\n sys.stdout.flush()\n\n # save all parameters to txt file\n inputs = {\n \"N_DIMS\": N_DIMS,\n \"LAMBDA_OVER_DX\": LAMBDA_OVER_DX,\n \"R_DT\": R_DT,\n \"MU0_POISSON\": MU0_POISSON,\n \"NORM_POISSON\": NORM_POISSON,\n \"N_GRID\": N_GRID,\n \"N_HITS\": N_HITS,\n \"POLICY\": POLICY,\n \"STEPS_AHEAD\": STEPS_AHEAD,\n \"MODEL_PATH\": MODEL_PATH,\n \"STOP_t\": STOP_t,\n \"STOP_p\": STOP_p,\n \"ADAPTIVE_N_RUNS\": ADAPTIVE_N_RUNS,\n \"REL_TOL\": REL_TOL,\n \"MAX_N_RUNS\": MAX_N_RUNS,\n \"N_RUNS_PERFORMED\": N_runs,\n \"BIN_START_T\": BIN_START_T,\n \"BIN_END_T\": BIN_END_T,\n \"BIN_SIZE_T\": BIN_SIZE_T,\n \"BIN_START_H\": BIN_START_H,\n \"BIN_END_H\": BIN_END_H,\n \"BIN_SIZE_H\": BIN_SIZE_H,\n \"EPSILON\": EPSILON,\n }\n param_txt_file = os.path.join(DIR_OUTPUTS, str(RUN_NAME + \"_parameters\" + \".txt\"))\n with open(param_txt_file, 'w') as out:\n for key, val in inputs.items():\n print(key + \" = \" + str(val), file=out)\n\n # save stats\n stats_file = os.path.join(DIR_OUTPUTS, str(RUN_NAME + \"_statistics\" + \".txt\"))\n with open(stats_file, \"w\") as sfile:\n sfile.write(\"p_not_found\\t%+.4e\\n\" % (1 - p_found,))\n for varname in \\\n ('mean_t', 'sigma_t', 'skew_t', 'kurt_t', 'p25_t', 'p50_t', 'p75_t', 'p90_t', 'p95_t', 'p99_t'):\n sfile.write(\"%s\\t\\t%+.4e\\n\" % (varname, locals()[varname]))\n for varname in \\\n ('mean_h', 'sigma_h', 'skew_h', 'kurt_h', 'p25_h', 'p50_h', 'p75_h', 'p90_h', 'p95_h', 'p99_h'):\n sfile.write(\"%s\\t\\t%+.4e\\n\" % (varname, locals()[varname]))\n\n # save CDF of number of steps\n table_file = os.path.join(DIR_OUTPUTS, str(RUN_NAME + \"_table_CDF_nsteps\" + \".npy\"))\n np.save(table_file, np.vstack((t_bins, cdf_t_tot)))\n\n # save CDF of number of hits\n table_file = os.path.join(DIR_OUTPUTS, str(RUN_NAME + \"_table_CDF_nhits\" + \".npy\"))\n np.save(table_file, np.vstack((h_bins, cdf_h_tot)))\n\n # create and save figures\n if POLICY == -1:\n specifics = \"MODEL = \" + os.path.basename(MODEL_PATH)\n else:\n specifics = \"STEPS_AHEAD = \" + str(STEPS_AHEAD)\n subtitle = (\n \"N_DIMS = \"\n + str(N_DIMS)\n + \", \"\n + \"LAMBDA_OVER_DX = \"\n + str(LAMBDA_OVER_DX)\n + \", \"\n + \"R_DT = \"\n + str(R_DT)\n + \", \"\n + \"POLICY = \"\n + str(POLICY)\n + \", \"\n + specifics\n + \", \"\n + \"N_GRID = \"\n + str(N_GRID)\n + \", \"\n + \"N_HITS = \"\n + str(N_HITS)\n + \", \"\n + \"N_RUNS = \"\n + str(N_runs)\n + \"\\n\"\n )\n\n # plot PDF(nsteps), CDF(nsteps), PDF(nhits), CDF(nhits)\n fig, ax = plt.subplots(2, 2, figsize=(12, 10))\n plt.subplots_adjust(left=0.08, bottom=0.06, right=0.96, top=0.92, hspace=0.35, wspace=0.30)\n kwargs = {'xycoords': 'axes fraction', 'fontsize': 8, 'ha': \"right\"}\n for row, varname in enumerate([\"number of steps\", \"number of hits\"]):\n if varname == \"number of steps\":\n bins = t_bins\n cdf_tot = cdf_t_tot\n pdf_tot = pdf_t_tot\n mean = mean_t\n sigma = sigma_t\n skew = skew_t\n kurt = kurt_t\n p50 = p50_t\n p75 = p75_t\n p90 = p90_t\n p99 = p99_t\n filesuffix = 'nsteps'\n color = \"tab:blue\"\n else:\n bins = h_bins\n cdf_tot = cdf_h_tot\n pdf_tot = pdf_h_tot\n mean = mean_h\n sigma = sigma_h\n skew = skew_h\n kurt = kurt_h\n p50 = p50_h\n p75 = p75_h\n p90 = p90_h\n p99 = p99_h\n 
filesuffix = 'nhits'\n color = \"tab:orange\"\n max_x = bins[np.nonzero(pdf_tot)[0][-1]]\n for col, fct in enumerate([\"PDF\", \"CDF\"]):\n if fct == \"PDF\":\n ydata = pdf_tot\n ylim = (0.0, 1.02 * np.max(pdf_tot))\n elif fct == \"CDF\":\n ydata = cdf_tot\n ylim = (0.0, 1.0)\n\n ax[row, col].plot(bins, ydata, \"-o\", color=color, markersize=2, linewidth=1)\n ax[row, col].set_title(fct + \" of \" + varname)\n ax[row, col].set_xlabel(varname + \" to find the source\")\n ax[row, col].set_xlim((0, max_x))\n ax[row, col].set_ylim(ylim)\n\n if fct == \"PDF\":\n ax[row, col].annotate(\"p_not_found = \" + \"{:.3e}\".format(1.0 - p_found), xy=(0.98, 0.60), **kwargs)\n ax[row, col].annotate(\"mean = \" + \"{:.3e}\".format(mean), xy=(0.98, 0.56), **kwargs)\n ax[row, col].annotate(\"std = \" + \"{:.3e}\".format(sigma), xy=(0.98, 0.52), **kwargs)\n ax[row, col].annotate(\"skew = \" + \"{:.3e}\".format(skew), xy=(0.98, 0.48), **kwargs)\n ax[row, col].annotate(\"ex. kurt = \" + \"{:.3e}\".format(kurt), xy=(0.98, 0.44), **kwargs)\n elif fct == \"CDF\":\n ax[row, col].annotate(\"p_not_found = \" + \"{:.3e}\".format(1.0 - p_found), xy=(0.98, 0.60), **kwargs)\n ax[row, col].annotate(\"P50 = \" + \"{:.3e}\".format(p50), xy=(0.98, 0.56), **kwargs)\n ax[row, col].annotate(\"P75 = \" + \"{:.3e}\".format(p75), xy=(0.98, 0.52), **kwargs)\n ax[row, col].annotate(\"P90 = \" + \"{:.3e}\".format(p90), xy=(0.98, 0.48), **kwargs)\n ax[row, col].annotate(\"P99 = \" + \"{:.3e}\".format(p99), xy=(0.98, 0.44), **kwargs)\n plt.grid(False)\n plt.figtext(0.5, 0.985, subtitle, fontsize=7, ha=\"center\", va=\"top\")\n plt.draw()\n figure_file = os.path.join(DIR_OUTPUTS, str(RUN_NAME + \"_figure_distributions.pdf\"))\n fig.savefig(figure_file)\n plt.close(fig)\n\n # plot mean nb steps vs number of episodes\n number_episodes = range(1, N_runs + 1)\n cum_mean_t_episodes = np.cumsum(mean_t_episodes) / number_episodes\n if N_runs >= 100:\n number_episodes = number_episodes[20:]\n cum_mean_t_episodes = cum_mean_t_episodes[20:]\n fig, ax = plt.subplots()\n ax.plot(number_episodes, cum_mean_t_episodes, color=\"r\")\n ax.set_title(\"Convergence of the mean number of steps\")\n ax.set_xlabel(\"number of episodes\")\n ax.set_ylabel(\"mean number of steps\")\n plt.figtext(0.5, 0.985, subtitle, fontsize=5, ha=\"center\", va=\"top\")\n plt.grid(False)\n plt.draw()\n figure_file = os.path.join(DIR_OUTPUTS, str(RUN_NAME + \"_figure_convergence.pdf\"))\n fig.savefig(figure_file)\n plt.close(fig)\n\n # save monitoring information (concatenate episodes files)\n monitoring_episodes_file = os.path.join(DIR_OUTPUTS, str(RUN_NAME + \"_monitoring_episodes.txt\"))\n filenames = [os.path.join(DIR_TMP, str(\"monitoring_episode_\" + str(episode) + \".txt\")) for episode in range(N_runs)]\n with open(monitoring_episodes_file, \"w\") as mfile:\n mfile.write(\"# episode\\thit_init\\tstop_flag\\tboundary_flag\\t\"\n \"p_not_found\\t\\tmean_nsteps\\t\\ttime_elapsed(sec)\\n\")\n for fname in filenames:\n if os.path.isfile(fname):\n with open(fname) as infile:\n mfile.write(infile.read())\n os.remove(fname)\n else:\n print(\"Unexpected: Missing episode file: \" + str(fname))\n\n # clean up tmp dirs\n if len(os.listdir(DIR_TMP)) != 0:\n print(\"Unexpected: The directory '\" + DIR_TMP\n + \"' is not removed, because it should be empty but is not.\")\n else:\n os.rmdir(DIR_TMP)\n if len(os.listdir(PARENT_DIR_TMP)) == 0:\n os.rmdir(PARENT_DIR_TMP)\n\n # summary\n monitoring_file = os.path.join(DIR_OUTPUTS, str(RUN_NAME + \"_monitoring_summary\" + \".txt\"))\n with 
open(monitoring_file, \"w\") as mfile:\n mfile.write(\"*** initial hit ***\\n\")\n first_hit = np.loadtxt(monitoring_episodes_file, usecols=1, dtype='int')\n hit_max = np.max(first_hit)\n hit_hist, _ = np.histogram(first_hit, bins=np.arange(0.5, hit_max + 1.5), density=True)\n for h in range(1, hit_max + 1):\n mfile.write(\"hit=%1d: %6.2f %% \\n\" % (h, hit_hist[h - 1] * 100))\n\n mfile.write(\"\\n*** stats convergence ***\\n\")\n mfile.write(\"number of episodes simulated : %d\\n\" % N_runs)\n mfile.write(\"standard error of the mean (estimate): %.4e = %5.2f %%\\n\"\n % (std_error_mean, rel_std_error_mean * 100))\n\n stopping_reason = np.loadtxt(monitoring_episodes_file, usecols=2, dtype='int')\n stop_max = np.max(stopping_reason)\n stopping_hist, _ = np.histogram(stopping_reason, bins=np.arange(0.5, stop_max + 1.5), density=True)\n mfile.write(\"\\n*** reason for stopping (1 is success, anything else is failure) ***\\n\")\n for stop in range(1, stop_max + 1):\n mfile.write(\"stop=%1d: %6.2f %% \\n\" % (stop, stopping_hist[stop - 1] * 100))\n\n mfile.write(\"\\n*** probability that the source is not found at the end of the episodes ***\\n\")\n p_not_found = np.loadtxt(monitoring_episodes_file, usecols=4)\n p_gtr_stop = p_not_found[p_not_found > STOP_p]\n p_not_found_max = np.max(p_not_found)\n mfile.write(\"criteria (STOP_p): %.5e\\n\" % STOP_p)\n mfile.write(\"max(p) : %.5e\\n\" % p_not_found_max)\n mfile.write(\"number of episodes where p > STOP_p: %7d (%8.4f %%)\\n\"\n % (len(p_gtr_stop), len(p_gtr_stop) / N_runs * 100))\n\n near_boundaries = np.loadtxt(monitoring_episodes_file, usecols=3, dtype='int')\n near_boundaries = np.count_nonzero(near_boundaries)\n mfile.write(\"\\n*** agent near boundaries ***\\n\")\n mfile.write(\"number of episodes where it happened: %7d (%8.4f %%)\\n\"\n % (near_boundaries, near_boundaries / N_runs * 100))\n\n episode_elapsed = np.loadtxt(monitoring_episodes_file, usecols=5)\n mfile.write(\"\\n*** computational cost per episode ***\\n\")\n mfile.write(\"avg elapsed seconds per episode: %.5e\\n\" % (np.mean(episode_elapsed)))\n mfile.write(\"max elapsed seconds per episode: %.5e\\n\" % (np.max(episode_elapsed)))\n\n elapsed_time_0 = (time.monotonic() - start_time_0) / 3600.0\n mfile.write(\"\\n*** computational cost ***\\n\")\n mfile.write(\"N_PARALLEL = %d\\n\" % N_PARALLEL)\n mfile.write(\"total elapsed hours : %.5e\\n\" % elapsed_time_0)\n mfile.write(\"cost in hours = total elapsed time * N_PARALLEL: %.5e\\n\" % (elapsed_time_0 * N_PARALLEL))\n\n print(\">>> Results saved in the directory: \" + DIR_OUTPUTS)\n\n sys.stdout.flush()", "def print_solution(data, manager, routing, assignment):\n\n html_name = './app/templates/res.html'\n depot_latlon = str2ll(depot)\n gmap = gmplot.GoogleMapPlotter(depot_latlon[0], depot_latlon[1], 15, api)\n\n total_distance = 0\n total_load = 0\n\n routes = []\n for vehicle_id in range(data['num_vehicles']):\n index = routing.Start(vehicle_id)\n plan_output = 'Route for vehicle {}:\\n'.format(vehicle_id)\n route_distance = 0\n route_load = 0\n\n route = []\n while not routing.IsEnd(index):\n node_index = manager.IndexToNode(index)\n route_load += data['demands'][node_index]\n plan_output += ' {0} Load({1}) -> '.format(node_index, route_load)\n\n route.append(node_index)\n previous_index = index\n index = assignment.Value(routing.NextVar(index))\n route_distance += routing.GetArcCostForVehicle(\n previous_index, index, vehicle_id)\n route.append(0)\n\n plan_output += ' {0} Load({1})\\n'.format(manager.IndexToNode(index),\n 
route_load)\n plan_output += 'Distance of the route: {}m\\n'.format(route_distance)\n plan_output += 'Load of the route: {}\\n'.format(route_load)\n print(plan_output)\n\n total_distance += route_distance\n total_load += route_load\n routes.append([route, route_distance, route_load])\n \n # coordinates = []\n # for i in range(len(route)-1):\n # src = data['addresses'][route[i]]\n # dst = data['addresses'][route[i+1]]\n \n # latlon = str2ll(src)\n # if route[i] == 0:\n # color = 'pink'\n # else:\n # color = colors[vehicle_id%len(colors)]\n\n # gmap.marker(latlon[0], latlon[1], color)\n \n # now = datetime.now()\n # directions_result = gmaps.directions(src,\n # dst,\n # mode=\"driving\",\n # departure_time=now)[0]\n \n # polyline = directions_result['overview_polyline']['points']\n # coordinates = np.asarray(decode_polyline(polyline))\n # gmap.plot(coordinates[:, 0], coordinates[:, 1], colors[vehicle_id%len(colors)], edge_width=2)\n \n\n \n # if gmap is not None:\n # #save plot as html\n # gmap.draw(html_name)\n\n print('Total distance of all routes: {}m'.format(total_distance))\n print('Total load of all routes: {}'.format(total_load))\n\n return routes\n\n #draw html for this", "def run_genetic_algorithm(generations=5000, population_size=100):\n\n #todo: copy adj_matrix to device, it does not get modified.\n d_adj_matrix = cuda.to_device(adj_matrix)\n\n best = 0\n\n #runtime accumulators\n fitness_time = 0\n mutate_time = 0\n r_timer = runtimeTimer()\n total_timer = runtimeTimer()\n total_timer.start()\n\n population_subset_size = int(population_size / 10.)\n generations_10pct = int(generations / 10.)\n\n # Create a random population of `population_size` number of solutions.\n population = generate_random_population(population_size)\n\n # For `generations` number of repetitions...\n for generation in range(generations):\n\n r_timer.start()\n # Compute the fitness of the entire current population\n population_fitness = {}\n\n\n #print population\n #convert python type to np array\n np_population = np.zeros(shape=[len(population), len(all_waypoints)], dtype=int)\n #print np_population\n idx = 0\n for agent_genome in population:\n np_population[idx] = agent_genome\n idx +=1\n\n # print np_population\n # print np_population[0][0]\n\n griddim = len(population)\n blockdim = 1 #start with only one thread per block\n\n output = np.zeros(shape=[len(population), 1])\n # #TODO: copy data to device from host.\n d_input = cuda.to_device(np_population)\n d_output = cuda.to_device(output)\n\n compute_adj_matrix_fitness_CUDA[griddim, blockdim](d_input, d_output, d_adj_matrix, len(population), len(all_waypoints))\n d_output.to_host()\n\n #todo: convert this back to python types.\n print output[10]\n\n # idx = 0\n # for agent_genome in population:\n # population_fitness[agent_genome] = output[idx][0]\n # idx+=1\n\n fitness_time += r_timer.stop()\n\n\n r_timer.start()\n\n # Take the top 10% shortest road trips and produce offspring each from them\n new_population = []\n for rank, agent_genome in enumerate(sorted(population_fitness,\n key=population_fitness.get)[:population_subset_size]):\n\n if (generation % generations_10pct == 0 or generation == generations - 1) and rank == 0:\n print(\"Generation %d best: %d | Unique genomes: %d\" % (generation,\n population_fitness[agent_genome],\n len(population_fitness)))\n best = population_fitness[agent_genome]\n print(agent_genome)\n print(\"\")\n\n # Create 1 exact copy of each of the top road trips\n new_population.append(agent_genome)\n\n # Create 2 offspring 
with 1-3 point mutations\n for offspring in range(2):\n new_population.append(mutate_agent(agent_genome, 3))\n\n # Create 7 offspring with a single shuffle mutation\n for offspring in range(7):\n new_population.append(shuffle_mutation(agent_genome))\n\n # Replace the old population with the new population of offspring\n for i in range(len(population))[::-1]:\n del population[i]\n\n mutate_time += r_timer.stop()\n\n population = new_population\n\n # out_file = open(\"runtime_data-np_5000gen_50inputs.txt\", 'a')\n #\n # total_time = total_timer.stop()\n # #out_file.write(\"\\n\\ngenetic algorithm was run on CPU %s\\n\" % cpuinfo.get_cpu_info()['brand'])\n # out_file.write(\"\\n\\ngenetic algorithm was run on CPU \\n\")\n # out_file.write(\"%i generations, %i population_size and %i inputs\\n\" % (generations, population_size, len(all_waypoints)))\n # out_file.write( \"total runtime was %f seconds\\n\" % total_time)\n # out_file.write( \"\\t total fitness time was %0.2f \\n\" % (fitness_time*1000))\n # out_file.write( \"\\t total mutation time was %0.2f milliseconds\\n\" % (mutate_time*1000))\n # out_file.write( \"\\t average fitness time was %0.2f milliseconds\\n\" % ((fitness_time / generations)*1000))\n # out_file.write( \"\\t average mutate time was %0.2f milliseconds\\n\" % ((mutate_time / generations)*1000))\n # out_file.write( \"\\t %0.3f percent of the total runtime was fitness\\n\" % ((fitness_time / total_time) * 100))\n # out_file.write( \"\\t %0.3f percent of the total runtime was mutations\\n\" % ((mutate_time / total_time) * 100))\n # out_file.write(\" best solution was fitness %d\" % best )", "def get_pathologic_covering_routes(n_pl, n_target, attacker_strategy, target_values):\n # computes the coefficient used by the greedy oracle to choose routes\n targets_coeff = np.transpose(np.multiply(attacker_strategy, target_values))\n\n # randomly selects the player for which the non optimal choice will be made\n wrong_pl = randint(1, n_pl)\n\n # generate the non optimal route randomly\n n_covered_targets = randint(n_pl,n_target-1)\n non_opt_action = np.zeros(n_target)\n for i in range(0, n_covered_targets):\n random_covered_target = randint(0, n_target-1)\n non_opt_action[random_covered_target] = 1\n\n # compute the value of the non optimal route\n non_opt_val = get_value_single_route(non_opt_action, targets_coeff)\n\n # generate routes that have, as a single, values smaller than the best greedy route but taken togher perform\n # at least as well. [[0,1,...],[...],...] 
a[r][t]=1 iff t is covered by r.\n # The returned list should have n_pl - 1 routes\n opt_routes = get_opt_routes(n_pl, non_opt_action)\n\n I={}\n for pl in range(1, n_pl+1):\n\n n_r = randint(0, MAX_ROUTES)\n temp = lil_matrix((n_r+1, n_target), dtype='int8')\n\n if pl == wrong_pl:\n # put the non opt route in the bucket\n for t in non_opt_action.nonzero():\n temp[0,t] = 1\n else:\n for t in opt_routes.pop().nonzero():\n temp[0,t] = 1\n\n # generate other random routes with single value less than the non_opt_value\n for r in range(1, n_r):\n new_route = get_r_limited_val(non_opt_val, targets_coeff)\n\n for t in new_route.nonzero():\n temp[r,t] = 1\n\n I[pl] = temp.tocsr()\n\n return I", "def ppi_network_comparative_analysis(options):\n\n # Start marker for time measure\n start = time.time()\n\n\n #----------------------#\n # DEFINE THE PATHS #\n #----------------------#\n\n # Get the program path\n main_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))\n pickles_dir = os.path.join(main_path, 'NetworkAnalysis/pickles')\n main_dir = os.path.join(options.workspace, 'main_networks') # Path to store the main networks\n create_directory(main_dir)\n tissue_spec_dir = os.path.join(options.workspace, 'tissue_specific_networks') # Path to store the tissue specific networks\n create_directory(tissue_spec_dir)\n\n # Define the results variable\n results_table = {}\n output_table_file = os.path.join(options.workspace, 'tissue_specific_results.tsv')\n\n\n\n ###################################\n ## CREATION OF THE MAIN NETWORKS ##\n ###################################\n\n # Define the main network\n network_file = options.network_file\n type_id = 'biana'\n network_format = options.network_format\n main_network = NA.Network(network_file, type_id, network_format)\n\n # Analyze the main network\n print('BIANA main network')\n print('Number of edges: {}'.format(len(main_network.get_edges())))\n print('Number of nodes: {}\\n'.format(len(main_network.get_nodes())))\n methodid2interactions = main_network.get_methodid2interactions() # Get the methods in the network and the number of interactions per method\n main_network.get_numpmids2interactions() # Get the number of interactions in which we have a given number of pubmed IDs\n main_network.get_database2interactions() # Get the database and the number of interactions\n\n\n # Create a network filtered by methods\n psimi2score_file = os.path.join(pickles_dir, 'psimi2score.pcl')\n psimi2score = cPickle.load(open(psimi2score_file))\n method_ids_excluded = []\n method_ids_included = []\n\n # We exclude a method if it is not in the HIPPIE scoring system or if it is in \n # the HIPPIE scoring system but with a score lower than 3\n for psimi in methodid2interactions:\n if psimi in psimi2score.keys():\n if psimi2score[psimi] <=3:\n method_ids_excluded.append(psimi)\n elif psimi2score[psimi] > 3:\n method_ids_included.append(psimi)\n else:\n method_ids_excluded.append(psimi)\n\n method_ex_network_file = os.path.join(main_dir, 'human_edges.eAFF.biana.nov17.method_ex.txt')\n\n # If using names to exclude, there may be errors because the excluded affinity methods are there in names (but not in ids)\n method_ex_network = main_network.filter_network_by_method(methods_excluded=None, method_ids_excluded=method_ids_excluded, methods_included=None, method_ids_included=None, output_network_file=method_ex_network_file)\n\n # Analyze the methods network\n print('Method-exclusive-filtered main network')\n print('Excluded methods: {}'.format(method_ids_excluded))\n 
print('Number of edges: {}'.format(len(method_ex_network.get_edges())))\n print('Number of nodes: {}\\n'.format(len(method_ex_network.get_nodes())))\n\n # method_inc_network_file = os.path.join(main_dir, 'human_edges.eAFF.biana.nov17.method_inc.txt')\n\n # # If using names to exclude, there may be errors because the excluded affinity methods are there in names (but not in ids)\n # method_inc_network = main_network.filter_network_by_method(methods_excluded=None, method_ids_excluded=None, methods_included=None, method_ids_included=method_ids_included, output_network_file=method_inc_network_file)\n\n # # Analyze the methods network\n # print('Method-inclusive-filtered main network')\n # print('Included methods: {}'.format(method_ids_included))\n # print('Number of edges: {}'.format(len(method_inc_network.get_edges())))\n # print('Number of nodes: {}\\n'.format(len(method_inc_network.get_nodes())))\n\n\n # Create a network filtered by number of pubmed IDs\n pmid_network_file = os.path.join(main_dir, 'human_edges.eAFF.biana.nov17.pmid.txt')\n min_num_pubmeds = 2\n pmid_network = main_network.filter_network_by_number_pubmeds(min_num_pubmeds, output_network_file=pmid_network_file)\n\n # Analyze the pubmed network\n print('Pubmed-filtered main network')\n print('Number of edges: {}'.format(len(pmid_network.get_edges())))\n print('Number of nodes: {}\\n'.format(len(pmid_network.get_nodes())))\n\n\n # Create a network filtered by database\n databases = {\n 'intact' : 'intact [release 2017_04 of 05-apr-2017]',\n 'biogrid': 'biogrid [release 3.4.147 (31-mar-2017)]',\n 'irefindex' : 'irefindex [14.0 (last edited in 2016-07-23)]',\n 'hippie': 'hippie [v2.0 (06/24/2016)]',\n 'hprd' : 'hprd [release 2010_04 of 13-apr-2010]',\n 'dip': 'dip [release 2017_02 of 05-feb-2017]',\n 'gpcr' : 'gpcr [1-apr-2017]'\n }\n intgrid_network_file = os.path.join(main_dir, 'human_edges.eAFF.biana.nov17.intgrid.txt')\n databases_included = [databases['intact'], databases['biogrid']]\n intgrid_network = main_network.filter_network_by_database(databases_included, intgrid_network_file)\n\n # Analyze the pubmed network\n\n print('Database-filtered main network')\n print('Number of edges: {}'.format(len(intgrid_network.get_edges())))\n print('Number of nodes: {}\\n'.format(len(intgrid_network.get_nodes())))\n\n\n # We still have to implement the tissue-specificity to filter by microarray or RNAseq in the case of Human Protein Atlas data!!!\n\n ##############################################\n ## CREATION OF THE TISSUE-SPECIFIC NETWORKS ##\n ##############################################\n\n # Define the tissue liver\n tissue_terms_hpa = 'liver'\n tissue_terms_jensen = 'liver'\n liver = NA.Tissue(tissue_terms_hpa, tissue_terms_jensen, jensen_conf=3, hpa_level='low', hpa_rel='approved', pickles_path=pickles_dir)\n #print(liver.tissue_terms_hpa)\n #print(liver.tissue_terms_jensen)\n #print(liver.all_tissue_user_entities)\n #print(liver.all_tissue_proteins)\n\n # Define the tissue kidney\n tissue_terms_hpa = 'kidney'\n tissue_terms_jensen = 'kidney'\n kidney = NA.Tissue(tissue_terms_hpa, tissue_terms_jensen, jensen_conf=3, hpa_level='low', hpa_rel='approved', pickles_path=pickles_dir)\n\n # Define the tissue brain\n tissue_terms_hpa = ['cerebral cortex','cerebellum']\n tissue_terms_jensen = 'brain'\n brain = NA.Tissue(tissue_terms_hpa, tissue_terms_jensen, jensen_conf=3, hpa_level='low', hpa_rel='approved', pickles_path=pickles_dir)\n\n # Define the tissue heart\n tissue_terms_hpa = 'heart muscle'\n tissue_terms_jensen = 'heart'\n 
heart = NA.Tissue(tissue_terms_hpa, tissue_terms_jensen, jensen_conf=3, hpa_level='low', hpa_rel='approved', pickles_path=pickles_dir)\n\n # Define the tissue heart\n tissue_terms_hpa = 'pancreas'\n tissue_terms_jensen = 'pancreas'\n pancreas = NA.Tissue(tissue_terms_hpa, tissue_terms_jensen, jensen_conf=3, hpa_level='low', hpa_rel='approved', pickles_path=pickles_dir)\n\n\n # Define the housekeeping genes object\n translation_id = 'geneid'\n hk_genes = TS.HouseKeepingGenes(translation_id, pickles_dir)\n all_hk_genes = hk_genes.all_hk_genes\n hpa_hk_genes = hk_genes.hpa_hk_genes\n eisenberg_hk_genes = hk_genes.eisenberg_hk_genes\n\n # Define the Wang liver network\n wang_liver_file = os.path.join(pickles_dir, 'wang_liver_network.pcl')\n wang_liver_network = cPickle.load(open(wang_liver_file))\n wang_edges_file = os.path.join(main_dir, 'wang_liver_edges.geneid.txt')\n NA.write_network_file_from_networkx_graph(wang_liver_network, wang_edges_file, 'raw')\n wang_network = NA.Network(wang_edges_file, 'geneid', 'raw')\n\n\n for network, abbr in [ (main_network, 'complete'), (method_ex_network, 'method_ex'), (pmid_network, 'pmid'), (intgrid_network, 'intgrid') ]:\n\n results_table.setdefault(abbr, [])\n\n #### Translate the main network ####\n translation_file = options.translation_file\n translation_id = 'geneid'\n translated_network = os.path.join(main_dir, 'human_edges.eAFF.geneid.nov17.{}.txt'.format(abbr))\n geneid_net = network.translate_network(translation_file, translation_id, network_format, translated_network)\n\n # Calculate coverage of house-keeping genes in the main network\n num_all_hk_network = float(geneid_net.get_number_of_housekeeping_genes(all_hk_genes))\n num_hpa_hk_network = float(geneid_net.get_number_of_housekeeping_genes(hpa_hk_genes))\n num_eis_hk_network = float(geneid_net.get_number_of_housekeeping_genes(eisenberg_hk_genes))\n num_all_nonhk_network = float(len(set(geneid_net.get_nodes()))) - num_all_hk_network\n per_all_hk_network = num_all_hk_network / float(len(all_hk_genes)) * 100\n per_hpa_hk_network = num_hpa_hk_network / float(len(hpa_hk_genes)) * 100\n per_eis_hk_network = num_eis_hk_network / float(len(eisenberg_hk_genes)) * 100\n\n print('{} GeneID network'.format(abbr.upper()))\n print('Number of edges: {}'.format(len(geneid_net.get_edges())))\n print('Number of nodes: {}'.format(len(geneid_net.get_nodes())))\n print('Percentage of (all) housekeeping genes in the main network: {:.2f}%\\t{:.0f} of {} genes'.format(per_all_hk_network, num_all_hk_network, len(all_hk_genes)))\n print('Percentage of (HPA) housekeeping genes in the main network: {:.2f}%\\t{:.0f} of {} genes'.format(per_hpa_hk_network, num_hpa_hk_network, len(hpa_hk_genes)))\n print('Percentage of (Eisenberg) housekeeping genes in the main network: {:.2f}%\\t{:.0f} of {} genes\\n'.format(per_eis_hk_network, num_eis_hk_network, len(eisenberg_hk_genes)))\n\n results_table[abbr].append(len(geneid_net.get_edges()))\n results_table[abbr].append(len(geneid_net.get_nodes()))\n\n for tissue, abbr_tis in [ (liver, 'liver'), (kidney, 'kidney'), (brain, 'brain'), (heart, 'heart'), (pancreas, 'pancreas') ]:\n\n tissue_dir = os.path.join(tissue_spec_dir,abbr_tis)\n if not os.path.exists(tissue_dir):\n os.makedirs(tissue_dir)\n\n #### Create tissue-specific network ####\n tissue_network_file = os.path.join(tissue_dir, '{}_edges.biana.nov17.{}.txt'.format(abbr_tis,abbr))\n permission = 0\n tissue_network = network.filter_network_by_tissue(tissue_network_file, tissue, permission)\n\n #### Translate the 
tissue-specific network to geneID ####\n translated_network = os.path.join(tissue_dir, '{}_edges.geneid.nov17.{}.txt'.format(abbr_tis,abbr))\n geneid_tissue_network = tissue_network.translate_network(translation_file, translation_id, network_format, translated_network)\n\n # Get info from tissue-specific network\n hpa_edges=geneid_tissue_network.hpa_edges\n jensen_edges=geneid_tissue_network.jensen_edges\n union=geneid_tissue_network.get_union()\n intersection=geneid_tissue_network.get_intersection()\n\n # Calculate coverage of housekeeping genes in the tissue-specific network\n num_all_hk_tissue = float(geneid_tissue_network.get_number_of_housekeeping_genes(all_hk_genes))\n num_hpa_hk_tissue = float(geneid_tissue_network.get_number_of_housekeeping_genes(hpa_hk_genes))\n num_eis_hk_tissue = float(geneid_tissue_network.get_number_of_housekeeping_genes(eisenberg_hk_genes))\n num_all_nonhk_tissue = float(len(set(geneid_tissue_network.get_nodes()))) - num_all_hk_tissue\n per_all_hk_tissue = num_all_hk_tissue / num_all_hk_network * 100\n per_hpa_hk_tissue = num_hpa_hk_tissue / num_hpa_hk_network * 100\n per_eis_hk_tissue = num_eis_hk_tissue / num_eis_hk_network * 100\n\n contingency_table = np.array([[num_all_hk_tissue, num_all_nonhk_tissue], [num_all_hk_network, num_all_nonhk_network]])\n chi2, pval, dof, expected = NA.calculate_contingency_table(contingency_table)\n\n print('{} {}-specific network'.format(abbr.upper(), abbr_tis.upper()))\n print('Number of edges: {}'.format(len(geneid_tissue_network.get_edges())))\n print('Number of nodes: {}'.format(len(geneid_tissue_network.get_nodes())))\n print('Interactions using only Tissues (Jensen lab): {}'.format(len(jensen_edges)))\n print('Interactions using only Human Protein Atlas: {}'.format(len(hpa_edges)))\n print('Intersection: {}'.format(len(intersection)))\n print('Union: {}\\n'.format(len(union)))\n print('Percentage of (all) HK genes in the tissue with respect to main network: {:.2f}%\\t{:.0f} of {:.0f} genes'.format(per_all_hk_tissue, num_all_hk_tissue, num_all_hk_network))\n print('Percentage of (HPA) HK genes in the tissue with respect to main network: {:.2f}%\\t{:.0f} of {:.0f} genes'.format(per_hpa_hk_tissue, num_hpa_hk_tissue, num_hpa_hk_network))\n print('Percentage of (Eisenberg) HK genes in the tissue with respect to main network: {:.2f}%\\t{:.0f} of {:.0f} genes'.format(per_eis_hk_tissue, num_eis_hk_tissue, num_eis_hk_network))\n print('Contingency table: {}'.format(contingency_table))\n print('Contingency table result:\\tchi2: {}\\tp-value: {}\\tdegrees of freedom: {}\\n'.format(chi2, pval, dof))\n\n results_table[abbr].append(len(geneid_tissue_network.get_edges()))\n results_table[abbr].append(len(geneid_tissue_network.get_nodes()))\n results_table[abbr].append(per_all_hk_tissue)\n results_table[abbr].append(per_hpa_hk_tissue)\n results_table[abbr].append(per_eis_hk_tissue)\n results_table[abbr].append(pval)\n\n\n if abbr_tis == 'liver':\n\n # Calculate coverage of Wang liver network genes in the main network\n # and in our tissue-specific netwrok\n wang_nodes_intersection_network = NA.get_nodes_intersection_of_two_networks(geneid_net, wang_network)\n wang_nodes_intersection_liver = NA.get_nodes_intersection_of_two_networks(geneid_tissue_network, wang_network)\n num_wang_network = float(len(wang_nodes_intersection_network))\n num_nonwang_network = float(len(set(geneid_net.get_nodes()))) - num_wang_network\n num_wang_liver = float(len(wang_nodes_intersection_liver))\n num_nonwang_liver = 
float(len(set(geneid_tissue_network.get_nodes()))) - num_wang_liver\n per_wang_network = num_wang_network / float(len(wang_network.get_nodes())) * 100\n per_wang_tissue = num_wang_liver / num_wang_network * 100\n\n contingency_table = np.array([[num_wang_liver, num_nonwang_liver], [num_wang_network, num_nonwang_network]])\n chi2, pval, dof, expected = NA.calculate_contingency_table(contingency_table)\n print('Percentage of Wang liver genes in the main network: {:.2f}%\\t{:.0f} of {:.0f} genes'.format(per_wang_network, num_wang_network, float(len(wang_network.get_nodes()))))\n print('Percentage of Wang liver genes in the liver-specific network with respect to main network: {:.2f}%\\t{:.0f} of {:.0f} genes'.format(per_wang_tissue, num_wang_liver, num_wang_network))\n print('Contingency table: {}'.format(contingency_table))\n print('Contingency table result:\\tchi2: {}\\tp-value: {}\\tdegrees of freedom: {}\\n'.format(chi2, pval, dof))\n\n results_table[abbr].append(per_wang_tissue)\n results_table[abbr].append(pval)\n\n # Calculate coverage of Wang liver network interactions in the main\n # network and in our tissue-specific netwrok\n wang_intersection_network = NA.get_edges_intersection_of_two_networks(geneid_net, wang_network)\n wang_intersection_liver = NA.get_edges_intersection_of_two_networks(geneid_tissue_network, wang_network)\n num_wang_network = float(len(wang_intersection_network))\n num_nonwang_network = float(len(set(geneid_net.get_edges()))) - num_wang_network\n num_wang_liver = float(len(wang_intersection_liver))\n num_nonwang_liver = float(len(set(geneid_tissue_network.get_edges()))) - num_wang_liver\n per_wang_network = num_wang_network / float(len(wang_network.get_edges())) * 100\n per_wang_tissue = num_wang_liver / num_wang_network * 100\n\n contingency_table = np.array([[num_wang_liver, num_nonwang_liver], [num_wang_network, num_nonwang_network]])\n chi2, pval, dof, expected = NA.calculate_contingency_table(contingency_table)\n print('Percentage of Wang liver interactions in the main network: {:.2f}%\\t{:.0f} of {:.0f} edges'.format(per_wang_network, num_wang_network, float(len(wang_network.get_edges()))))\n print('Percentage of Wang liver interactions in the liver-specific network with respect to main network: {:.2f}%\\t{:.0f} of {:.0f} edges'.format(per_wang_tissue, num_wang_liver, num_wang_network))\n print('Contingency table: {}'.format(contingency_table))\n print('Contingency table result:\\tchi2: {}\\tp-value: {}\\tdegrees of freedom: {}\\n'.format(chi2, pval, dof))\n\n results_table[abbr].append(per_wang_tissue)\n results_table[abbr].append(pval)\n\n output_table_fd = open(output_table_file, 'w')\n\n for method in results_table:\n\n output_table_fd.write('{}'.format(method))\n print(method)\n for result in results_table[method]:\n output_table_fd.write('\\t{}'.format(result))\n print(result)\n output_table_fd.write('\\n')\n\n output_table_fd.close()\n print(results_table)\n\n\n\n # End marker for time\n end = time.time()\n print('\\nTIME OF EXECUTION: {:.3f} seconds or {:.3f} minutes.\\n'.format(end - start, (end - start) / 60))\n\n return", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n path_to_point = {}\n cost_to_point = {}\n\n # Get the start node\n start_node = problem.getStartState()\n fringe_node = [start_node]\n path_to_point[start_node] = []\n cost_to_point[start_node] = problem.getCostOfActions(path_to_point[start_node])\n\n goal_found = False\n\n while(not goal_found):\n #for i in range(100): \n nodes_to_expand = set()\n # get max value 
node in the fringe node\n min_val = float(\"inf\")\n for one_node in fringe_node:\n # Compute the cost to reach a node\n if cost_to_point[one_node] < min_val:\n min_val = cost_to_point[one_node]\n \n for one_node in fringe_node:\n # Compute the cost to reach a node\n if cost_to_point[one_node] == min_val:\n nodes_to_expand.add(one_node)\n fringe_node.remove(one_node)\n\n # Expand the fringe node \n for one_node in nodes_to_expand:\n path_to_parent = path_to_point[one_node]\n for nxt_node in problem.getSuccessors(one_node):\n pos = nxt_node[0]\n mv = nxt_node[1]\n # check if point already present in path to point\n prev_cost = float(\"inf\")\n if pos in cost_to_point:\n prev_cost = cost_to_point[pos]\n new_path = path_to_parent + [mv]\n if prev_cost > problem.getCostOfActions(new_path):\n path_to_point[pos] = new_path\n cost_to_point[pos] = problem.getCostOfActions(new_path)\n fringe_node.append(pos)\n\n # Check if destination is reached in the fringe node\n for one_node in fringe_node:\n if problem.isGoalState(one_node):\n final_node = one_node\n goal_found = True\n break\n \n #print(len(fringe_node))\n print(final_node)\n print(path_to_point[final_node])\n return path_to_point[final_node] \n\n util.raiseNotDefined()", "def solve(n_vec, m_vec, p_vec, repeat, dns_level, seed, solver='gurobi'):\n\n print(\"Solving random problems with solver %s\\n\" % solver)\n\n # Define statistics to record\n std_solve_time = np.zeros(len(n_vec))\n avg_solve_time = np.zeros(len(n_vec))\n min_solve_time = np.zeros(len(n_vec))\n max_solve_time = np.zeros(len(n_vec))\n\n n_prob = len(n_vec)\n\n # Store also OSQP time\n if solver == 'miosqp':\n # Add OSQP solve times statistics\n avg_osqp_solve_time = np.zeros(len(n_vec))\n\n # reset random seed\n np.random.seed(seed)\n\n for i in range(n_prob):\n\n # Get dimensions\n n = n_vec[i]\n m = m_vec[i]\n p = p_vec[i]\n\n print(\"problem n = %i, m = %i, p = %i\" % (n, m, p))\n\n # Define vector of cpu times\n solve_time_temp = np.zeros(repeat)\n\n # Store also OSQP time\n if solver == 'miosqp':\n osqp_solve_time_temp = np.zeros(repeat)\n\n for j in tqdm(range(repeat)):\n # for j in range(repeat):\n\n # Generate random vector of indeces\n i_idx = np.random.choice(np.arange(0, n), p, replace=False)\n\n # Generate random Matrices\n Pt = spa.random(n, n, density=dns_level)\n P = spa.csc_matrix(np.dot(Pt, Pt.T))\n q = sp.randn(n)\n A = spa.random(m, n, density=dns_level)\n u = 2 + sp.rand(m)\n l = -2 + sp.rand(m)\n\n # Enforce [0, 1] bounds on variables\n i_l = np.zeros(p)\n i_u = np.ones(p)\n # A, l, u = miosqp.add_bounds(i_idx, 0., 1., A, l, u)\n\n if solver == 'gurobi':\n # Solve with gurobi\n prob = mpbpy.QuadprogProblem(P, q, A, l, u, i_idx, i_l, i_u)\n res_gurobi = prob.solve(solver=mpbpy.GUROBI,\n verbose=False, Threads=1)\n if res_gurobi.status != 'optimal':\n import ipdb\n ipdb.set_trace()\n solve_time_temp[j] = 1e3 * res_gurobi.cputime\n\n elif solver == 'miosqp':\n # Define problem settings\n miosqp_settings = {\n # integer feasibility tolerance\n 'eps_int_feas': 1e-03,\n # maximum number of iterations\n 'max_iter_bb': 1000,\n # tree exploration rule\n # [0] depth first\n # [1] two-phase: depth first until first incumbent and then best bound\n 'tree_explor_rule': 1,\n # branching rule\n # [0] max fractional part\n 'branching_rule': 0,\n 'verbose': False,\n 'print_interval': 1}\n\n osqp_settings = {'eps_abs': 1e-03,\n 'eps_rel': 1e-03,\n 'eps_prim_inf': 1e-04,\n 'verbose': False}\n\n model = miosqp.MIOSQP()\n model.setup(P, q, A, l, u, i_idx, i_l, i_u,\n 
miosqp_settings,\n osqp_settings)\n res_miosqp = model.solve()\n\n # DEBUG (check if solutions match)\n # prob = mpbpy.QuadprogProblem(P, q, A, l, u, i_idx, i_l, i_u)\n # res_gurobi = prob.solve(solver=mpbpy.GUROBI, verbose=False)\n # if (np.linalg.norm(res_gurobi.x - res_miosqp.x) /\n # np.linalg.norm(res_gurobi.x)) > 1e-02:\n # import ipdb; ipdb.set_trace()\n#\n # import ipdb; ipdb.set_trace()\n\n if res_miosqp.status != miosqp.MI_SOLVED:\n import ipdb\n ipdb.set_trace()\n \n # Solution time \n solve_time_temp[j] = 1e3 * res_miosqp.run_time\n\n # Store OSQP time in percentage\n if solver == 'miosqp':\n osqp_solve_time_temp[j] = \\\n 100 * (res_miosqp.osqp_solve_time / res_miosqp.run_time)\n\n # Get time statistics\n std_solve_time[i] = np.std(solve_time_temp)\n avg_solve_time[i] = np.mean(solve_time_temp)\n max_solve_time[i] = np.max(solve_time_temp)\n min_solve_time[i] = np.min(solve_time_temp)\n\n # Store also OSQP time\n if solver == 'miosqp':\n avg_osqp_solve_time[i] = np.mean(osqp_solve_time_temp)\n\n # Create pandas dataframe for the results\n df_dict = {'n': n_vec,\n 'm': m_vec,\n 'p': p_vec,\n 't_min': min_solve_time,\n 't_max': max_solve_time,\n 't_avg': avg_solve_time,\n 't_std': std_solve_time}\n\n # Store also OSQP time\n if solver == 'miosqp':\n df_dict.update({'t_osqp_avg': avg_osqp_solve_time})\n\n timings = pd.DataFrame(df_dict)\n\n return timings", "def prob4(d = 500): \n #import the plane data\n planeData = np.load(\"plane.npy\")\n \n tplane = planeData[:,0]\n alpha = np.deg2rad(planeData[:,1])\n beta = np.deg2rad(planeData[:,2])\n \n l = len(tplane)\n \n #define x and y functions\n def x(n):\n# Gives x position\n return d * np.tan(beta[n]) / (np.tan(beta[n]) - np.tan(alpha[n]))\n def y(n):\n# Gives y position\n return d * np.tan(beta[n]) * np.tan(alpha[n]) / (np.tan(beta[n]) - np.tan(alpha[n]))\n \n #define x and y prime as we will see them\n def xprime(n):\n# Gives the approximate derivative of x\n if n == 0:\n return fdq1(x, n, h = 1)\n elif n == l-1:\n return bdq1(x, n, h = 1)\n elif n > 0 and n < l:\n return cdq2(x, n, h = 1)\n else:\n return 0\n \n def yprime(n):\n# Gives the approximate derivative of y\n if n == 0:\n return fdq1(y, n, h = 1)\n elif n == l-1:\n return bdq1(y, n, h = 1)\n elif n > 0 and n < l:\n return cdq2(y, n, h = 1)\n else:\n return 0\n \n #define speed from x and y prime\n def speed(n):\n# print(\"speed(n) where n = \" + str(n))\n return np.sqrt((xprime(n))**2 + (yprime(n))**2)\n \n #Finally get the speed from the information we have\n spd = []\n X = []\n Y = []\n for i in range(0, l):\n spd.append(speed(i))\n X.append(x(i))\n Y.append(y(i))\n \n return spd\n \n raise NotImplementedError(\"Problem 4 Incomplete\")", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n from util import PriorityQueue\n\n pq = PriorityQueue()\n visited = []\n start = problem.getStartState()\n mapper = {}\n \n mapper[problem.getStartState()] = None\n pq.push(problem.getStartState(), 1)\n\n while (not pq.isEmpty()):\n point = pq.pop()\n if problem.isGoalState(point):\n current = point\n l = []\n while mapper[current] != None:\n tup = mapper[current]\n l.append(tup[1])\n current = tup[0]\n l.reverse()\n print l\n return l\n #util.raiseNotDefined()\n if not (point in visited):\n visited.append(point)\n succs = problem.getSuccessors(point)\n succs.reverse()\n for child in succs:\n if not (child[0] in mapper):\n pq.push(child[0], child[2]) #child has (xy, direction, weight)\n mapper[child[0]] = point, child[1]\n # util.raiseNotDefined()", "def 
run_grid_search():\n\n best_score = 0\n best_learning_rate = 0\n best_discount_rate = 0\n best_initial_q_hat = 0\n trial_results = []\n number_of_trials = 30\n # TODO These ought to be done with numpy.arange but I don't have that package installed at the moment\n for learning_rate_raw in range(5, 50, 5):\n for discount_rate_raw in range(5, 20, 5):\n for initial_q_hat in range(0, 10, 1):\n learning_rate = learning_rate_raw * 0.01\n discount_rate = discount_rate_raw * 0.05\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent, learning_rate, discount_rate, initial_q_hat) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=number_of_trials) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line\n score = a.get_score()\n if score > best_score:\n best_score = score\n best_learning_rate = learning_rate\n best_discount_rate = discount_rate\n best_initial_q_hat = initial_q_hat\n trial_results.append((learning_rate, discount_rate, initial_q_hat, score, a.get_proportion_of_states_visited(), len(a.get_failed_trials())/ float(number_of_trials)))\n print \"Gridsearch finished, best learning rate: %.2f, best discount rate: %.2f, best initial q hat %i\" % (best_learning_rate, best_discount_rate, best_initial_q_hat)\n\n with open('gridsearch_results.csv', 'wb') as csvfile:\n spamwriter = csv.writer(csvfile, delimiter=';', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n spamwriter.writerow(('learning rate', 'discount factor', 'initial q-hat value', 'score', 'states visited', 'failed trials'))\n for result in trial_results:\n spamwriter.writerow(result)", "def calc_precision_map(self, output_filenm=\"\"):\n logger.info(\"Calculating precision map\")\n success_map, total_map = {}, {} # map from query r to a dict of path and ratio of success\n # not sure why I am getting RuntimeError: dictionary changed size during iteration.\n train_map = [((e1, r), e2_list) for ((e1, r), e2_list) in self.train_map.items()]\n for ((e1, r), e2_list) in tqdm(train_map):\n c = self.args.cluster_assignments[self.entity_vocab[e1]]\n if c not in success_map:\n success_map[c] = {}\n if c not in total_map:\n total_map[c] = {}\n if r not in success_map[c]:\n success_map[c][r] = {}\n if r not in total_map[c]:\n total_map[c][r] = {}\n paths_for_this_relation = self.args.path_prior_map_per_relation[c][r]\n for p_ctr, (path, _) in enumerate(paths_for_this_relation.items()):\n ans = self.execute_one_program(e1, path, depth=0, max_branch=100)\n if len(ans) == 0:\n continue\n # execute the path get answer\n if path not in success_map[c][r]:\n success_map[c][r][path] = 0\n if path not in total_map[c][r]:\n total_map[c][r][path] = 0\n for a in ans:\n if a in e2_list:\n success_map[c][r][path] += 1\n total_map[c][r][path] += 1\n\n precision_map = {}\n for c, _ in success_map.items():\n for r, _ in success_map[c].items():\n if c not in precision_map:\n precision_map[c] = {}\n if r not in precision_map[c]:\n precision_map[c][r] = {}\n for path, s_c in success_map[c][r].items():\n precision_map[c][r][path] = s_c / 
total_map[c][r][path]\n\n if not output_filenm:\n dir_name = os.path.join(args.data_dir, \"data\", self.args.dataset_name, \"linkage={}\".format(self.args.linkage))\n output_filenm = os.path.join(dir_name, \"precision_map.pkl\")\n logger.info(\"Dumping precision map at {}\".format(output_filenm))\n with open(output_filenm, \"wb\") as fout:\n pickle.dump(precision_map, fout)\n logger.info(\"Done...\")", "def main(): #Main Control Loop (as prototyped on 2/26 in Glennan Lounge)\n\t# Create listener to receive info from UI\n\tui_listener = pso_network.UIListener()\n\tui_listener.daemon = True\n\tui_listener.start()\n\tui_state = ui_listener.get_ui()\n\t\n\t# Create listener to recieve waypoints and corrections from planner.\n\tplanner_listener = pso_network.PlannerListener()\n\tplanner_listener.daemon = True\n\tplanner_listener.start()\n\twaypoint = cv.CreateMat(4, 1, cv.CV_32FC1)\n\t\n\t#Instatiate Drone Objects (defined in Drone.py)\n\tmyDrone = Drone(\"192.168.1.1\")\n\t\n\t\n\t#Preset flags\n\trunning = True\n\twait_on_emergency = False\n\twait_on_liftoff = False\n\twait_on_land = False\n\t\n\t#Create Kalman filter, state, and command vectors\n\tkalman = PsoKalman()\n\tu = cv.CreateMat(4, 1, cv.CV_32FC1)\n\tz = cv.CreateMat(5, 1, cv.CV_32FC1)\n\tsys_time = time.time()\n\t\n\t#Create PID controllers for each axis\n\tyaw_pid = pso_pid.PID()\n\tyaw_pid.k = 1.5\n\tyaw_pid.t_i = 1.\n\tyaw_pid.angular = True\n\tyaw_pid.deadband = .05\n\t\n\tz_pid = pso_pid.PID()\n\tz_pid.k = .00075\n\tz_pid.i_enable = False\n\tz_pid.t_i = 10.\n\tz_pid.deadband = 150\n\t\n\troll_pid = pso_pid.PID()\n\troll_pid.k = .00025\n\troll_pid.i_enable = False\n\troll_pid.deadband = 50\n\t\n\tpitch_pid = pso_pid.PID()\n\tpitch_pid.k = .00025\n\tpitch_pid.i_enable = False\n\tpitch_pid.deadband = 50\n\t\n\t#Logger puts state in csv for matlab-y goodness\n\tlogger = debuglogger.Logger()\n\t\n\t#Fig bucking loop\n\twhile(running):\n\t\ttime.sleep(.05)\n\t\tos.system(\"clear\")\n\t\t\n\t\t#Get command state from UI\n\t\tprev_ui_state = ui_state\n\t\tui_state = ui_listener.get_ui()\n\t\t\t\t\n\t\tif ui_state[EMERGENCY]:\n\t\t\tmyDrone.emergency()\n\t\t\n\t\tif ui_state[SHUTDOWN]:\n\t\t\t#UI has ordered shutdown\n\t\t\tprint \"Shutting down control loop...\"\n\t\t\tui_listener.stop()\n\t\t\tmyDrone.kill()\n\t\t\trunning = False\n\t\t\n\t\tif ui_state[TRIM]:\n\t\t\tmyDrone.trim()\n\t\t\tui_listener.clear_flag(TRIM)\n\t\t\tprint \"\\nTRIM\\n\"\n\t\t\n\t\tif ui_state[FLYING]:\n\t\t\tmyDrone.takeoff()\n\t\t\tprint \"Taking Off/Flying\"\n\t\t\tif not prev_ui_state[FLYING]:\n\t\t\t\twait_on_liftoff = 5\n\t\telse:\n\t\t\tmyDrone.land()\n\t\t\tprint \"Landing/Landed\"\n\t\t\tif prev_ui_state[FLYING]:\n\t\t\t\twait_on_land = 5\n\t\t\n\t\tif ui_state[RESET]:\n\t\t\tmyDrone.reset_emergency()\n\t\t\tmyDrone.reset()\n\t\t\tyaw_pid.flush()\n\t\t\tz_pid.flush()\n\t\t\troll_pid.flush()\n\t\t\tpitch_pid.flush()\n\t\t\tui_listener.clear_flag(RESET)\n\t\t\n\t\t#Get navdata\n\t\tnav = myDrone.get_navdata()\n\t\t\n\t\t#Print out Drone State\n\t\tif nav.check_state(navdata.EMERGENCY):\n\t\t\tprint \"Emergency!\"\n\t\telif not nav.check_state(navdata.COM_WATCHDOG):\n\t\t\tprint \"WATCHDOG\"\n\t\telif nav.check_state(navdata.COMMAND):\n\t\t\tprint \"Watchdog cleared. 
Not yet ready for commands.\"\n\t\telse:\n\t\t\tprint \"Ready to Fly\\n\"\n\t\tprint \"\\t\\tECACAVNAPCUWAPTHLGCMBNTTTCUACVVF\\n{0}\\t\\t{1:32b}\".format(nav.seq,nav.state) #Print navdata state\n\t\t\n\t\t#Update State (Kalman)\n\t\tdt = time.time()-sys_time\n\t\tprint \"dt:\\t\",dt\n\t\tsys_time = time.time()\n\t\tz[0, 0], z[1, 0], z[2, 0], z[3, 0], z[4, 0] = nav.vx, nav.vy, nav.z, nav.vz, nav.psi\n\t\t#z and u need to be cv matrices!!!!\n\t\tsys_state = myDrone.get_state()\n\t\tprint \"\\nDrone Kalman State:\"\n\t\tprint \"x:\\t{0}\".format(sys_state[0, 0])\n\t\tprint \"y:\\t{0}\".format(sys_state[2, 0])\n\t\tprint \"z:\\t{0}\".format(sys_state[4, 0])\n\t\tprint \"vx:\\t{0}\".format(sys_state[1, 0])\n\t\tprint \"vy:\\t{0}\".format(sys_state[3, 0])\n\t\tprint \"vz:\\t{0}\".format(sys_state[5, 0])\n\t\tprint \"theta:\\t{0}\".format(sys_state[6, 0])\n\t\tprint \"vtheta:\\t{0}\".format(sys_state[7, 0])\n\t\t\n\t\tprint \"\\nNavdata Euler Angles:\"\n\t\tprint \"theta:\\t\",nav.theta\n\t\tprint \"phi:\\t\",nav.phi\n\t\tprint \"psi:\\t\",nav.psi\n\t\tprint \"\\nNavdata Stuff:\"\n\t\tprint \"z:\\t\",nav.z\n\t\tprint \"vx:\\t\",nav.vx\n\t\tprint \"vy:\\t\",nav.vy\n\t\tui_listener.set_state(sys_state, nav)\n\t\t#logger.log(sys_state)\n\t\t\n\t\tif wait_on_liftoff>0:\n\t\t\tprint \"Waiting for liftoff to finish\"\n\t\t\twait_on_liftoff -= dt\n\t\t\tu[0, 0], u[1, 0], u[2, 0], u[3, 0] = 0, 0, 1, 0#Assume drone goes full speed up when taking off\n\t\telif ui_state[FLYING]:\n\t\t\tprint \"\" #Blank line to everything lines up\n\t\t\t#If Drone is in waypoint mode, compute command\n\t\t\tif not ui_state[OVERRIDE]:\n\t\t\t\t#Get waypoint\n\t\t\t\tif not planner_listener.waypoints.empty():\n\t\t\t\t\twaypoint = planner_listener.waypoints.get()\n\t\t\t\tprint \"\\nNext Waypoint:\"\n\t\t\t\tprint \"X:\\t\", waypoint[0, 0]\n\t\t\t\tprint \"Y:\\t\", waypoint[1, 0]\n\t\t\t\tprint \"Z:\\t\", waypoint[2, 0]\n\t\t\t\tprint \"θ:\\t\", waypoint[3, 0]\n\t\t\t\t#Compute command\n\t\t\t\t(roll_des, pitch_des) = world2drone(waypoint[0, 0]-sys_state[0, 0], waypoint[1, 0]-sys_state[2, 0], sys_state[6, 0])\n\t\t\t\tprint \"Desired Roll:\\t\", roll_des\n\t\t\t\tprint \"Desired Pitch:\\t\", pitch_des\n\t\t\t\tu[0, 0] = pitch_pid.update(0, pitch_des)\n\t\t\t\tu[1, 0] = roll_pid.update(0, roll_des)\n\t\t\t\tu[2, 0] = z_pid.update(sys_state[4, 0], waypoint[2, 0])\n\t\t\t\tu[3, 0] = yaw_pid.update(sys_state[6, 0], waypoint[3, 0])\n\t\t\t\tmyDrone.go(u[0, 0], u[1, 0], u[3, 0], u[2, 0])\n\t\t\telse: #Manual override: Use command from UI state\n\t\t\t\tprint \"\\nManual override mode\\n\\n\\n\"\n\t\t\t\tmyDrone.go(ui_state[COMMAND][0], ui_state[COMMAND][1], ui_state[COMMAND][2], ui_state[COMMAND][3])\n\t\t\t\tu[0, 0], u[1, 0], u[2, 0], u[3, 0] = ui_state[COMMAND]\n\t\telse:\n\t\t\tprint \"\\nLanded\"\n\t\t\n\t\t#Print out commands\n\t\tprint \"\\nCommands:\\npitch:\\t\",u[0, 0]\n\t\tprint \"roll:\\t\", u[1, 0]\n\t\tprint \"gaz:\\t\", u[2, 0]\n\t\tprint \"yaw:\\t\", u[3, 0]", "def exercise():\n pi_good = get_pdb_inputs(pdb_str=pdb_str_answer, restraints=False)\n map_data = get_map(xrs=pi_good.xrs)\n xrs_good = pi_good.xrs.deep_copy_scatterers()\n pi_good.ph.write_pdb_file(file_name=\"answer.pdb\",\n crystal_symmetry=xrs_good.crystal_symmetry())\n #\n pi_poor = get_pdb_inputs(pdb_str=pdb_str_poor, restraints=True)\n pi_poor.ph.write_pdb_file(file_name=\"poor.pdb\")\n xrs_poor = pi_poor.xrs.deep_copy_scatterers()\n #\n d = xrs_good.distances(other=xrs_poor)\n print(d.min_max_mean().as_tuple())\n assert flex.max(d)>2\n assert 
flex.mean(d)>0.7\n #\n xrs_refined = xrs_poor\n for i in range(3):\n ero = individual_sites.easy(\n map_data = map_data,\n xray_structure = xrs_refined,\n pdb_hierarchy = pi_poor.ph,\n geometry_restraints_manager = pi_poor.grm)\n xrs_refined = ero.xray_structure\n # comapre\n d = xrs_good.distances(other=xrs_refined)\n print(d.min_max_mean().as_tuple())\n assert flex.max(d)<0.15\n assert flex.mean(d)<0.03\n ero.pdb_hierarchy.write_pdb_file(file_name=\"refined.pdb\",\n crystal_symmetry=xrs_good.crystal_symmetry())", "def run_scenario(self):\n self.initialize_random_map()\n self.visualize_environment('initial')\n self.get_tower_target_coverages()\n self.solve_environment()\n self.visualize_environment('solved')", "def generate_parameters_random_walk():\r\n\r\n logging.info('Loading distances_nets from disk...')\r\n\r\n\r\n\r\n sum_weights = {}\r\n\r\n amount_edges = {}\r\n\r\n\r\n\r\n layer = 0\r\n\r\n while is_pickle('distances_nets_weights-layer-' + str(layer)):\r\n\r\n logging.info('Executing layer {}...'.format(layer))\r\n\r\n weights = restore_variable_from_disk('distances_nets_weights-layer-' + str(layer))\r\n\r\n\r\n\r\n for node, list_weights in weights.items():\r\n\r\n if layer not in sum_weights:\r\n\r\n sum_weights[layer] = 0\r\n\r\n if layer not in amount_edges:\r\n\r\n amount_edges[layer] = 0\r\n\r\n\r\n\r\n for w in list_weights:\r\n\r\n sum_weights[layer] += w\r\n\r\n amount_edges[layer] += 1\r\n\r\n\r\n\r\n logging.info('Layer {} executed.'.format(layer))\r\n\r\n layer += 1\r\n\r\n\r\n\r\n average_weight = {}\r\n\r\n for layer in sum_weights.keys():\r\n\r\n average_weight[layer] = sum_weights[layer] / amount_edges[layer]\r\n\r\n\r\n\r\n logging.info(\"Saving average_weights on disk...\")\r\n\r\n save_variable_on_disk(average_weight, 'average_weight')\r\n\r\n\r\n\r\n amount_neighbours = {}\r\n\r\n\r\n\r\n layer = 0\r\n\r\n while is_pickle('distances_nets_weights-layer-' + str(layer)):\r\n\r\n logging.info('Executing layer {}...'.format(layer))\r\n\r\n weights = restore_variable_from_disk('distances_nets_weights-layer-' + str(layer))\r\n\r\n\r\n\r\n amount_neighbours[layer] = {}\r\n\r\n\r\n\r\n for node, list_weights in weights.items():\r\n\r\n cont_neighbours = 0\r\n\r\n for w in list_weights:\r\n\r\n if w > average_weight[layer]:\r\n\r\n cont_neighbours += 1\r\n\r\n amount_neighbours[layer][node] = cont_neighbours\r\n\r\n\r\n\r\n logging.info('Layer {} executed.'.format(layer))\r\n\r\n layer += 1\r\n\r\n\r\n\r\n logging.info(\"Saving amount_neighbours on disk...\")\r\n\r\n save_variable_on_disk(amount_neighbours, 'amount_neighbours')", "def run(self):\n best_score = float('inf')\n best_route = None\n best_nr_iterations = None\n best_tabu_list_size = None\n for i in range(self.range_iterations_start, self.range_iterations_end, 10):\n for j in range(self.range_tabu_list_start, self.range_tabu_list_end):\n print('testing for nr_iterations', i, ' and tabu list size', j)\n self.hc.generate_initial_solution(use_seed=True)\n score, route, iteration = self.hc.solve(tabu=self.tabu, with_time_windows=self.with_time_windows,\n nr_iterations=i, tabu_size=j,\n allow_infeasibilites=self.allow_infeasibilites)\n\n if score < best_score:\n best_score = score\n best_route = route\n best_nr_iterations = i\n best_tabu_list_size = j\n\n print('best results with sore', best_score, best_nr_iterations, best_tabu_list_size )\n return best_score, best_route, best_tabu_list_size", "def run_monte_carlo(runs, pool, goals=False):\n total_ranking = {\n 1: [],\n 2: [],\n 3: [],\n 4: [],\n 5: []\n }\n for 
run in range(runs):\n if goals:\n curr_score = run_one_pool(pool, True)\n else:\n curr_score = run_one_pool(pool)\n total_ranking = rank_teams_of_curr_run(curr_score, total_ranking)\n return total_ranking", "def computer_driver_heuristic(self, pc):\n if pc == self.departure_location:\n return self.nearest_neigbor(pc)\n else:\n # encode state: State -> Generalized One hot vector\n # print(len(self.idx_to_pc)+1)\n encoded_vector = np.zeros(len(self.idx_to_pc)+1)\n\n # indices of locations FOR ENCODING\n pickup_jobs_idx = [self.pc_to_idx[p]+1 for p in list(self.state.P_k.keys())] # +1 is to make room for the time dim\n deliv_jobs_idx = [self.pc_to_idx[p]+1 for p in list(self.state.D_k.keys())]\n\n # indices of locations FOR PC READING\n pickup_jobs_idx_read = [self.pc_to_idx[p] for p in list(self.state.P_k.keys())]\n deliv_jobs_idx_read = [self.pc_to_idx[p] for p in list(self.state.D_k.keys())]\n tasks = set(pickup_jobs_idx_read + deliv_jobs_idx_read)\n\n if len(tasks) > 0:\n # set appropriate values at the index corresponding to the location\n encoded_vector[pickup_jobs_idx] = -0.5\n encoded_vector[deliv_jobs_idx] = 0.5\n encoded_vector[self.pc_to_idx[pc]+1] = 1\n\n # # current time encoded as nb of seconds between 12pm and now/nb seconds between 12pm and 12am\n total_nb_seconds = datetime.timedelta(hours=12, minutes=0)\n cur_time = self.state.t_k.time()\n cur_time = datetime.timedelta(hours=cur_time.hour, minutes=cur_time.minute) # nb of seconds from 12am\n # # TODO this can further be noramlized as most values will be >0 (>6am)\n cur_time = 2 * cur_time.seconds / total_nb_seconds.seconds - 1 # normalized time in [-1,1]\n encoded_vector[0] = cur_time\n\n # predict decision\n pred = self.heuristic_model.predict_proba(encoded_vector.reshape(1,-1))\n\n # take the most probable location among the remaining jobs\n # # set proba to 0 if location not among remaining jobs\n # print(\"##############\")\n # print(\"shape of pred \", pred.shape)\n # print(\"Number of locations considered : \", len(self.idx_to_pc))\n print(\"Possible indices to choose from : \", tasks)\n pred[0, list(set(range(0, len(self.idx_to_pc))) - set(pickup_jobs_idx_read + deliv_jobs_idx_read))] = 0\n\n idx_opt = np.argsort(pred[0,:])[-1] # most probable location (by its index) among remaining jobs\n print(\"Index chosen : \", idx_opt )\n return self.idx_to_pc[idx_opt]\n\n elif len(tasks) == 0:\n return 0\n\n else:\n raise ValueError('Problem with tasks, which has negative length...')", "def main(target_clusters, clusters_per_lane, project_id, dest_plate_list, allow_non_dupl_struct): \n couch = connection()\n structure = proj_struct(couch, project_id, target_clusters)\n [lane_maps, clusters_rem, clusters_expr] = parse_indata(structure, target_clusters)\n if allow_non_dupl_struct:\n aggregator(lane_maps,clusters_rem,clusters_per_lane)\n else:\n simple_unique_set(lane_maps)\n [ideal_ratios, req_lanes, total_lanes] = sample_distributor(lane_maps, clusters_rem, clusters_per_lane)\n acc_ratios = correct_numbers(lane_maps, clusters_expr, ideal_ratios, req_lanes, total_lanes)\n generate_output(project_id, dest_plate_list, total_lanes, req_lanes, lane_maps, acc_ratios)", "def main():\n\n # first lets test with a already created csp:\n csp = create_map_csp()\n solution = backtracking(csp)\n #solution2,assigned = minimum_remaining_values(csp)\n print(solution)\n #print assigned\n\n # and now with our own generated sudoku CSP\n \"\"\"sudokus = read_sudokus()\n csp = create_sudoku_csp(sudokus[1])\n solution = backtracking(csp)\n print 
sudoku_csp_to_array(solution)\n\"\"\"", "def Scheduler():\n courses = \"cs108 cs112 cs214 stat343 cs336 cs300\".split()\n profs = \"norman adams schuurman pruim vanderlinden\".split()\n slots = \"mwf900 mwf1130 tth1030 tth130\".split()\n rooms = \"sb354 nh064\".split()\n \n variables = courses\n assignments = {}\n assignments['cs108'] = \"norman\"\n assignments['cs112'] = \"adams\"\n assignments['cs214'] = \"adams\"\n assignments['stat343'] = \"pruim\"\n assignments['cs336'] = \"vanderlinden\"\n assignments['cs300'] = \"schuurman\"\n neighbors = parse_neighbors(\"\"\"\n cs108: norman; cs112: adams; \n cs214: adams; stat343: pruim; \n cs336: vanderlinden; cs300: schuurman\n \"\"\", variables)\n domains = {}\n for course in courses:\n domains[course] = []\n for course in courses:\n for prof in profs:\n for room in rooms:\n for slot in slots:\n domains[course].append(prof + \" \" + room + \" \" + slot)\n \n for type in [courses]:\n for A in type:\n for B in type:\n if A != B:\n if B not in neighbors[A]:\n neighbors[A].append(B)\n if A not in neighbors[B]:\n neighbors[B].append(A)\n\n def scheduler_constraints(A, a, B, b, recurse=0):\n ADomain = a.split()\n BDomain = b.split()\n A_Prof = ADomain[0]\n B_Prof = BDomain[0]\n A_Room = ADomain[1]\n B_Room = BDomain[1]\n A_Slot = ADomain[2]\n B_Slot = BDomain[2]\n A_Course = A\n B_Course = B\n \n if(A_Prof == B_Prof and A_Slot == B_Slot):\n return False\n if(A_Room == B_Room and A_Slot == B_Slot):\n return False\n\n if('norman' in a and A == 'cs108'):\n return True\n if('adams' in a and A == 'cs112'):\n return True\n if('adams' in a and A == 'cs214'):\n return True\n if('pruim' in a and A == 'stat343'):\n return True\n if('vanderlinden' in a and A == 'cs336'):\n return True\n if('schuurman' in a and A == 'cs300'):\n return True\n if(A in courses and B in courses):\n return False\n if(recurse == 0):\n return scheduler_constraints(B, b, A, a, 1)\n return True\n \n return CSP(variables, domains, neighbors, scheduler_constraints)", "def main(cls, args):\n #cls.trainOfflineAndTest(100, 0.1, 0.1, 0.9);\n #cls.trainOfflineAndTest(500, 0.1, 0.1, 1.0);\n\n cls.trainer.teachActiveAndSaveStatistics(\"onlineTest\", 10, 0.8, 1.0 ,1.0, 0.0, 0.3, True, True,True);\n cls.trainer.teachActiveAndSaveStatistics(\"path\", 10, 0.0, 0.0, 0.0, 0.0, 0.0, True, False, False)\n\n #trainer.teachActiveAndSaveStatistics(\"onlineTest\", 10000, 0.1f, 0.1f, 1.0f, 0.0f, 0.1f,true, true, true);\n # \t\ttrainer.teachActiveAndSaveStatistics(\"onlineTest\", 10000, 0.1f, 0.1f, 1.0f, 0.0f, 0.1f,\n # \t\t\t\tfalse, true, true);\n # \t\t\n # \t\ttestAgentFromFileWithOutLearning(\"net10000_a0.1_b0.1g_1.0e_0.1_first.net\", 10000, true);\n # \t\ttestAgentFromFileWithOutLearning(\"net10000_a0.1_b0.1g_1.0e_0.1_secound.net\", 10000, true);\n #cls.testAgentFromFileWithOutLearning(\"net10000_a0.1_b0.1g_1.0e_0.1_first.net\", 10000, False)\n #cls.testAgentFromFileWithOutLearning(\"net10000_a0.1_b0.1g_1.0e_0.1_secound.net\", 10, False)\n # \t\ttestAgentFromFileWithOutLearning(\"net10000_a0.1_b0.1g_1.0e_0.1.net\", 10000, false);", "def test_mining_train():\n g = clondike_transshipment_problem()\n assert isinstance(g, Graph)\n\n equipment_deliveries = [\n (\"L-1\", \"L-1-1\"),\n (\"L-1\", \"L-1-2\"), # origin, destination\n (\"L-1\", \"L-1-3\"),\n (\"L-1\", \"L-1-4\")\n ]\n\n mineral_deliveries = [\n (\"L-1-1\", \"L-1\"),\n (\"L-1-2\", \"L-1\"),\n (\"L-1-3\", \"L-1\"),\n (\"L-1-4\", \"L-1\"),\n ]\n\n access_nodes = {\"L-1\", \"L-1-1\", \"L-1-2\", \"L-1-3\", \"L-1-4\"}\n\n train = Train(rail_network=g, 
start_location=\"L-1\", access=access_nodes)\n\n s1 = train.schedule(equipment_deliveries)\n s2 = train.schedule(mineral_deliveries)\n s3 = train.schedule(equipment_deliveries[:] + mineral_deliveries[:])\n\n s1_expected = [\n ('L-1', 'L-1-1'), ('L-1', 'L-1-2'), ('L-1', 'L-1-3'), ('L-1', 'L-1-4')\n ] # shortest jobs first.!\n\n s2_expected = [\n ('L-1-1', 'L-1'), ('L-1-2', 'L-1'), ('L-1-3', 'L-1'), ('L-1-4', 'L-1')\n ] # shortest job first!\n\n s3_expected = [\n ('L-1', 'L-1-1'), ('L-1-1', 'L-1'), # circuit 1\n ('L-1', 'L-1-2'), ('L-1-2', 'L-1'), # circuit 2\n ('L-1', 'L-1-3'), ('L-1-3', 'L-1'), # circuit 3\n ('L-1', 'L-1-4'), ('L-1-4', 'L-1') # circuit 4\n ] # shortest circuit first.\n\n assert s1 == s1_expected\n assert s2 == s2_expected\n assert s3 == s3_expected", "def iteration(self):\n T = self.generate_T()\n R = self.reproduce(T)\n self.P = self.choose_mi_best(R)\n #print(self.P)", "def run_genetic_algorithm(bayes_params):\n\n print('Running genetic algorithm')\n\n # Unpacks parameters (unfortunately can't feed dataframe (or series or\n # array) data into a function with hyperopt, so am having to pickle the\n # parameters not being optimised with hyperopt\n params_file = '{}/Program_input/Input_params.pkl'.format(\n bayes_params['workingdirectory']\n )\n with open(params_file, 'rb') as f:\n fixed_params = pickle.load(f)\n if not type(fixed_params) in [dict, OrderedDict]:\n raise TypeError('Data in {} is not a pickled dictionary'.format(params_file))\n params = {**bayes_params, **fixed_params}\n\n # Records sequences and their fitnesses after each generation\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'w') as f:\n f.write('Tracking GA optimisation progress\\n')\n\n ga_calcs = run_ga_calcs(params)\n\n # Defines whether sequences are compared by their raw or rank propensities.\n # Since BUDE scores and frequency values have to be compared by their rank\n # values, have made the decision to also compare propensity values by their\n # rankings.\n \"\"\"\n if params['matingpopmethod'] in ['fittest', 'roulettewheel']:\n raw_or_rank = 'raw'\n elif params['matingpopmethod'] in ['rankroulettewheel']:\n raw_or_rank = 'rank'\n \"\"\"\n raw_or_rank = 'rank'\n\n # Calculates propensity and/or BUDE energy of input structure\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n f.write('Input structure\\n')\n\n if params['fitnessscoremethod'] == 'alternate':\n (network_propensity_scores, network_frequency_scores\n ) = ga_calcs.measure_fitness_propensity(params['initialnetwork'])\n\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n f.write('network_id, sequence, propensity_score, frequency_score,'\n ' BUDE energy, clashscore\\n')\n for network, G in params['initialnetwork'].items():\n sequence = ''.join([G.nodes()[node]['aa_id'] for node in G.nodes()])\n propensity = network_propensity_scores[network]\n frequency = network_frequency_scores[network]\n f.write('{}, {}, {}, {}, {}, {}\\n'.format(\n network, sequence, propensity, frequency,\n params['inputpdbenergy'], params['inputpdbclash']\n ))\n f.write('\\n')\n\n if params['fitnessscoremethod'] == 'propensity':\n (network_propensity_scores, network_frequency_scores\n ) = ga_calcs.measure_fitness_propensity(params['initialnetwork'])\n\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n f.write('network_id, sequence, propensity_score, 
frequency_score\\n')\n for network, G in params['initialnetwork'].items():\n sequence = ''.join([G.nodes()[node]['aa_id'] for node in G.nodes()])\n propensity = network_propensity_scores[network]\n frequency = network_frequency_scores[network]\n f.write('{}, {}, {}, {}\\n'.format(\n network, sequence, propensity, frequency\n ))\n f.write('\\n')\n\n elif params['fitnessscoremethod'] == 'allatom':\n network_energies = ga_calcs.measure_fitness_allatom(params['initialnetwork'])\n\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n f.write('network_id, sequence, BUDE energy\\n')\n for network, G in params['initialnetwork'].items():\n sequence = ''.join([G.nodes()[node]['aa_id'] for node in G.nodes()])\n energy = network_energies[network]\n f.write('{}, {}, {}\\n'.format(network, sequence, energy))\n f.write('\\n')\n\n elif params['fitnessscoremethod'] == 'molprobity':\n network_clashes = ga_calcs.measure_fitness_clashscore(params['initialnetwork'])\n\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n f.write('network_id, sequence, clashscore\\n')\n for network, G in params['initialnetwork'].items():\n sequence = ''.join([G.nodes()[node]['aa_id'] for node in G.nodes()])\n clashscore = network_clashes[network]\n f.write('{}, {}, {}\\n'.format(network, sequence, clashscore))\n f.write('\\n')\n\n # Runs GA cycles\n gen = params['startgen']\n while gen < params['stopgen']:\n gen += 1\n print('Generation {}'.format(gen))\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n f.write('\\n\\n\\n\\n\\nGeneration {}\\n'.format(gen))\n\n\n all_networks_list = [params['sequencesdict']]\n pop_sizes = [params['populationsize']]\n\n for index, networks_dict in enumerate(all_networks_list):\n # Measures fitness of sequences in starting population.\n if (\n (params['fitnessscoremethod'] == 'propensity')\n or\n (params['fitnessscoremethod'] == 'alternate' and gen % 2 == 1)\n ):\n (network_propensity_scores, network_frequency_scores\n ) = ga_calcs.measure_fitness_propensity(networks_dict)\n network_fitness_scores = ga_calcs.combine_prop_and_freq_scores(\n network_propensity_scores, network_frequency_scores, raw_or_rank\n )\n\n # Records sequences output from this generation and their\n # associated fitnesses\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n f.write('network, sequence, propensity, frequency, probability\\n')\n for network, G in networks_dict.items():\n sequence = ''.join([G.nodes()[node]['aa_id'] for node in G.nodes()])\n propensity = network_propensity_scores[network]\n frequency = network_frequency_scores[network]\n probability = network_fitness_scores[network]\n f.write('{}, {}, {}, {}, {}\\n'.format(\n network, sequence, propensity, frequency, probability\n ))\n f.write('Total: {}, {}, {}'.format(\n sum(network_propensity_scores.values()),\n sum(network_frequency_scores.values()),\n sum(network_fitness_scores.values())\n ))\n f.write('\\n')\n elif (\n (params['fitnessscoremethod'] == 'allatom')\n or\n (params['fitnessscoremethod'] == 'alternate' and gen % 4 == 2)\n ):\n # Runs BUDE energy scoring on parallel processors\n network_energies = ga_calcs.measure_fitness_allatom(networks_dict)\n (network_fitness_scores\n ) = ga_calcs.convert_energies_to_probabilities(network_energies)\n\n # Records sequences output from this generation and their\n # associated fitnesses\n with 
open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n f.write('network, sequence, BUDE score, probability\\n')\n for network, G in networks_dict.items():\n sequence = ''.join([G.nodes()[node]['aa_id'] for node in G.nodes()])\n energy = network_energies[network]\n probability = network_fitness_scores[network]\n f.write('{}, {}, {}, {}\\n'.format(\n network, sequence, energy, probability\n ))\n f.write('Total: {}, {}'.format(\n sum(network_energies.values()),\n sum(network_fitness_scores.values())\n ))\n f.write('\\n')\n\n elif (\n (params['fitnessscoremethod'] == 'molprobity')\n or\n (params['fitnessscoremethod'] == 'alternate' and gen % 4 == 0)\n ):\n # Runs MolProbity scoring on parallel processors\n network_clashes = ga_calcs.measure_fitness_clashscore(networks_dict)\n (network_fitness_scores\n ) = ga_calcs.convert_clashscores_to_probabilities(network_clashes)\n\n # Records sequences output from this generation and their\n # associated fitnesses\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n f.write('network, sequence, clashscore, probability\\n')\n for network, G in networks_dict.items():\n sequence = ''.join([G.nodes()[node]['aa_id'] for node in G.nodes()])\n clash = network_clashes[network]\n probability = network_fitness_scores[network]\n f.write('{}, {}, {}, {}\\n'.format(\n network, sequence, clash, probability\n ))\n f.write('Total: {}, {}'.format(\n sum(network_clashes.values()),\n sum(network_fitness_scores.values())\n ))\n f.write('\\n')\n\n # Selects subpopulation for mating\n if params['matingpopmethod'] == 'fittest':\n mating_pop_dict = ga_calcs.create_mat_pop_fittest(\n networks_dict, network_fitness_scores, pop_sizes[index],\n params['unfitfraction']\n )\n elif params['matingpopmethod'] in ['roulettewheel', 'rankroulettewheel']:\n mating_pop_dict = ga_calcs.create_mat_pop_roulette_wheel(\n networks_dict, network_fitness_scores, pop_sizes[index], params['']\n )\n\n # Performs crossover of parent sequences to generate child sequences\n if params['crossovermethod'] == 'uniform':\n crossover_pop_dict = ga_calcs.uniform_crossover(mating_pop_dict)\n elif params['crossovermethod'] == 'segmented':\n crossover_pop_dict = ga_calcs.segmented_crossover(mating_pop_dict)\n\n # Mutates child sequences\n if params['mutationmethod'] == 'swap':\n mutated_pop_dict = ga_calcs.swap_mutate(crossover_pop_dict)\n elif params['mutationmethod'] == 'scramble':\n mutated_pop_dict = ga_calcs.scramble_mutate(crossover_pop_dict)\n\n # Combines parent and child sequences into single generation\n merged_networks_dict = ga_calcs.add_children_to_parents(\n mutated_pop_dict, mating_pop_dict\n )\n\n random_order = [n for n in range(len(merged_networks_dict))]\n random.shuffle(random_order)\n shuffled_merged_networks_dict = OrderedDict(\n {list(merged_networks_dict.keys())[n]:\n list(merged_networks_dict.values())[n] for n in random_order}\n )\n params['sequencesdict'] = shuffled_merged_networks_dict\n\n # Calculates fitness of output sequences and filters population to maintain\n # the fittest 50%, plus sums the probabilities of the retained sequences and\n # returns this value (to be minimised with hyperopt)\n summed_fitness = 0\n\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n f.write('\\n\\n\\n\\n\\nOutput generation\\n')\n\n if params['fitnessscoremethod'] != 'allatom':\n (network_propensity_scores, network_frequency_scores\n ) = 
ga_calcs.measure_fitness_propensity(params['sequencesdict'])\n network_fitness_scores = ga_calcs.combine_prop_and_freq_scores(\n network_propensity_scores, network_frequency_scores, raw_or_rank\n )\n elif params['fitnessscoremethod'] == 'allatom':\n network_energies = ga_calcs.measure_fitness_allatom(params['sequencesdict'])\n (network_fitness_scores\n ) = ga_calcs.convert_energies_to_probabilities(network_energies)\n\n # Records sequences output from this generation and their associated\n # fitnesses\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n if params['fitnessscoremethod'] != 'allatom':\n f.write('network, sequence, propensity, frequency\\n')\n elif params['fitnessscoremethod'] == 'allatom':\n f.write('network, sequence, BUDE score\\n')\n for network, G in params['sequencesdict'].items():\n sequence = ''.join([G.nodes()[node]['aa_id'] for node in G.nodes()])\n if params['fitnessscoremethod'] != 'allatom':\n propensity = network_propensity_scores[network]\n frequency = network_frequency_scores[network]\n f.write('{}, {}, {}, {}\\n'.format(\n network, sequence, propensity, frequency\n ))\n elif params['fitnessscoremethod'] == 'allatom':\n energy = network_energies[network]\n f.write('{}, {}, {}\\n'.format(network, sequence, energy))\n if params['fitnessscoremethod'] != 'allatom':\n f.write('Total: {}, {}'.format(\n sum(network_propensity_scores.values()),\n sum(network_frequency_scores.values())\n ))\n elif params['fitnessscoremethod'] == 'allatom':\n f.write('Total: {}'.format(sum(network_energies.values())))\n f.write('\\n')\n\n params['sequencesdict'] = ga_calcs.create_mat_pop_fittest(\n params['sequencesdict'], network_fitness_scores,\n params['populationsize'], unfit_fraction=0\n )\n\n for network in params['sequencesdict'].keys():\n # Higher propensity is more likely, so add because output from\n # measure_fitness_propensity is sum of -log(propensity) values, and\n # hyperopt minimises output score\n # Can't combine propensity and frequency scores without first converting\n # to a probability, so for calculating output combined fitness can only\n # use combined propensity scores to rank the structures\n if params['fitnessscoremethod'] != 'allatom':\n summed_fitness += network_propensity_scores[network]\n # Lower score is more likely, so add because hyperopt minimises output\n # score\n elif params['fitnessscoremethod'] == 'allatom':\n summed_fitness += network_energies[network]\n\n with open('{}/Program_output/GA_output_sequences_dict.pkl'.format(\n bayes_params['workingdirectory']), 'wb') as f:\n pickle.dump(params['sequencesdict'], f)\n\n print(summed_fitness)\n\n return summed_fitness", "def main():\n\n # Run all the requirements for part A\n ##############################\n # Question 3\n # runs naive A*\n question_3()\n\n ##############################\n # Question 5\n # runs online A*\n question_5()\n\n ##############################\n # Question 7\n # runs online A* on fine grid\n question_7()\n\n\n # Run all the requirements for part B\n ##############################\n # Question 9\n question_9()\n\n ##############################\n # Question 10\n question_10()\n\n ##############################\n # Question 1\n question_11()", "def train_GPs_on_position(list_of_input_trajectories, list_of_output_trajectories, times_array):\r\n # get list of Xs that line up with inputs and outputs and are limited to them.\r\n # for each input:\r\n # train a GP\r\n # compare actual outputs to predicted using defined function 
for MSLL\r\n # store MSLL in array size that is the same size as the inputs and the outputs.\r\n # For the last line, if you do it for all inputs, can end up with a square array of inputs to outputs\r\n # Then I need some method of choosing the maximum combination of inputs and outputs. Research this...\r\n\r\n cost_matrix = np.zeros((len(list_of_input_trajectories),len(list_of_output_trajectories)))\r\n for i, input_trajectory_masked in enumerate(list_of_input_trajectories):\r\n input_mask = np.ma.getmask(input_trajectory_masked)\r\n input_trajectory = np.array(input_trajectory_masked[~input_mask].reshape(3,-1))\r\n times_input_mask = input_mask[0,:]\r\n times_input_masked = np.ma.masked_array(times_array, times_input_mask)\r\n input_times = np.array(times_input_masked[~times_input_mask])\r\n\r\n # REFORMAT THE ARRAY TO BE SUITABLE FOR GPy\r\n Y_List = GPy_reformat_3D(input_trajectory) # make sure input_trajectory has shape (3, n_timesteps)\r\n X_List = GPy_reformat_3D(input_times) # times should have shape (n_timesteps)\r\n\r\n\r\n icm = GPy.util.multioutput.ICM(input_dim=1, num_outputs=3, kernel=GPy.kern.RBF(1))\r\n # print(icm)\r\n\r\n gp = GPy.models.GPCoregionalizedRegression(X_List, Y_List, kernel=icm)\r\n gp['.*rbf.var'].constrain_fixed(1.) # For this kernel, B.kappa encodes the variance now.\r\n # gp.optimize(messages=True)\r\n gp.optimize(messages=False)\r\n\r\n # FINDING INDIVIDUAL COSTS\r\n for j, output_trajectory_masked in enumerate(list_of_output_trajectories):#\r\n output_mask = np.ma.getmask(output_trajectory_masked)\r\n output_trajectory = np.array(output_trajectory_masked[~output_mask].reshape(3,-1))\r\n times_output_mask = output_mask[0,:]\r\n times_output_masked = np.ma.masked_array(times_array, times_output_mask)\r\n output_times = np.array(times_output_masked[~times_output_mask])\r\n cost_matrix[i,j] = individual_cost_function(gp, output_trajectory, output_times)\r\n\r\n\r\n # ARRAY OF ROW INDICES, ARRRAY OF COLUMN INDICES\r\n # CALL COMBINED COSTS\r\n # INPUT ARRAY[OUTPUT ARRAY NO MASK] = OUTPUT ARRAY[OUTPUT ARRAY NO MASK]\r\n\r\n return" ]
[ "0.66793424", "0.64969844", "0.63846135", "0.6238592", "0.61426294", "0.6089071", "0.60473144", "0.6015712", "0.5973049", "0.5957199", "0.5950292", "0.59151304", "0.58676654", "0.585917", "0.5856333", "0.5838859", "0.5832239", "0.58292663", "0.58258164", "0.58152735", "0.5795236", "0.57874644", "0.5739971", "0.57284147", "0.5716334", "0.5708935", "0.5707861", "0.5697579", "0.5697296", "0.56928647", "0.5662669", "0.5654815", "0.5654586", "0.5645787", "0.56396", "0.5633883", "0.5624533", "0.56195694", "0.56114125", "0.5597305", "0.5588425", "0.55816674", "0.55503136", "0.55442524", "0.5537724", "0.5529982", "0.5526227", "0.5525193", "0.5523028", "0.5521588", "0.55211335", "0.55172473", "0.5508544", "0.5493981", "0.5490223", "0.54825026", "0.5475561", "0.5471474", "0.54699695", "0.5466773", "0.54589754", "0.54508114", "0.5439032", "0.54281545", "0.542415", "0.5423889", "0.5421291", "0.54204196", "0.5415086", "0.5401588", "0.5399849", "0.5392795", "0.5392711", "0.53907436", "0.5389779", "0.5383804", "0.5377555", "0.5376671", "0.537469", "0.537441", "0.5373967", "0.5370115", "0.5368607", "0.53641313", "0.53623426", "0.5356972", "0.5355428", "0.53538966", "0.5353327", "0.5346356", "0.5346027", "0.5340495", "0.5338947", "0.5335403", "0.53341204", "0.5329619", "0.53230876", "0.5312974", "0.5310641", "0.5310049" ]
0.5963495
9
Removes all values of arg from the given string
def pippo(value): return value.replace('BPM', '<abbr title="Banca Popolare di Milano">BPM</abbr>').replace('Rino Snaidero Scientific Foundation', '<a href="http://www.snaiderofoundation.org/">Rino Snaidero Scientific Foundation</a>')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def strip_value(value, arg):\n return value.replace(arg, '')", "def cut_string(value, arg):\n\n return value.replace(arg, '')", "def cut(value, arg):\n return value.replace(arg, '')", "def cut(value, arg):\n return value.replace(arg, '')", "def cut(value,arg):\n return value.replace(arg, '')", "def cut(value, arg):\n return value.replace(arg, '') # we can replace arg with ''. We also need to register it", "def param_remove(params, arg):\n d = params.copy()\n if arg in d:\n del d[arg]\n return d.urlencode()", "def cut_str(value, arg):\n\n return value.replace(arg,'')", "def _FindAndRemoveArgWithValue(command_line, argname):\n if argname not in command_line:\n return ''\n location = command_line.index(argname)\n value = command_line[location + 1]\n command_line[location:location + 2] = []\n return value", "def mycut(value, arg):\r\n return value.replace(arg, '')", "def remove(text, *args):\n\n chars = \"\".join(args)\n for char in chars:\n text = text.replace(char, \"\")\n\n return text", "def cut(value,arg):\n return value.replace(arg,'')", "def cut(value,arg):\n return value.replace(arg,'')", "def cut(value,arg):\n return value.replace(arg,'')", "def cut(value,arg):\n return value.replace(arg,'')", "def cut(value,arg):\n return value.replace(arg,'')", "def rep_with_blank(value, arg):\n return value.replace(arg, '')", "def cut(value,arg):\n return cut.replace(arg,\"\")", "def strip_quotes(arg):\n quote_chars = '\"' + \"'\"\n\n if len(arg) > 1 and arg[0] == arg[-1] and arg[0] in quote_chars:\n arg = arg[1:-1]\n return arg", "def strip_quotes(arg):\n quote_chars = '\"' + \"'\"\n\n if len(arg) > 1 and arg[0] == arg[-1] and arg[0] in quote_chars:\n arg = arg[1:-1]\n return arg", "def cut_words(value, arg):\n\treturn value.replace(arg, '')", "def strip_variables(*args):\n return [arg.strip(\" '\\\"\") if arg is not None else arg for arg in args]", "def loope(value,arg):\r\n return value.replace(arg,'')", "def remove(html, *args):\n reobj = re.compile(\"|\".join(args), re.IGNORECASE)\n return reobj.sub(\" \", html)", "def ccut(value,arg):\n return value.replace(arg, '')", "def delete_params(s, *params):\n patt = '(?s)' + '|'.join(\n r'(?<=\\n)' + s + r'\\s*:.+?\\n(?=\\S+|$)' for s in params)\n return re.sub(patt, '', '\\n' + s.strip() + '\\n').strip()", "def _arg_to_empty(name: str) -> str:\n arg = \"{\" + name + \"}\"\n return arg", "def delete_params_s(s, params):\n patt = \"(?s)\" + \"|\".join(\"(?<=\\n)\" + s + \"\\s*:.+?\\n(?=\\S+|$)\" for s in params)\n return re.sub(patt, \"\", \"\\n\" + s.strip() + \"\\n\").strip()", "def anything_but_chars(*args:List[str]) -> str:\n # TODO uniq\n chars = \"\".join(args)\n return f\"[^{chars}]\"", "def strip_action_str(string: str) -> str:", "def clean_command_line(args):\n args = vars(args)\n # solo devuelvo los items que tienen datos en el runstring\n ret = {}\n for item in args:\n if args[item]:\n ret[item] = args[item]\n return ret", "def removeBadAA(mer,badaa=None):\n if badaa is None:\n badaa = BADAA\n if not mer is None:\n return re.sub('[%s]' % badaa, '', mer)\n else:\n return mer", "def replace_all(string, args):\n try:\n string = str(string)\n arg_list = args.split(',')\n\n substring_to_replace = str(arg_list[0])\n replaced_substring_value = str(arg_list[1])\n except (ValueError, TypeError):\n pass\n safe = isinstance(string, SafeData)\n string = string.replace(substring_to_replace, replaced_substring_value)\n if safe and ';' not in (args[0], args[1]):\n return mark_safe(string)\n return string", "def remove_extra_space_from_args(args):\n 
return {key: value.strip() for (key, value) in args.items() if value and len(value.strip()) > 0}", "def delete_kwargs(s, args=None, kwargs=None):\n if not args and not kwargs:\n return s\n types = []\n if args is not None:\n types.append(r'`?`?\\*%s`?`?' % args)\n if kwargs is not None:\n types.append(r'`?`?\\*\\*%s`?`?' % kwargs)\n return delete_types(s, *types)", "def cutit(value,arg):\n return value.replace(arg, ' replaced text ')", "def handle_strings(x):\r\n x = str(x).upper()\r\n x = ''.join(ch for ch in x if ch not in exclude)\r\n return x", "def _filter_unused_argument_dash_m(self, arguments):\n return [argument for argument in arguments if argument != '-m']", "def remove_letter(letter, strng):", "def cutting(value,arg):\n return value.replace(arg,'working')", "def fix_args(string):\n # Hide default values\n defs = re.compile('<span class=\"sig-paren\">\\(</span>(?P<args>[^\\)]*)<span class=\"sig-paren\">\\)</span>')\n opts = re.compile('<em class=\"sig-param\">(?P<var>[^=<]*)=(?P<val>[^<]*)</em>')\n \n prefix = ''\n remain = string\n \n match = defs.search(remain)\n while match:\n prefix += remain[:match.start(1)]\n prefargs = ''\n remnargs = remain[match.start(1):match.end(1)]\n optional = opts.search(remnargs)\n count = 0\n while optional:\n prefargs += remnargs[:optional.start(0)]+'<strong>[</strong>'\n prefargs += remnargs[optional.start(0):optional.end(1)]\n prefargs += remnargs[optional.end(2):optional.end(0)]\n remnargs = remnargs[optional.end(0):]\n optional = opts.search(remnargs)\n count += 1\n if count:\n prefargs += '<strong>'+']'*count+'</strong>'\n prefix += prefargs+remnargs\n prefix += remain[match.end(1):match.end(0)]\n remain = remain[match.end(0):]\n match = defs.search(remain)\n return prefix+remain", "def parse_args(string):\n return re.findall('[-=][^ ]*', string)", "def remove_elements_from_set(s: set, *args) -> set:\n for _ in args:\n s.remove(_)\n return s", "def _clean(matches):\n # type: (List[str]) -> None\n while True:\n try:\n matches.remove(\"\")\n except ValueError:\n break\n\n while True:\n try:\n matches.remove(\",\")\n except ValueError:\n return", "def processArgument(self, value):\r\n if not isinstance(value, basestring):\r\n return value\r\n\r\n value = self._RE_FIND.subn(self._replaceFind, value)[0]\r\n value = self._RE_ENV.subn(self._replaceEnv, value)[0]\r\n\r\n return value", "def _clean_args(*args):\n newargs = []\n for chk in args:\n if chk is None:\n break\n newargs.append(chk)\n return newargs", "def processArgument(self, value):\n if not isinstance(value, basestring):\n return value\n \n value = self._RE_FIND.subn(self._replaceFind, value)[0]\n value = self._RE_ENV.subn(self._replaceEnv, value)[0]\n \n return value", "def strip_string(input):\n return input.lower().replace(\" \", \"\")", "def keep_params(s, *params):\n patt = '(?s)' + '|'.join(\n r'(?<=\\n)' + s + r'\\s*:.+?\\n(?=\\S+|$)' for s in params)\n return ''.join(re.findall(patt, '\\n' + s.strip() + '\\n')).rstrip()", "def remove_from_string(string, letters):\n output = \"\"\n lookup = set(letters)\n for char in list(string):\n if char in lookup:\n continue\n output+=char\n return output", "def cut_off_rest(arg):\n return arg.split(' : ')[0]", "def stringfilter(func):\n @wraps(func)\n def _dec(*args, **kwargs):\n if args:\n args = list(args)\n args[0] = str(args[0])\n return func(*args, **kwargs)\n\n return _dec", "def remove(string, list_of_unwanted_car, replacement_char=\"_\"):\n new_string = string\n for unwanted_char in list_of_unwanted_car:\n new_string = 
new_string.replace(unwanted_char, replacement_char)\n return new_string", "def string_strip(string, isql=True, remove_space=True):\n if not string:\n return string\n if isql:\n for regex in isqlsubs:\n string = re.sub(regex, \"\", string)\n for pattern, replacement in self.substitutions:\n string= re.compile(pattern, re.M).sub(replacement, string)\n if remove_space:\n string = space_strip(string)\n return string", "def keepOnly(text, *args):\n\n chars = \"\".join(args)\n new = \"\"\n for char in text:\n if char in chars:\n new += char\n\n return new", "def string_substring_removal_all(string, substring):\n result = string.replace(substring, \"\")\n return result", "def flag(x):\n if x in sys.argv:\n sys.argv.remove(x)\n return True\n else:\n return False", "def rem_str(prelist,names):\n\n for prefix in prelist:\n names=[name.replace(prefix,'') for name in names]\n\n return names", "def remove_space(user_inputs):\r\n return user_inputs.replace(\" \", \"\")", "def remove_unused_args(args, thnn_args):\n def clean_name(name):\n name = name[:name.index('[')] if '[' in name else name\n if name.endswith('_'):\n name = name[:-1]\n return name\n uses = set([clean_name(arg['name']) for arg in thnn_args])\n uses.add('output_mask')\n args = [arg for arg in args if arg['name'] in uses]\n for arg in args:\n if 'default' in arg:\n del arg['default']\n return args", "def _strip_quotes(file_arg):\n return re.sub(\"^[\\'\\\"]|[\\'\\\"]$\", \"\", file_arg)", "def _clean_args(sys_argv, args):\n # print(args.datadir)\n # print( os.path.abspath(os.path.expanduser(sys_argv[0])))\n # if sys_argv[0].startswith(\"_\") or not args.datadir == os.path.abspath(os.path.expanduser(sys_argv[0])):\n # print(\"aa\")\n base = [x for x in sys_argv if\n x.startswith(\"-\") or not args.datadir == os.path.abspath(os.path.expanduser(x))]\n # Remove installer only options we don't pass on\n base = [x for x in base if x not in set([\"--minimize-disk\"])]\n if \"--nodata\" in base:\n base.remove(\"--nodata\")\n else:\n base.append(\"--data\")\n return base", "def delete_kwargs_s(cls, s, args=None, kwargs=None):\n if not args and not kwargs:\n return s\n types = []\n if args is not None:\n types.append(\"`?`?\\*%s`?`?\" % args)\n if kwargs is not None:\n types.append(\"`?`?\\*\\*%s`?`?\" % kwargs)\n return cls.delete_types_s(s, types)", "def keep_params_s(s, params):\n patt = \"(?s)\" + \"|\".join(\"(?<=\\n)\" + s + \"\\s*:.+?\\n(?=\\S+|$)\" for s in params)\n return \"\".join(re.findall(patt, \"\\n\" + s.strip() + \"\\n\")).rstrip()", "def extract_option(prefix, args):\n if prefix in ('#',):\n unique = False\n else:\n unique = True\n value = [a for a in args if a.startswith(prefix)]\n if len(value) == 1:\n value = value[0]\n args.remove(value)\n value = value[1:]\n if not unique:\n return [value]\n return value\n elif len(value) > 1 and unique:\n print('More than one %s found in args' % prefix)\n sys.exit(1)\n elif len(value) > 1 and not unique:\n for v in value:\n if v in args:\n args.remove(v)\n return [v[1:] for v in value]\n return None", "def clear_empty_values(args):\n result = {}\n for param in args:\n if args[param] is not None:\n result[param] = args[param]\n return result", "def __StringStrip(self, string, isql=True):\n if not string:\n return string\n\n if isql:\n for regex in self.__isqlsubs:\n string= re.sub(regex, \"\", string)\n\n for pattern, replacement in self.substitutions:\n string= re.compile(pattern.encode('UTF8'), re.M).sub(replacement.encode('UTF8'), string)\n \n return self.__SpaceStrip(string)", "def 
_restricted_hashtags(val: str):\n try:\n val = str(val).lower()\n except ValueError:\n raise argparse.ArgumentTypeError(f\"{val} could not be parsed to a string\")\n\n val = re.sub(_REGEX_CHAR_MATCHER_HASHTAGS, \"\", val)\n return val", "def rem_str(prelist,names):\n \n for prefix in prelist:\n names=[name.replace(prefix,'') for name in names]\n \n return names", "def _remove_parameter(value: Optional[str]) -> Optional[str]:\n if value is None:\n return None\n\n return value.split(\";\")[0]", "def daqStringMod(self, arg):\n\t\tself.stuff = []\n\t\tfor i in arg:\n\t\t\tself.stuff.append(\"\\'\" + i + \"\\'\")\n\t\treturn self.stuff", "def input_args():\n return filter(lambda x: len(x) > 0,\n map(lambda x: x.strip(), sys.argv[1:]))", "def removeMaskString(maskedString):\n global masked_value_set\n # Since we cannot remove an item from a set during itteration over\n # the said set, we only mark a flag and if the flag is set to True\n # we remove the string from the set.\n found = False\n for item in masked_value_set:\n if item == maskedString:\n found = True\n if found:\n masked_value_set.remove(maskedString)", "def cleanup_input(data):\n data = re.sub(r'[^0-9A-Za-z ()_,.-:]', '', data)\n return data", "def clean_param(param):\n if '<' in param:\n param = param.replace(\"<\", \"\")\n if '>' in param:\n param = param.replace(\">\", \"\")\n return param", "def remove(data, pattern):\n return [''.join(filter(pattern, str)) for str in data]", "def clean(val):\n\n val = re.sub(r'/s+', r'/s', val)\n return val.strip()", "def _str_args(self):\n return \"\"", "def preprocess(string):\n return regex.sub('', string)", "def _sanitize(self, opts_list):\n for opt in opts_list:\n if len(opt.strip()) == 0:\n opts_list.remove(opt)\n return opts_list", "def clean_str(data, remove=''):\n return data.translate(None, remove)", "def main(args):\n\n print(main.__doc__)\n s = input()\n print(remove_pontuacao2(s))\n return EX_OK", "def str_remove(string: str, index: int) -> str: # _3 [✅]\n if len(string) == 0:\n raise ValueError # put the msg inside here - refer to the doc \n else:\n return string.replace(string[index], '')", "def strip_null(arg,null=None):\n if null is None:\n null = NULL\n\n if type(arg) is types.ListType:\n return [i for i in arg if i not in null]\n elif type(arg) is types.TupleType:\n return tuple([i for i in arg if i not in null])\n elif type(arg) is type(set()):\n return arg.difference(set(null))\n elif type(arg) is types.DictType:\n return {key:value for key,value in arg.items() if value not in null}\n\n return arg", "def check_flag ( params, string, delete ) :\n i = 0\n value = None\n size = len(string)\n for line in params :\n tmp = line.find(string)\n if tmp != -1 :\n start = tmp + size\n sel_string = line[start:]\n if delete :\n params.pop(i)\n value = sel_string\n i += 1\n return value", "def varStringMod(self, arg):\n\t\targ[0] = \"'\" + arg[0] + \"'\"\n\t\treturn arg", "def ungrist (value):\n assert is_iterable_typed(value, basestring) or isinstance(value, basestring)\n def ungrist_one (value):\n stripped = __re_grist_content.match (value)\n if not stripped:\n raise BaseException (\"in ungrist: '%s' is not of the form <.*>\" % value)\n\n return stripped.group (1)\n\n if isinstance (value, str):\n return ungrist_one (value)\n else:\n return [ ungrist_one (v) for v in value ]", "def remove_extra(name):\n return re.sub(r\"-[\\S\\s]*\", \"\", re.sub(r\"\\([\\w\\W]*\\)\", \"\", name))", "def _clean_salt_variables(params, variable_prefix=\"__\"):\n list(list(map(params.pop, [k for k in 
params if k.startswith(variable_prefix)])))\n return params", "def stripname(name, stripnums = True):\n\tfor pattern in removestuffregex:\n\t\tname = re.sub(pattern, \"\", name)\n\tif stripnums:\n\t\tname = re.sub(numberregex, \"\", name)\n\tfor pattern in removestuff:\n\t\tname = name.replace(pattern, \"\")\n\treturn name", "def remove_tag(args):", "def replace_in_string(s, args_dict):\n for key, value in args_dict.items():\n s = s.replace(key, value)\n for key, value in args_dict.items():\n s = s.replace(key, value)\n for key, value in args_dict.items():\n s = s.replace(key, value)\n return s", "def strip_str(str: str) -> str:\r\n return ''.join(re.findall(ARTIST_MATCH_REGEX, str)).lower()", "def string_cleanup(s, garbage=\":,-()&\"):\n s_new = ''\n for x in s:\n if x not in garbage:\n s_new += x\n\n return s_new", "def _normalize_argument(self, value):\n return storepass.utils.normalize_empty_to_none(value)", "def clean_str(\n s: str,\n l: list,\n r: list,\n ) -> str: \n\n # Loop through every substring in the list\n for i in range(0, len(l)):\n\n # Remove all occurrences of the substring\n s = s.replace(l[i], r[i])\n\n return s", "def just_replace_strings_with_nothing(self, artist: str) -> str:\n data = re.sub(' ', '', artist)\n\n return data", "def removeFactor(self, string: str, string2: str) -> _AbstractKnobBuilder__T:\n ...", "def delete_whitespaces(str):\n global legal_white_spaces\n\n try:\n str_copy = str\n for i in legal_white_spaces:\n str_copy = str_copy.replace(i, '')\n return str_copy\n except Exception as e:\n print(e)\n return None", "def _sig_without(sig: inspect.Signature, param: Union[int, str]) -> inspect.Signature:\n if isinstance(param, int):\n params = list(sig.parameters.values())\n params.pop(param)\n else:\n params = [p for name, p in sig.parameters.items() if name != param]\n return sig.replace(parameters=params)", "def strip(self, value):\n raise NotImplementedError" ]
[ "0.7558784", "0.7307502", "0.70154285", "0.70154285", "0.69781893", "0.6924862", "0.676274", "0.6756168", "0.67021793", "0.6586963", "0.64901525", "0.6459049", "0.6459049", "0.6459049", "0.6459049", "0.6459049", "0.6406707", "0.6270779", "0.62498343", "0.62498343", "0.6242943", "0.6157161", "0.6093735", "0.6090899", "0.60143155", "0.599144", "0.59714895", "0.5939507", "0.58549607", "0.57702297", "0.57556105", "0.57493126", "0.5727337", "0.5708241", "0.570733", "0.57026374", "0.5702222", "0.5687957", "0.5676118", "0.5665181", "0.56260765", "0.5606196", "0.5588027", "0.55574137", "0.55148524", "0.55087835", "0.5489128", "0.5478668", "0.5477356", "0.54565555", "0.54540455", "0.545357", "0.54470783", "0.54415494", "0.5440428", "0.54310083", "0.54076576", "0.5404844", "0.5390204", "0.53882104", "0.5376783", "0.5373462", "0.53718746", "0.5369363", "0.5367794", "0.5364913", "0.5359345", "0.53555626", "0.53439116", "0.53333384", "0.5329269", "0.53284", "0.5326372", "0.53178304", "0.5309755", "0.53077507", "0.5303094", "0.529814", "0.5295462", "0.5283628", "0.52742714", "0.52738684", "0.52719927", "0.526908", "0.5265893", "0.52529055", "0.5249638", "0.5246806", "0.5235596", "0.52352244", "0.5227862", "0.5227425", "0.5226847", "0.52208674", "0.5213013", "0.52116704", "0.5203476", "0.51891637", "0.5184083", "0.5172059", "0.5171934" ]
0.0
-1
Builds a Flask API and registers its blueprints.
def setup():
    LOG.info("Creating API.")
    api = Flask(__name__)
    LOG.info("Registering blueprints.")
    api.register_blueprint(health_check_blueprint.setup())
    LOG.info("Registering error handlers.")
    api.register_error_handler(Exception, default_error_handler)
    LOG.info("Setting up config variables.")
    api.config['PROPAGATE_EXCEPTIONS'] = True
    return api
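Note: the setup() snippet above depends on two helpers that are not included in this row, health_check_blueprint.setup() and default_error_handler. The following is a minimal sketch of what such a companion module could look like; the module layout, route path, and response payloads are assumptions for illustration only, not part of the original dataset.

import logging

from flask import Blueprint, jsonify

LOG = logging.getLogger(__name__)

_blueprint = Blueprint("health_check", __name__)


@_blueprint.route("/health", methods=["GET"])
def health_check():
    # Minimal liveness probe; returns 200 while the process is up.
    return jsonify({"status": "ok"}), 200


def setup():
    # Mirrors the health_check_blueprint.setup() call in the API factory above.
    LOG.info("Creating health check blueprint.")
    return _blueprint


def default_error_handler(error):
    # Catch-all handler wired in via register_error_handler(Exception, ...).
    LOG.exception("Unhandled exception: %s", error)
    return jsonify({"error": "Internal server error"}), 500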
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def register_blueprints(self):\n # Local import due to flask/blueprint circular imports.\n from mmapi.views import api_bp\n self.app.register_blueprint(api_bp, url_prefix='/api')", "def register_blueprints_on_app(app):\n app.register_blueprint(views.main_pages)\n app.register_blueprint(views.main_api, url_prefix='/api')", "def register_blueprints():\n from app.routes import blog, client\n blueprints = [blog, client]\n\n for bp in blueprints:\n app.register_blueprint(bp)", "def init_app(app):\n api.add_namespace(ns)\n app.register_blueprint(bp, url_prefix='/api/v1')", "def register_blueprints(api):\n for module in MODULES:\n api.register_blueprint(module.blp)", "def register_blueprints(app):\n @app.route('/')\n def hello():\n return '<html><body>{{ cookiecutter.project_name }} - Hello World</body></html>'\n\n @app.route('/healthz')\n def healthz():\n {% if cookiecutter.use_sqlalchemy == 'True' %}\n \"\"\"\n Verify the DB is there.\n\n :return: 200 if all good, otherwise it raises an exception which returns 500\n \"\"\"\n {{cookiecutter.app_name}}.extensions.db.session.query('1').from_statement('SELECT 1').all()\n {% endif %}\n return '', httplib.OK\n\n app.register_blueprint({{ cookiecutter.app_name }}.api.v0_1.get_blueprint())", "def register_blueprints(app):\n app.register_blueprint(general.general)\n app.register_blueprint(validate.validate, url_prefix='')\n\n # All done!\n app.logger.info(\"Blueprints registered\")", "def blueprints_fabrics(app, blueprints):\n\n for blueprint in blueprints:\n app.register_blueprint(blueprint)", "def generate_blueprints(self):\n user_blueprint = self.manager.create_api_blueprint(\n User,\n methods=['GET', 'POST', 'PATCH', 'DELETE'],\n primary_key='username',\n include_columns=['name', 'username', 'tags', 'posts'],\n preprocessors=dict(\n GET_SINGLE=[check_auth],\n GET_MANY=[check_auth],\n POST=[user_encrypt_password],\n PATCH_SINGLE=[check_auth, user_encrypt_password, check_owner],\n DELETE=[check_auth, check_owner]\n )\n )\n self.app.register_blueprint(user_blueprint)\n\n tag_blueprint = self.manager.create_api_blueprint(\n Tag,\n methods=['GET', 'POST', 'PATCH', 'DELETE'],\n preprocessors=dict(\n GET_SINGLE=[check_auth],\n GET_MANY=[check_auth],\n POST=[check_auth, add_owner_id, check_owner],\n PATCH_SINGLE=[check_auth, check_owner],\n DELETE=[check_auth, check_owner]\n )\n )\n self.app.register_blueprint(tag_blueprint)\n\n post_blueprint = self.manager.create_api_blueprint(\n Post,\n methods=['GET', 'POST', 'PATCH', 'DELETE'],\n preprocessors=dict(\n GET_SINGLE=[check_auth],\n GET_MANY=[check_auth],\n POST=[check_auth, check_owner, add_owner_id],\n PATCH_SINGLE=[check_auth, check_owner],\n DELETE=[check_auth, check_owner]\n )\n\n )\n self.app.register_blueprint(post_blueprint)\n\n url_blueprint = self.manager.create_api_blueprint(\n URL,\n methods=['GET', 'POST'],\n preprocessors=dict(\n GET_SINGLE=[check_auth],\n GET_MANY=[check_auth],\n POST=[check_auth]\n )\n )\n self.app.register_blueprint(url_blueprint)\n\n\n addressbook_blueprint = self.manager.create_api_blueprint(\n AddressBook,\n methods=['GET', 'POST'],\n preprocessors=dict(\n GET_SINGLE=[check_auth],\n GET_MANY=[check_auth],\n POST=[check_auth, check_owner, add_owner_id],\n PATCH_SINGLE=[check_auth, check_owner],\n DELETE=[check_auth, check_owner]\n )\n )\n self.app.register_blueprint(addressbook_blueprint)", "def register_routes(self):\n @inlineCallbacks\n def registered(response):\n if response.code != 200:\n text = yield response.text()\n self._env.logger.error('{} 
{}'.format(response.code, text))\n\n try:\n api_register = '{}://{}:{}/api/1.0.0/register'.format(\n self._env.api_protocol,\n self._env.api_host,\n self._env.api_port\n )\n remote_ms = self._env.get('remote_ms', None)\n\n for path in self._env.swagger.paths:\n uri = self._env.swagger.base + path.split('{')[0].rstrip('/')\n if remote_ms:\n route = {\n 'protocol': 'https',\n 'host': remote_ms,\n 'port': 443,\n }\n else:\n if self._env.get('flask_private'):\n route = {\n 'protocol': self._env.get('flask_protocol'),\n 'host': self._env.get('flask_host'),\n 'port': self._env.get('flask_port'),\n }\n else:\n route = {\n 'protocol': self._env.flask_protocol,\n 'host': self._env.flask_host,\n 'port': self._env.flask_port,\n }\n route = dict(route, **{'uri': uri, 'key': self._key})\n #self._env.logger.info('Route> {}'.format(str(route)))\n treq.post(api_register, data={'details': dumps(route)}).addCallback(registered)\n\n swagger_paths = ['/ui/css', '/ui/lib', '/ui/images', '/swagger.json']\n ui = '/' + self._env.get('swagger_ui', 'ui')+'/'\n swagger_paths.append(ui)\n\n for path in swagger_paths:\n uri = self._env.swagger.base\n if len(uri):\n if uri[-1] == '/':\n uri = uri[:-1]\n uri += path\n if self._env.get('flask_private'):\n route = {\n 'protocol': self._env.get('flask_protocol'),\n 'host': self._env.get('flask_host'),\n 'port': self._env.get('flask_port'),\n 'uri': uri,\n 'key': self._key,\n 'ui': path == ui,\n 'name': self._env.get('my_name', 'no local name', 'microservice')\n }\n else:\n route = {\n 'protocol': self._env.flask_protocol,\n 'host': self._env.flask_host,\n 'port': self._env.flask_port,\n 'uri': uri,\n 'key': self._key,\n 'ui': path == ui,\n 'name': self._env.get('my_name', 'no local name', 'microservice')\n }\n treq.post(api_register, data={'details': dumps(route)}).addCallback(registered)\n\n return True\n except Exception as e:\n self._env.logger.error('error registering routes \"{}\"'.format(str(e)))", "def register_blueprints(app):\n blueprints = {INDEX, DASHBOARD, COMMENT_SECTION}\n\n for blueprint in blueprints:\n app.register_blueprint(blueprint)", "def register_blueprints(app):\n from .main import main_blueprint\n app.register_blueprint(main_blueprint)\n\n from .submissions import submissions_blueprint\n app.register_blueprint(submissions_blueprint, url_prefix='/submissions')\n from .revisions import revisions_blueprint\n app.register_blueprint(revisions_blueprint, url_prefix='/revisions')", "def register_blueprints(app):\n app.register_blueprint(user)\n app.register_blueprint(messages)\n app.register_blueprint(auth, url_prefix='/auth')\n app.register_blueprint(tasks)\n app.register_blueprint(core)\n app.register_blueprint(errors)", "def blueprints(app):\n for blueprint in FLASK_BLUEPRINTS:\n app.register_blueprint(blueprint)\n\n return None", "def register_blueprints(app):\n app.register_blueprint(hello_world.bp_config.bp)", "def create_app():\n app = Flask(__name__)\n\n # app.secret_key = os.urandom(12)\n # jwt_manager = JWTManager()\n # jwt_manager.init_app(app)\n\n CORS(app)\n\n app.register_blueprint(redflag_blueprint, url_prefix=\"/api/v1/red-flags\")\n app.register_blueprint(user_blueprint, url_prefix=\"/api/v1/users\")\n app.register_blueprint(intervention_blueprint, url_prefix=\"/api/v1/interventions\")\n app.register_blueprint(auth_blueprint, url_prefix=\"/api/v1/auth\")\n app.register_blueprint(index_blueprint, url_prefix=\"/api/v1\")\n app.register_blueprint(base_url_blueprint, url_prefix=\"/\")\n app.register_blueprint(media_blueprint, 
url_prefix=\"/api/v1/files/uploads\")\n # app.register_blueprint(media_edit_blueprint, url_prefix=\"/api/v1/\")\n\n app.register_error_handler(400, bad_request_error)\n app.register_error_handler(404, page_not_found)\n app.register_error_handler(405, method_not_allowed)\n app.register_error_handler(500, internal_server_error)\n\n swagger_ui_blueprint = get_swaggerui_blueprint(SWAGGER_UI_URL, API_URL)\n app.register_blueprint(swagger_ui_blueprint, url_prefix=SWAGGER_UI_URL)\n\n return app", "def build_routes(app):\n app.register_blueprint(workflow_plans_blueprint)\n app.register_blueprint(cache_blueprint)\n app.register_blueprint(config_blueprint)\n app.register_blueprint(dataset_blueprint)\n app.register_blueprint(graph_blueprint)\n app.register_blueprint(jobs_blueprint)\n app.register_blueprint(project_blueprint)\n app.register_blueprint(templates_blueprint)\n app.register_blueprint(version_blueprint)\n app.register_blueprint(apispec_blueprint)\n app.register_blueprint(versions_list_blueprint)", "def register_blueprints(app):\n app.register_blueprint(vs_association, url_prefix='/vs_association')", "def create_routes(api: Api):\n api.add_resource(SignUpApi, '/user/signup/')\n api.add_resource(LoginApi, '/user/login/')\n\n api.add_resource(UsersApi, '/users/')\n\n api.add_resource(CafeteriasCreationAPI, '/createcafeteria/')\n api.add_resource(CreateItemsAPI, '/createcafeteriaitems/')", "def get_blueprint():\n return REQUEST_API", "def register_blueprints(app, blueprints, url_prefix):\n for blueprint in blueprints:\n app.register_blueprint(blueprint, url_prefix=url_prefix)", "def register_blueprints(app, blueprints):\n for blueprint in blueprints:\n app.register_blueprint(blueprint)\n return None", "def register_pc_blueprints(app):\n blueprints = [\n registration_page,\n spectrum_inquiry_page,\n grant_page,\n heartbeat_page,\n relinquishment_page,\n deregistration_page,\n ]\n register_blueprints(app, blueprints, app.config['API_PREFIX'])", "def create_app(config_name):\n app = Flask(__name__)\n app.register_blueprint(v1, url_prefix=\"/api/v1/\")\n return app", "def reg_bps(app):\n from . 
import categories_bp, items_bp, users_bp\n\n app.register_blueprint(categories_bp, url_prefix='/categories')\n app.register_blueprint(items_bp, url_prefix='/items')\n app.register_blueprint(users_bp)", "def configure(app):\n api.add_resource(Event, '/event/')\n api.add_resource(EventItem, '/event/<event_id>')\n app.register_blueprint(bp_restapi)", "def create_app():\r\n app = Flask(__name__, instance_relative_config=False)\r\n app.config.from_object('config.Config') \r\n \r\n api = Api(app) \r\n \r\n with app.app_context():\r\n from .flights import TicketRoute, FlightRoute\r\n api.add_resource(TicketRoute,\"/api/tickets\")\r\n api.add_resource(FlightRoute,\"/api/flights\")\r\n \r\n \r\n return app", "def init_app(app):\n app.register_blueprint(index_bl)\n app.register_blueprint(main_bl, url_prefix=\"/main\")\n app.register_blueprint(map_bl, url_prefix=\"/map\")\n app.register_blueprint(login_bl, url_prefix=\"/login\")\n app.register_blueprint(prof_bl, url_prefix=\"/profile\")\n app.register_blueprint(average_bl, url_prefix=\"/average\")", "def register_blueprints(self):\n for module in copy.copy(sys.modules).values():\n for blueprint in module_functionalities(module, 'MARA_FLASK_BLUEPRINTS', flask.Blueprint):\n self.register_blueprint(blueprint)", "def create_app(config_name):\n\n app = Flask(__name__)\n api = Api(app)\n CORS(app)\n\n app.config.from_object(config.configurations[config_name])\n \"\"\"This ensures that the urls /login and /login/ are recognized as same\n without considering the trailing slash \"\"\"\n app.url_map.strict_slashes = False\n\n with app.app_context():\n from app.resources.products import MenuResource\n from app.resources.orders import OrderResource\n from app.resources.addresses import AddressResource\n from app.resources.users import LoginResource, SignUpResource\n api.add_resource(MenuResource, \"/api/v1/menu\", \"/api/v1/menu/<int:product_id>\")\n api.add_resource(OrderResource, \"/api/v1/orders\",\n \"/api/v1/orders/<int:order_id>\")\n api.add_resource(AddressResource, \"/api/v1/addresses\",\n \"/api/v1/addresses/<int:address_id>\")\n api.add_resource(LoginResource, \"/api/v1/auth/login\")\n api.add_resource(SignUpResource, \"/api/v1/auth/signup\")\n\n @app.errorhandler(404)\n def error_404(e):\n return jsonify({\"code\": \"404\", \"message\": \"Not found\"}), 200\n\n @app.errorhandler(500)\n def error_500(e):\n return jsonify(\n {\"code\": \"503\", \"message\": \"We have some trouble\"\n \"processing your request\"\n \" please try again later\"}), 500\n\n @app.errorhandler(405)\n def error_405(e):\n return jsonify({\"code\": \"405\", \"message\": \"We dont allow\"\n \" the request method\",\n \"ok\": False}), 200\n\n @app.route(\"/\")\n def home():\n return render_template(\"index.html\")\n\n return app", "def configure_blueprints(app):\n\n for blueprint in _blueprints:\n app.register_blueprint(blueprint)", "def initialize_blueprints(app, *blueprints):\n for blueprint in blueprints:\n app.register_blueprint(blueprint)", "def init_app(app):\n\n def register(path, resource):\n app.add_url_rule(path, view_func=resource.as_view(resource.__name__))\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n docs.register(resource, endpoint=resource.__name__)\n\n docs = FlaskApiSpec(app)\n register(\"/organisms\", Organisms)\n register(\"/organisms/<int:id>\", Organism)\n register(\"/strains\", Strains)\n register(\"/strains/<int:id>\", Strain)\n register(\"/experiments\", Experiments)\n register(\"/experiments/<int:id>\", Experiment)\n 
register(\"/experiments/<int:id>/data\", ExperimentData)\n register(\"/media\", Media)\n register(\"/media/<int:id>\", Medium)\n register(\"/media/compounds\", MediumCompounds)\n register(\"/media/compounds/<int:id>\", MediumCompound)\n register(\"/conditions\", Conditions)\n register(\"/conditions/<int:id>\", Condition)\n register(\"/conditions/<int:id>/data\", ConditionData)\n register(\"/samples\", Samples)\n register(\"/samples/<int:id>\", Sample)\n register(\"/fluxomics\", Fluxomics)\n register(\"/fluxomics/batch\", FluxomicsBatch)\n register(\"/fluxomics/<int:id>\", Fluxomic)\n register(\"/metabolomics\", Metabolomics)\n register(\"/metabolomics/batch\", MetabolomicsBatch)\n register(\"/metabolomics/<int:id>\", Metabolomic)\n register(\"/proteomics\", Proteomics)\n register(\"/proteomics/batch\", ProteomicsBatch)\n register(\"/proteomics/<int:id>\", Proteomic)\n register(\"/uptake-secretion-rates\", UptakeSecretionRates)\n register(\"/uptake-secretion-rates/<int:id>\", UptakeSecretionRate)\n register(\"/molar-yields\", MolarYields)\n register(\"/molar-yields/<int:id>\", MolarYield)\n register(\"/growth-rates\", GrowthRates)\n register(\"/growth-rates/<int:id>\", GrowthRate)", "def create_app(config_name):\n from webapi.models import User, Event, Rsvp\n\n api = Blueprint('api', __name__)\n app = FlaskAPI(__name__, instance_relative_config=True)\n CORS(app)\n Swagger(app, template_file=\"docs.yml\")\n\n app.config.from_pyfile('config.py')\n app.config.from_object(app_config[config_name])\n app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\n mail = Mail(app)\n db.init_app(app)\n\n s = URLSafeTimedSerializer(app.config['SECRET_KEY'])\n with app.app_context():\n db.create_all()\n\n @app.errorhandler(404)\n def not_found_error(error):\n \"\"\"404 error handler.\"\"\"\n return jsonify(\n {\"error\": \"Page not found. 
Make sure you typed in the route correctly.\"}), 404\n\n @app.errorhandler(500)\n def internal_server_error(error):\n \"\"\"500 error handler.\"\"\"\n return jsonify({\"error\": \"Internal Server Error\"}), 500\n\n @app.route('/', methods=['GET'])\n def index():\n \"\"\"Render docs\"\"\"\n return redirect(\"/apidocs\")\n\n def token_required(f):\n \"\"\"Accept function with token\"\"\"\n @wraps(f)\n def decorated(*args, **kwargs):\n \"\"\"Check if token is genuine\"\"\"\n token = None\n\n if 'x-access-token' in request.headers:\n token = request.headers['x-access-token']\n\n if not token:\n return jsonify({\"message\":\"Token is missing!\"}), 401\n try:\n data = jwt.decode(token, app.config['SECRET_KEY'])\n current_user = User.query.filter_by(public_id=data['public_id']).first()\n except:\n return jsonify({\"message\":\"Token is invalid\"}), 401\n return f(current_user, *args, **kwargs)\n\n return decorated\n\n @api.route('/auth/register', methods=['POST'])\n def register():\n \"\"\"Add new users to data\"\"\"\n result = register_helper(User)\n return jsonify(result[0]), result[1]\n\n @api.route('/auth/login', methods=['POST'])\n def login():\n \"\"\"Login registered users\"\"\"\n result = login_helper(User, app, db)\n return jsonify(result[0]), result[1]\n\n @api.route('/auth/logout', methods=['POST'])\n @token_required\n def logout(current_user):\n \"\"\"Log out users\"\"\"\n result = logout_helper(current_user, db)\n return jsonify(result[0]), result[1]\n\n # @api.route('/auth/reset-password', methods=['POST'])\n # def reset_password():\n # \"\"\"Reset users password\"\"\"\n # result = reset_password_helper(current_user, db)\n # return result[0], result[1]\n\n @api.route('/search', methods=['GET'])\n def search():\n \"\"\"Implement search\"\"\"\n result = search_helper(Event)\n return jsonify(result[0]), result[1]\n\n @api.route('/emails', methods=['GET','POST'])\n def handle_emails():\n \"\"\"Handle functionality around email sending\"\"\"\n email = request.data['email'].strip()\n user = User.query.filter_by(email=email).first()\n option = \\\n request.data['option'].strip() # have a <select> in the frontend\n token = s.dumps(email, salt='email-confirm')\n\n msg = Message('Reset password', sender=app.config['ADMINS'][0],\n recipients=[email])\n link = 'http://localhost:3000/confirm_email/{}/{}'\\\n .format(option, token)\n if user:\n msg.body = 'Your link is {}'.format(link)\n else:\n msg.body = 'You attempted to reset your password but you do not \\\n have an account with us. Please Sign Up and Log in. 
{}'\\\n .format('http://localhost:3000/register')\n\n mail.send(msg)\n return jsonify({\"message\":\"Please confirm your email.\"}), 201\n\n @api.route('/confirm_email/<option>/<token>', methods=['POST'])\n def confirm_email(option, token):\n try:\n email = s.loads(token, salt='email-confirm', max_age=3600)\n if option == \"reset-password\":\n result = reset_password_helper(email, User, db)\n return jsonify(result[0]), result[1]\n elif option == \"confirm-account\":\n return jsonify (confirm_account_helper(email, db)[0]), confirm_account_helper(email, db)[1]\n except SignatureExpired:\n return jsonify({\"message\":\"The token is expired!\"}), 409\n\n @api.route('/events', methods=['GET'])\n def view_events():\n \"\"\"View a list of events\"\"\"\n result = get_events_helper(Event)\n return jsonify(result[0]), result[1]\n\n @api.route('/events/<username>/<eventname>', methods=['GET'])\n def get_single_event(username, eventname):\n result = get_single_event_helper(username, eventname, Event)\n return jsonify(result[0]), result[1]\n\n @api.route('/events', methods=['POST'])\n @token_required\n def create_event(current_user):\n \"\"\"Add events\"\"\"\n result = create_events_helper(current_user, Event)\n return jsonify(result[0]), result[1]\n\n @api.route('/events/<user_public_id>', methods=['GET'])\n @token_required\n def online_user_events(current_user, user_public_id):\n \"\"\"Online users can view their events\"\"\"\n result = online_user_events_helper(current_user, user_public_id, Event)\n return jsonify(result[0]), result[1]\n\n @api.route('/events/<eventname>', methods=['PUT', 'DELETE', 'GET'])\n @token_required\n def event_update(current_user, eventname):\n \"\"\"Edit existing events\"\"\"\n result = event_update_delete_helper(current_user, eventname, db, Event)\n return jsonify(result[0]), result[1]\n\n @api.route('/events/<eventname>/rsvp', methods=['POST', 'GET', 'DELETE'])\n @token_required\n def rsvps(current_user, eventname):\n \"\"\"Send RSVPs to existing events\"\"\"\n result = rsvps_helper(current_user, eventname, Rsvp, Event)\n return jsonify(result[0]), result[1]\n\n app.register_blueprint(api, url_prefix='/api/v2')\n return app", "def index():\n definition = {\n \"swagger\": \"2.0\",\n \"info\": {\n \"title\": flask.current_app.config.get(\"APPNAME\", \"Not specified\"),\n \"version\": flask.current_app.config.get(\"VERSION\", \"Not specified\"),\n },\n \"host\": request.host,\n \"schemes\": [\"http\"],\n \"consumes\": [\"application/json\"],\n \"produces\": [\"application/json\"],\n \"definitions\": registry._definitions,\n \"paths\": {}\n }\n\n rules = list(flask.current_app.url_map.iter_rules())\n for r in sorted(rules, key=operator.attrgetter('rule')):\n if r.rule.startswith('/static'):\n continue\n if r.endpoint in registry._skipped:\n continue\n\n rule = re.sub(r\"<(?:[_a-zA-Z0-9\\(\\)]+:)?([a-zA-Z0-9_]+)>\", r\"{\\1}\", r.rule)\n if rule not in definition['paths']:\n definition['paths'][rule] = {}\n\n methods_handled = r.methods & REST_METHODS\n handler = flask.current_app.view_functions.get(r.endpoint)\n doc = handler.func_doc\n\n if len(methods_handled) == 1:\n method = methods_handled.pop().lower()\n try:\n validated = yaml.safe_load(doc)\n if not isinstance(validated, dict):\n raise Exception(\"Not a descriptor\")\n definition['paths'][rule][method] = validated\n except Exception:\n pass\n\n else:\n # We need to handle multi-method docstrings differently\n # because the documentation needs to define both, and\n # it's a higher level of the swagger hierarchy\n 
try:\n validated = yaml.safe_load(doc)\n if not isinstance(validated, dict):\n raise Exception(\"Not a descriptor\")\n definition['paths'][rule].update(validated)\n except Exception:\n definition['paths'][rule] = {}\n\n resp = flask.make_response(\n json.dumps(definition, for_json=True))\n resp.headers.set(\"Content-type\", 'application/json')\n resp.headers.set(\"Access-Control-Allow-Origin\", \"*\")\n return resp", "def initialize_routes(api):\n api.add_resource(WatchlistsApi, '/api/watchlists')\n api.add_resource(WatchlistApi, '/api/watchlist/<id>')\n api.add_resource(RegisterUserApi, '/api/auth/register')\n api.add_resource(LoginUserApi, '/api/auth/login')\n api.add_resource(ResetPassword, '/api/auth/reset')\n api.add_resource(ResetFogottenPassword, '/api/auth/reset/password')\n api.add_resource(ForgotPassword, '/api/auth/forgot')\n api.add_resource(ForgotPasswordReset, '/reset/password/<token>')\n api.add_resource(Home, '/')\n api.add_resource(Logout, '/logout')\n api.add_resource(Dashboard, '/dashboard')\n api.add_resource(DashboardSearch, '/dashboard/search')\n api.add_resource(SearchMovies, '/search/movies/<title>')\n api.add_resource(SearchMovieDetails, '/search/movie/details/<id>')\n api.add_resource(SearchTvShows, '/search/shows/<title>')\n api.add_resource(SearchShowDetails, '/search/show/details/<id>')\n api.add_resource(SearchTrendingMovies, '/search/trending/movies')\n api.add_resource(Recommend, '/recommend')", "def make_app():\n app = flask.Flask('sahara.api')\n\n @app.route('/', methods=['GET'])\n def version_list():\n context.set_ctx(None)\n return api_utils.render({\n \"versions\": [\n {\"id\": \"v1.0\", \"status\": \"CURRENT\"}\n ]\n })\n\n @app.teardown_request\n def teardown_request(_ex=None):\n context.set_ctx(None)\n\n app.register_blueprint(api_v10.rest, url_prefix='/v1.0')\n app.register_blueprint(api_v10.rest, url_prefix='/v1.1')\n app.register_blueprint(api_v11.rest, url_prefix='/v1.1')\n\n def make_json_error(ex):\n status_code = (ex.code\n if isinstance(ex, werkzeug_exceptions.HTTPException)\n else 500)\n description = (ex.description\n if isinstance(ex, werkzeug_exceptions.HTTPException)\n else str(ex))\n return api_utils.render({'error': status_code,\n 'error_message': description},\n status=status_code)\n\n for code in six.iterkeys(werkzeug_exceptions.default_exceptions):\n app.error_handler_spec[None][code] = make_json_error\n\n if CONF.debug and not CONF.log_exchange:\n LOG.debug('Logging of request/response exchange could be enabled using'\n ' flag --log-exchange')\n\n if CONF.log_exchange:\n app.wsgi_app = log_exchange.LogExchange.factory(CONF)(app.wsgi_app)\n\n app.wsgi_app = auth_valid.wrap(app.wsgi_app)\n app.wsgi_app = acl.wrap(app.wsgi_app)\n\n return app", "def initialize_api(app, api):\n api.init_app(app=app) # Initialize api first\n _resources = getattr(app, \"api_registry\", None)\n if _resources and isinstance(_resources, (list, tuple,)):\n for cls, args, kwargs in _resources:\n api.add_resource(cls, *args, **kwargs)", "def register_routes(self, api):\n # Device Registration\n api.add_resource(controllers.UserDeviceRegistration, '/device-registration')", "def create_app():\n\n # create and configure the app\n app = Flask(__name__)\n app.config.from_object('DiamondCaseWeb.config.DevelopmentConfig')\n\n # Register SCSS assets\n assets.register_assets(app)\n\n # Set up mail\n mail = dc_mail.setup_mail(app)\n\n # Setup Database\n from DiamondCaseWeb.model.product import Product, ProductCategory, LocationProduct\n from DiamondCaseWeb.model.static 
import HelpArticle, HomepageFeature\n from DiamondCaseWeb.model.user import Role, User\n db.init_app(app)\n\n\n from DiamondCaseWeb.view_blueprint import marketing as marketing_bp\n from DiamondCaseWeb.view_blueprint import product as product_bp\n from DiamondCaseWeb.view_blueprint import shop as shop_bp\n from DiamondCaseWeb.view_blueprint import static as static_bp\n from DiamondCaseWeb.view_blueprint import user as user_bp\n\n # Register view blueprints\n app.register_blueprint(marketing_bp.blueprint)\n app.register_blueprint(product_bp.blueprint)\n app.register_blueprint(shop_bp.blueprint)\n app.register_blueprint(static_bp.blueprint)\n app.register_blueprint(user_bp.blueprint)\n\n\n from DiamondCaseWeb.api import help_article as help_article_api_bp\n from DiamondCaseWeb.api import homepage_feature as homepage_feature_api_bp\n from DiamondCaseWeb.api import location as location_api_bp\n from DiamondCaseWeb.api import location_product as location_product_api_bp\n from DiamondCaseWeb.api import product as product_api_bp\n from DiamondCaseWeb.api import product_category as product_category_api_bp\n from DiamondCaseWeb.api import role as role_api_bp\n from DiamondCaseWeb.api import user as user_api_bp\n\n # Register api blueprints\n app.register_blueprint(help_article_api_bp.blueprint)\n app.register_blueprint(homepage_feature_api_bp.blueprint)\n app.register_blueprint(location_api_bp.blueprint)\n app.register_blueprint(location_product_api_bp.blueprint)\n app.register_blueprint(product_api_bp.blueprint)\n app.register_blueprint(product_category_api_bp.blueprint)\n app.register_blueprint(role_api_bp.blueprint)\n app.register_blueprint(user_api_bp.blueprint)\n\n # Admin Dashboard\n @app.route('/admin')\n def admin_backend():\n return app.send_static_file(\"back_office/index.html\")\n\n\n return app", "def register_to_blueprint(blueprint, route, methods_to_apifunc):\n methods_to_viewfunc = {}\n for method in methods_to_apifunc:\n methods_to_viewfunc[method] = methods_to_apifunc[method].get_viewfunc()\n\n if 'HEAD' not in methods_to_viewfunc and 'GET' in methods_to_viewfunc:\n methods_to_viewfunc['HEAD'] = methods_to_viewfunc['GET']\n\n blueprint.add_url_rule(\n \"/%s\" % route,\n endpoint=route,\n view_func=error_handler(route_multiplexer(methods_to_viewfunc)),\n methods=list(methods_to_viewfunc.keys()))", "def create_app() -> Flask:\n\n flask_app = Flask('extraction_api', template_folder='./template')\n flask_app.secret_key = \"super secret key\"\n # import blueprints\n flask_app.register_blueprint(extraction_app)\n\n return flask_app", "def make_blueprint(con):\n\n app = Blueprint(\"question\", __name__)\n \n metric_pooled = PooledTripMetrics(con)\n metric_disabilities = DisabilitiesMetrics(con)\n metric_tt = TaxiTripQuestions(con)\n metric_oHareRideshare = OHareRideshareQuestion(con)\n sidewalk_search = SidewalkCafePermitSearch(con)\n \n \n @app.route(\"/question/hello\")\n def hello():\n return jsonify({ \"message\": \"Welcome to questions!\" })\n\n @app.route(\"/question/pooled_trips\")\n def pooled_trips():\n before_covid = (\"2019-02-01\", \"2020-03-02\")\n since_covid = (\"2020-03-02\", \"2021-04-01\")\n rows = metric_pooled.pooled_trip_comparison(before_covid, since_covid)\n metrics = metric_pooled.metrics_by_area(rows)\n return jsonify({ \"metrics\": metrics })\n \n @app.route(\"/question/disabilities\")\n def disabilities():\n rideshare_metrics = metric_disabilities.disabilities_rideshare_metrics()\n cta_area_metrics = metric_disabilities.disabilities_cta_by_community_area()\n 
cta_change_metrics = metric_disabilities.disabilities_cta_percent_change_metrics()\n cta_station_ridership_metrics = metric_disabilities.disabilities_ridership_per_station_metrics()\n return jsonify({ \n \"rideshare_metrics\": rideshare_metrics,\n \"cta_area_metrics\" : cta_area_metrics,\n \"cta_change_metrics\" : cta_change_metrics,\n \"cta_station_ridership_metrics\" : cta_station_ridership_metrics\n })\n\n @app.route(\"/question/taxitrips\")\n def taxitrips():\n most_common_dropoff = metric_tt.most_common_dropoff()\n payment_per_pickup = metric_tt.get_payment_type_by_pickup()\n payment_per_dropoff = metric_tt.get_payment_type_by_dropoff()\n return jsonify({\n \"most_common_dropoff\": most_common_dropoff,\n \"payment_per_pickup\": payment_per_pickup,\n \"payment_per_dropoff\": payment_per_dropoff\n })\n \n\n @app.route(\"/question/sidewalk_search\", methods=[\"POST\"])\n def question_sidewalk_search():\n body = request.get_json()\n raw_search = body[\"search\"] if \"search\" in body else \"\"\n search = raw_search.strip().lower()\n if len(search) > 0:\n permits = sidewalk_search.search_permits(search)\n return jsonify({ \"results\": permits })\n return jsonify({ \"results\": [] })\n\n @app.route(\"/question/ohare/rideshare\")\n def rideshare():\n #Area number of o'hare is 76\n #ohare_pickup_2019 = metric_oHareRideshare.metrics_by_area(metric_oHareRideshare.get_total_trips_by_pickup_specific_area_and_year(2019,76))\n #ohare_pickup_2020 = metric_oHareRideshare.metrics_by_area(metric_oHareRideshare.get_total_trips_by_pickup_specific_area_and_year(2020,76))\n #ohare_pickup_2021 = metric_oHareRideshare.metrics_by_area(metric_oHareRideshare.get_total_trips_by_pickup_specific_area_and_year(2021,76))\n ohare_dropoff_2019 = metric_oHareRideshare.metrics_by_area(metric_oHareRideshare.get_total_trips_by_dropoff_specific_area_and_year(2019,76))\n ohare_dropoff_2020 = metric_oHareRideshare.metrics_by_area(metric_oHareRideshare.get_total_trips_by_dropoff_specific_area_and_year(2020,76))\n ohare_dropoff_2021 = metric_oHareRideshare.metrics_by_area(metric_oHareRideshare.get_total_trips_by_dropoff_specific_area_and_year(2021,76))\n return jsonify({\n # \"ohare_pickup_2019\": ohare_pickup_2019,\n # \"ohare_pickup_2020\": ohare_pickup_2020,\n # \"ohare_pickup_2021\": ohare_pickup_2021,\n \"ohare_dropoff_2019\": ohare_dropoff_2019,\n \"ohare_dropoff_2020\": ohare_dropoff_2020,\n \"ohare_dropoff_2021\": ohare_dropoff_2021,\n })\n\n return app", "def create_routes(self):\r\n self._app.route('/api/autoconf',\r\n methods=['GET'],\r\n endpoint='api_autoconf')(self.entrypoint)\r\n self._app.route('/api/autoconf/<string:session_id>',\r\n methods=['GET', 'POST', 'DELETE'],\r\n endpoint='api_autoconf_status')(self.entrypoint)\r\n self._app.route('/api/autoconf/rgc',\r\n methods=['POST', 'DELETE', 'GET', 'PATCH'],\r\n endpoint='api_autoconf_rgc')(self.entrypoint)\r\n self._app.route('/api/autoconf/pd',\r\n methods=['POST', 'DELETE', 'GET', 'PATCH'],\r\n endpoint='api_autoconf_pd')(self.entrypoint)", "def register_blueprints(app) -> None:\n app.register_blueprint(core_app)\n\n for ext in get_valid_extensions():\n try:\n ext_module = importlib.import_module(f\"lnbits.extensions.{ext.code}\")\n app.register_blueprint(getattr(ext_module, f\"{ext.code}_ext\"), url_prefix=f\"/{ext.code}\")\n except Exception:\n raise ImportError(f\"Please make sure that the extension `{ext.code}` follows conventions.\")", "def initialize_routes(app):\n # Authentification \n app.add_resource(auth.LoginApi, '/auth/login')\n 
app.add_resource(auth.SignupApi, '/auth/SignupApi')\n # Intialisation et activation d'un parking\n app.add_resource(parkingInit.InitilizeAParking, '/administrate/add')\n app.add_resource(parkingInit.ActivateParking, '/administrate/activate')\n app.add_resource(parkingInit.InitilizeAParking, '/administrate/getall', endpoint='getall')\n # Gestion de Clients\n app.add_resource(useresResources.GestionUstilisateurs, '/administrate/usesrs/get')\n app.add_resource(useresResources.GestionUstilisateurs, '/administrate/usesrs/getById/<int:idUser>', endpoint='get_by_id')\n # statistiques financéres\n app.add_resource(stats.Money, '/administrate/finance/monthly', endpoint='monthly')\n app.add_resource(stats.Money, '/administrate/finance/yearly', endpoint='yearly')", "def create_app(self):\n app = Flask(__name__)\n\n app.config[\"auth_func\"] = self.auth_func\n app.config[\"hydrator_func\"] = self.hydrator_func\n app.config[\"request_hydrator_func\"] = self.request_hydrator_func\n app.config[\"database_uri\"] = self.database_uri\n app.config[\"hmac_secret\"] = self.hmac_secret\n\n cors = CORS()\n cors.init_app(app, resources={r\"/*\": {\"origins\": self.cors_origins, \"supports_credentials\": True}})\n\n app.register_blueprint(api_v0.bp)\n\n @app.route(\"/\")\n def health_check():\n \"\"\"Can be called by e.g. Kubernetes to verify that the API is up\n\n Returns:\n str: the static string \"Comet-API\", could be anything\n \"\"\"\n return \"Comet-API\"\n\n return app", "def create_app(config_name):\n app = Flask(__name__)\n\n # apply configuration\n cfg = os.path.join(os.getcwd(), 'config', config_name + '.py')\n app.config.from_pyfile(cfg)\n\n # initialize extensions\n db.init_app(app)\n\n # register blueprints\n from .api_v1 import api as api_blueprint\n from .api_v1_public import api as api_public_blueprint\n\n app.register_blueprint(api_blueprint, url_prefix='/api/v1')\n app.register_blueprint(api_public_blueprint, url_prefix='/api/v1/p')\n\n # register an after request handler\n @app.after_request\n def after_request(rv):\n headers = getattr(g, 'headers', {})\n rv.headers.extend(headers)\n return rv\n\n # authentication token route\n from .auth import auth\n from doroto.models import Company, Recruiter, Candidate\n @app.route('/get-auth-token')\n @auth.login_required\n @rate_limit(1, 600) # one call per 10 minute period\n @no_cache\n @json\n def get_auth_token():\n role = g.user.role.name\n if role == RoleType.ADMIN:\n return {'token': g.user.generate_auth_token(), 'role': g.user.role.name}\n elif role == RoleType.COMPANY:\n entity = Company.query.filter_by(user_id=g.user.id).first()\n elif role == RoleType.CANDIDATE:\n entity = Candidate.query.filter_by(user_id=g.user.id).first()\n elif role == RoleType.RECRUITER:\n entity = Recruiter.query.filter_by(user_id=g.user.id).first()\n if entity == None:\n raise ValidationError(\"No entity associated with this user..\")\n return {'token': g.user.generate_auth_token(), 'role': g.user.role.name, 'id': entity.id}\n\n return app", "def create_app(config_object):\n\n app = Flask(__name__)\n app.config.from_object(config_object)\n\n from app import api_bp\n app.register_blueprint(api_bp, url_prefix='/api')\n\n return app", "def register_endpoints(api):\n api.add_resource(EventList, '/events')", "def register(blueprint):\n blueprint.add_route(post, \"/call-records\", methods=['POST'])", "def add_routes(self):\n# from server.flask import views as flask_views\n# flask_views_custom_methods = filter(lambda x: x.startswith(\"view_\"), dir(flask_views))\n# for 
custom_method in flask_views_custom_methods:\n# # Retrieve data needed to add the URL rule to the Flask app\n# view_method = getattr(locals()[\"flask_views\"], custom_method)\n# docstring = getattr(view_method, \"__doc__\")\n# index_start = docstring.index(\"@app.route\")\n# index_end = index_start + len(\"@app.route\") + 1\n# custom_method_url = docstring[index_end:].replace(\" \",\"\").replace(\"\\n\",\"\")\n# # Get: (a) method URL to bind flask app, (b), method name, (c) method object to invoke\n# self._app.add_url_rule(custom_method_url, custom_method, view_func=view_method(self._app.mongo))\n self._app.register_blueprint(ro_flask_views)", "def ui_blueprint(app):\n routes = app.config.get(\"CONFIG_TUGRAZ_ROUTES\")\n\n blueprint = Blueprint(\n \"invenio_config_tugraz\",\n __name__,\n template_folder=\"templates\",\n static_folder=\"static\",\n )\n\n blueprint.add_url_rule(routes[\"guide\"], view_func=guide)\n blueprint.add_url_rule(routes[\"terms\"], view_func=terms)\n blueprint.add_url_rule(routes[\"gdpr\"], view_func=gdpr)\n\n @blueprint.before_app_first_request\n def rank_higher():\n \"\"\"Rank this modules blueprint higher than blueprint of security module.\"\"\"\n blueprints = current_app._blueprint_order\n our_index = None\n security_index = None\n\n for index, bp in enumerate(blueprints):\n if bp.name == \"security\":\n security_index = index\n if bp.name == \"invenio_config_tugraz\":\n our_index = index\n\n if (security_index is not None) and (our_index > security_index):\n temp = blueprints[security_index]\n blueprints[security_index] = blueprints[our_index]\n blueprints[our_index] = temp\n\n return blueprint", "def registerBlueprints(module_name):\n module = importlib.import_module(\n \"app.modules.\" + module_name, package=None)\n bp = getattr(module, module_name)\n server_logger.info(\"Registering module: \" + module_name)\n if bp.name == \"index\":\n server.register_blueprint(bp)\n else:\n server.register_blueprint(bp, url_prefix='/' + bp.name)", "def create_app(config=None):\n app = Flask(__name__)\n app.register_blueprint(teacher_api)\n app.run()", "def create_app():\n app = Flask(__name__)\n app.register_blueprint(playlists)\n app.register_blueprint(comments)\n return app", "def create_app():\n # pylint: disable=C0415\n # note: Ignoring 'Import outside toplevel' to avoid import while init\n\n from todo_app.config.swagger import SwaggerConfig\n from todo_app.routes.users import user_management_process\n from todo_app.routes.todo_item import todo_item_management_process\n from todo_app.routes.user_todo_list import todo_list_management_process\n\n # Define the WSGI application object\n app = Flask(__name__)\n\n # DEBUG ONLY!\n app.config[\"WTF_CSRF_ENABLED\"] = CommonConfig.wtf_csrf\n app.config[\"SECRET_KEY\"] = CommonConfig.app_secret_key\n\n @app.route(\"/\")\n def home():\n return \"We are working for new feature development! 
Please come back later!\"\n\n @app.route('/favicon.ico')\n def favicon():\n # To avoid 404 error\n return {}, 200\n\n\n\n # pylint: disable=W0613\n # note: Ignoring Unused argument 'resp_or_exc' as it's related to app\n # @app.teardown_appcontext\n # def cleanup(resp_or_exc):\n # handler.db_session.remove()\n\n # Register the blueprint here\n \n app.register_blueprint(\n SwaggerConfig.SWAGGERUI_BLUEPRINT, url_prefix=SwaggerConfig.SWAGGER_URL\n )\n app.register_blueprint(user_management_process, url_prefix=\"/api/v1/users\")\n app.register_blueprint(todo_item_management_process, url_prefix=\"/api/v1/todo/item\")\n app.register_blueprint(todo_list_management_process, url_prefix=\"/api/v1/todo/list\")\n \n\n return app", "def initialize_app(flask_app):\n # Create a blueprint to house the API, swagger can be reached from /api\n # and each of the models from /api/[model]\n blueprint = Blueprint('api', __name__, url_prefix='/api')\n api.init_app(blueprint)\n\n # Configure namespaces per model on the API.\n api.add_namespace(noms_namespace)\n\n flask_app.register_blueprint(blueprint)\n db.init_app(flask_app)\n\n with flask_app.app_context():\n db.create_all()", "def create_app(config_name):\n app = Flask(__name__, instance_relative_config=True)\n app.url_map.strict_slashes = False \n app.config.from_object(app_config[config_name])\n\n \"\"\"import the blueprint from the V1 folder __init__.py file and register the blueprint\"\"\"\n from app.api.V1 import v1 \n app.register_blueprint(v1) \n return app", "def register_blueprints(*blueprints):\n\t\tblueprints = list(blueprints) + reload_blueprints()\n\t\tfor blueprint in blueprints:\n\t\t\tapp.register_blueprint(blueprint)", "def _register_api(app):\n\n app.add_url_rule('/shipping/',\n \"put_shipping_method\", put_shipping_method, methods=['PUT'])", "def init_rest(app_):\n\n rest_api = Api(app_)\n rest_api.add_resource(views.rest_resources.AppListResource,\n ActiveConfig.REST_URL_APPS_LIST,\n ActiveConfig.REST_URL_APPS_LIST + '/')\n rest_api.add_resource(views.rest_resources.AppResource,\n ActiveConfig.REST_URL_APPS_ITEM,\n ActiveConfig.REST_URL_APPS,\n ActiveConfig.REST_URL_APPS + '/')", "def setup_rest(app: web.Application):\n _logger.debug(\"Setting up %s ...\", __name__)\n\n spec_path: Path = storage_resources.get_path(\"api/v0/openapi.yaml\")\n\n # Connects handlers\n for routes in [\n handlers_health.routes,\n handlers_locations.routes,\n handlers_datasets.routes,\n handlers_files.routes,\n handlers_simcore_s3.routes,\n ]:\n set_default_route_names(routes)\n app.router.add_routes(routes)\n\n _logger.debug(\"routes: %s\", get_named_routes_as_message(app))\n\n # prepare container for upload tasks\n app[UPLOAD_TASKS_KEY] = {}\n\n # Enable error, validation and envelop middleware on API routes\n append_rest_middlewares(app, api_version=f\"/{api_vtag}\")\n\n # Adds swagger doc UI\n setup_swagger(\n app,\n swagger_url=\"/dev/doc\",\n swagger_from_file=f\"{spec_path}\",\n ui_version=3,\n )", "def initialize_app(app):\n # configure_app(app)\n # log.info(\"> Starting development server at http://%s/api/ <<<<<\" %\n # app.config[\"SERVER_NAME\"])\n\n blueprint_api = Blueprint('api', __name__, url_prefix='/api')\n api.init_app(blueprint_api)\n app.register_blueprint(blueprint_api)\n\n api.add_namespace(task_namespace)\n api.add_namespace(chain_namespace)\n\n Bootstrap(app)\n nav.init_app(app)\n app.register_blueprint(frontend_blueprint)\n app.register_blueprint(processors_blueprint)\n app.register_blueprint(chains_blueprint)\n 
app.register_blueprint(tasks_blueprint)\n app.register_blueprint(compare_blueprint)\n\n db.init_app(app)\n db.create_all(app=app)\n\n if not os.path.exists(app.config[\"OCRD_BUTLER_RESULTS\"]):\n os.makedirs(app.config[\"OCRD_BUTLER_RESULTS\"])", "def create_app(register_blueprints=True):\n app = Flask(__name__, instance_relative_config=True)\n\n app.config.from_object('app.default_config') # default config\n # app.config.from_pyfile('application.cfg.py') # server config file, do not include in versioning\n\n db.init_app(app)\n api = Api(app)\n api.add_resource(UserList, '/api/users')\n\n if register_blueprints:\n register_blueprints_on_app(app)\n\n return app", "def create_app(dictionary_with_strategies):\n\n app = Flask(__name__, static_url_path='',\n static_folder='../dist',\n template_folder='../dist')\n\n @app.route('/')\n def home():\n return redirect(url_for('static', filename='index.html'))\n\n app.url_map.strict_slashes = False\n app.config['Strategies'] = dictionary_with_strategies\n register_blueprints(app, \"/api\")\n\n return app", "def _register_api(app):\n \n app.add_url_rule('/like/', \n \"new_like\", new_like, methods=['PUT'])\n app.add_url_rule('/like/', \n \"delete_like\", delete_like, methods=['DELETE'])", "def register_routes(\n config: Configurator,\n route_name_ext: str = \"x-pyramid-route-name\",\n root_factory_ext: str = \"x-pyramid-root-factory\",\n apiname: str = \"pyramid_openapi3\",\n route_prefix: t.Optional[str] = None,\n) -> None:\n\n def action() -> None:\n spec = config.registry.settings[apiname][\"spec\"]\n for pattern, path_item in spec[\"paths\"].items():\n route_name = path_item.get(route_name_ext)\n if route_name:\n root_factory = path_item.get(root_factory_ext)\n config.add_route(\n route_name,\n pattern=route_prefix + pattern\n if route_prefix is not None\n else pattern,\n factory=root_factory or None,\n )\n\n config.action((\"pyramid_openapi3_register_routes\",), action, order=PHASE1_CONFIG)", "def configure_app(self):\n self.app.route('/', callback=self.get_api)", "def register_blueprints(app, package_name, package_path):\n rv = []\n for _, name, _ in pkgutil.iter_modules(package_path):\n m = importlib.import_module('%s.%s' % (package_name, name))\n for item in dir(m):\n item = getattr(m, item)\n if isinstance(item, Blueprint):\n app.register_blueprint(item)\n rv.append(item)\n return rv", "def register_blueprints(app, package_name, package_path):\n rv = []\n for _, name, _ in pkgutil.iter_modules(package_path):\n m = importlib.import_module('%s.%s' % (package_name, name))\n for item in dir(m):\n item = getattr(m, item)\n if isinstance(item, Blueprint):\n app.register_blueprint(item)\n rv.append(item)\n return rv", "def create_app(config_object):\n app = Flask(__name__)\n app.config.from_object(config_object)\n\n # add blueprint\n from app.api import api_bp\n app.register_blueprint(api_bp, url_prefix='/api/v1/')\n\n # add redis client\n from app.redis_init import redis_client\n redis_client.init_app(app)\n\n # add prometheus middleware\n from app.prometheus_middleware import setup_metrics\n setup_metrics(app)\n\n return app", "def create_api_endpoints(app):\n manager = APIManager(app, flask_sqlalchemy_db=models.database)\n manager.create_api(models.State, results_per_page=0)\n manager.create_api(models.Party, results_per_page=0)\n manager.create_api(models.Candidate, results_per_page=0)\n manager.create_api(models.Election, results_per_page=0)\n manager.create_api(models.ElectoralCollege,\n results_per_page=0, collection_name='electoralcollege')\n 
manager.create_api(models.PartiesInvolved,\n results_per_page=0, collection_name='partiesinvolved')\n manager.create_api(models.ElectionsToState,\n results_per_page=0, collection_name='electionstostate')", "def make_doc():\n doc_app = Flask(__name__)\n doc_app.register_blueprint(blueprint(no_doc=False))\n return doc_app", "def build():\n\n app = flask.Flask(\"cnc-forge-api\")\n app.api = flask_restful.Api(app)\n\n with open(\"/opt/service/secret/redis.json\", \"r\") as redis_file:\n app.redis = redis.Redis(charset=\"utf-8\", decode_responses=True, **json.loads(redis_file.read()))\n\n app.api.add_resource(Health, '/health')\n app.api.add_resource(Forge, '/forge', '/forge/<id>')\n app.api.add_resource(CnC, '/cnc', '/cnc/<id>')\n\n return app", "def _register_view(self, app, resource, *urls, **kwargs):\n endpoint = kwargs.pop('endpoint', None) or resource.__name__.lower()\n self.endpoints.add(endpoint)\n\n if endpoint in getattr(app, 'view_class', {}):\n existing_view_class = app.view_functions[endpoint].__dict__['view_class']\n\n # if you override the endpoint with a different class, avoid the collision by raising an exception\n if existing_view_class != resource:\n raise ValueError('Endpoint {!r} is already set to {!r}.'\n .format(endpoint, existing_view_class.__name__))\n\n if not hasattr(resource, 'endpoint'): # Don't replace existing endpoint\n resource.endpoint = endpoint\n resource_func = self.output(resource.as_view(endpoint))\n\n for decorator in chain(kwargs.pop('decorators', ()), self.decorators):\n resource_func = decorator(resource_func)\n\n for url in urls:\n rule = self._make_url(url, self.blueprint.url_prefix if self.blueprint else None)\n\n # If this Api has a blueprint\n if self.blueprint:\n # And this Api has been setup\n if self.blueprint_setup:\n # Set the rule to a string directly, as the blueprint\n # is already set up.\n self.blueprint_setup.add_url_rule(self._make_url(url, None), view_func=resource_func, **kwargs)\n continue\n else:\n # Set the rule to a function that expects the blueprint\n # prefix to construct the final url. 
Allows deferment\n # of url finalization in the case that the Blueprint\n # has not yet been registered to an application, so we\n # can wait for the registration prefix\n rule = partial(self._make_url, url)\n else:\n # If we've got no Blueprint, just build a url with no prefix\n rule = self._make_url(url, None)\n # Add the url to the application or blueprint\n app.add_url_rule(rule, view_func=resource_func, **kwargs)", "def create_app(config_name):\n app = Flask(__name__)\n app.config.from_object(app_config[config_name])\n # versions of api\n from app.api.v2 import version2 as v2\n\n app.register_blueprint(v2)\n\n # registered JWT manager\n app.config['JWT_SECRET_KEY'] = 'owezzy'\n jwt = JWTManager(app)\n\n create_tables()\n\n return app", "def create_app(config_name):\n load_env_var()\n storemanager = Flask(__name__, instance_relative_config=True)\n storemanager.config[\"JWT_SECRET_KEY\"] = \"mysecretkey\"\n jwt = JWTManager(storemanager)\n CORS(storemanager)\n\n storemanager.url_map.strict_slashes = False\n storemanager.config.from_object(app_config[config_name])\n\n from .api.v1 import version_1 as v1\n storemanager.register_blueprint(v1)\n\n from .api.v1 import auth_blueprint as auth_bp\n storemanager.register_blueprint(auth_bp)\n\n from .api.v2 import version_2 as v2\n storemanager.register_blueprint(v2)\n\n from .api.v2 import auth_blueprint_v2 as auth_bp_v2\n storemanager.register_blueprint(auth_bp_v2)\n\n # Add app error handlers\n storemanager.register_error_handler(404, resource_not_found)\n storemanager.register_error_handler(405, method_not_allowed)\n storemanager.register_error_handler(401, missing_auth_header)\n\n @storemanager.errorhandler(Exception)\n def unhandled_exception(e):\n return jsonify({\"message\": \"Server error. Contact the admin\",\n \"status\": 500})\n\n @jwt.user_claims_loader\n def add_claim_to_access_token(user_identity):\n return {\"role\": user_identity[\"role\"]}\n\n @jwt.user_identity_loader\n def user_identity_lookup(user_identity):\n return {\"username\": user_identity[\"username\"]}\n\n return storemanager", "def _load_api(self):\n self.app.add_url_rule('/scheduler', 'get_scheduler_info', self._apply_auth(api.get_scheduler_info))\n self.app.add_url_rule('/scheduler/jobs', 'add_job', self._apply_auth(api.add_job), methods=['POST'])\n self.app.add_url_rule('/scheduler/jobs', 'get_jobs', self._apply_auth(api.get_jobs))\n self.app.add_url_rule('/scheduler/jobs/reload_jobs', 'reload_jobs', self._apply_auth(api.reload_jobs), methods=['POST'])\n self.app.add_url_rule('/scheduler/jobs/<job_id>', 'get_job', self._apply_auth(api.get_job))\n self.app.add_url_rule('/scheduler/jobs/<job_id>', 'delete_job', self._apply_auth(api.delete_job), methods=['DELETE'])\n self.app.add_url_rule('/scheduler/jobs/<job_id>', 'update_job', self._apply_auth(api.update_job), methods=['PATCH'])\n self.app.add_url_rule('/scheduler/jobs/<id>/reschedule', 'reschedule_job', self._apply_auth(api.reschedule_job), methods=['PATCH'])\n self.app.add_url_rule('/scheduler/jobs/<id>/reschedule_once', 'reschedule_job_once', self._apply_auth(api.reschedule_job_once), methods=['PATCH'])\n self.app.add_url_rule('/scheduler/jobs/<job_id>/pause', 'pause_job', self._apply_auth(api.pause_job), methods=['POST'])\n self.app.add_url_rule('/scheduler/jobs/<job_id>/resume', 'resume_job', self._apply_auth(api.resume_job), methods=['POST'])\n self.app.add_url_rule('/scheduler/jobs/<job_id>/run', 'run_job', self._apply_auth(api.run_job), methods=['POST'])", "def create_app():\n app = Flask(__name__, 
instance_relative_config=False)\n app.config.from_object('config.Config')\n db.init_app(app)\n flask_bcrypt.init_app(app)\n jwt.init_app(app)\n\n with app.app_context():\n # Import Blueprints\n from .routes.users_route import users_bp\n from .routes.messages_route import messages_bp\n\n # REGISTER ROUTES\n app.register_blueprint(users_bp, url_prefix=\"/users\")\n app.register_blueprint(messages_bp, url_prefix=\"/messages\")\n\n\n return app", "def create_app(mode=os.environ.get('FLASK_MODE', 'app.config.Development')):\n app = APIFlask(__name__)\n # add configurations\n app_config = config.get(mode)\n app.config.from_object(app_config)\n app_config().init_app(app)\n\n # initialize all extensions\n init_extensions(app)\n\n # register blueprints\n # add blueprint registration statements here\n from app.users import users\n app.register_blueprint(users)\n\n # register error handlers\n app.register_error_handler(400, bad_request)\n app.register_error_handler(Forbidden, forbidden)\n app.register_error_handler(404, not_found)\n app.register_error_handler(405, method_not_supported)\n app.register_error_handler(APIException, conflict)\n\n return app", "def api_factory(global_config, **local_conf):\n\treturn make_app(blueprints.api_server, settings.ProductionConfig)", "def create_rest_app(config=base_config):\n app = Flask(__name__, template_folder=\"templates\", static_folder=\"static\")\n app.config.from_object(config)\n register_extensions(app)\n register_blueprints(app)\n\n @app.route('/', methods=['GET'])\n def index():\n \"\"\"Returns the applications index.\"\"\"\n return \"welcome to sqlalchemy applications!\"\n\n return app", "def setup(app, obj, is_core=False):\n \n # Basic api common to all microservices\n app.router.add_route('GET', '/fledge/service/ping', obj.ping)\n app.router.add_route('POST', '/fledge/service/shutdown', obj.shutdown)\n app.router.add_route('POST', '/fledge/change', obj.change)\n\n if is_core:\n # Configuration\n app.router.add_route('GET', '/fledge/service/category', obj.get_configuration_categories)\n app.router.add_route('POST', '/fledge/service/category', obj.create_configuration_category)\n app.router.add_route('GET', '/fledge/service/category/{category_name}', obj.get_configuration_category)\n app.router.add_route('DELETE', '/fledge/service/category/{category_name}', obj.delete_configuration_category)\n app.router.add_route('GET', '/fledge/service/category/{category_name}/children', obj.get_child_category)\n app.router.add_route('POST', '/fledge/service/category/{category_name}/children', obj.create_child_category)\n app.router.add_route('GET', '/fledge/service/category/{category_name}/{config_item}',\n obj.get_configuration_item)\n app.router.add_route('PUT', '/fledge/service/category/{category_name}/{config_item}',\n obj.update_configuration_item)\n app.router.add_route('DELETE', '/fledge/service/category/{category_name}/{config_item}/value',\n obj.delete_configuration_item)\n\n # Service Registration\n app.router.add_route('POST', '/fledge/service', obj.register)\n app.router.add_route('DELETE', '/fledge/service/{service_id}', obj.unregister)\n app.router.add_route('PUT', '/fledge/service/{service_id}/restart', obj.restart_service)\n app.router.add_route('GET', '/fledge/service', obj.get_service)\n app.router.add_route('GET', '/fledge/service/authtoken', obj.get_auth_token)\n\n # Interest Registration\n app.router.add_route('POST', '/fledge/interest', obj.register_interest)\n app.router.add_route('DELETE', '/fledge/interest/{interest_id}', 
obj.unregister_interest)\n app.router.add_route('GET', '/fledge/interest', obj.get_interest)\n\n # Asset Tracker\n app.router.add_route('GET', '/fledge/track', obj.get_track)\n app.router.add_route('POST', '/fledge/track', obj.add_track)\n\n # Audit Log\n app.router.add_route('POST', '/fledge/audit', obj.add_audit)\n\n # enable/disable schedule\n app.router.add_route('PUT', '/fledge/schedule/{schedule_id}/enable', obj.enable_disable_schedule)\n\n # Internal refresh cache\n app.router.add_route('PUT', '/fledge/cache', obj.refresh_cache)\n\n # Service token verification\n app.router.add_route('POST', '/fledge/service/verify_token', obj.verify_token)\n\n # Service token refresh\n app.router.add_route('POST', '/fledge/service/refresh_token', obj.refresh_token)\n\n app.router.add_route('GET', '/fledge/ACL/{acl_name}', obj.get_control_acl)\n\n # Proxy API setup for a microservice\n proxy.setup(app)\n\n # enable cors support\n enable_cors(app)", "def create_app(config_name):\n app = Flask(__name__)\n CORS(app)\n # apply configuration\n cfg = os.path.join(os.getcwd(), 'config', config_name + '.py')\n app.config.from_pyfile(cfg)\n\n # initialize extensions\n db.init_app(app)\n mail.init_app(app)\n\n # register blueprints\n from .api_v1 import api as api_blueprint\n app.register_blueprint(api_blueprint, url_prefix='/api/v1')\n\n # register an after request handler\n @app.after_request\n def after_request(rv):\n headers = getattr(g, 'headers', {})\n rv.headers.extend(headers)\n return rv\n\n \"\"\"\n @api {get} /get-auth-token authenticate in server\n @apiName Log in\n @apiGroup User\n\n @apiParam {String} username username\n @apiParam {String} password password\n\n @apiSuccess {String} token a long string is returned as a token\n @apiSuccessExample {json} Success-Response:\n {\"token\": \"eyJhbGciOiJIUzI1NiIsImlhdCI6MTQ2\"}\n\n @apiError {json} 404 userNotExist\n @apiErrorExample {json} 404 Error-Response:\n {\n 'error': 'userNotExist'\n 'message': 'please sign up'\n }\n\n @apiError {json} 403 userNotVerified\n @apiErrorExample {json} 403 Error-Response:\n {\n 'error': 'userNotVerified',\n 'message': 'please verify your email account'\n }\n\n @apiError {json} 400 wrongPassword\n @apiErrorExample {json} 400 Error-Response:\n {\n 'error': 'wrongPassword',\n 'message': 'The password is not correct'\n }\n\"\"\"\n # authentication token route\n from .auth import auth\n @app.route('/get-auth-token')\n @auth.login_required\n # @rate_limit(1, 600) # one call per 10 minute period\n @no_cache\n @json\n def get_auth_token():\n return {'token': g.user.generate_auth_token(),'user_id':g.user.user_id}\n\n socketio.init_app(app)\n\n return app", "def create_app():\n app = Flask(__name__, instance_relative_config=False)\n app.config.from_object('config.Config')\n app.config['TEMPLATES_AUTO_RELOAD'] = True\n\n\n # Initialize Plugins\n db.init_app(app)\n login_manager.login_view = 'auth_bp.login'\n login_manager.init_app(app)\n\n cache.init_app(app)\n sess.init_app(app)\n\n @app.context_processor\n def inject_session():\n return dict(session=sess)\n\n @app.context_processor\n def inject_datetime():\n return dict(dnow=datetime.now())\n\n @app.context_processor\n def check_permissions():\n def check_perms(perm, permset):\n return Permissions.check(perm, permset)\n return dict(check_perms=check_perms)\n\n @app.context_processor\n def lookup_permissions():\n def lookup_perm(perm):\n return Permissions.lookup(perm)\n return dict(lookup_perm=lookup_perm)\n\n app.add_template_global(Permissions.lookups(), 'permissions')\n\n 
with app.app_context():\n # Include our Routes\n from .main import main_bp\n from .auth import auth_bp\n from .admin import admin_bp\n from .snapshots import snap_bp\n from .geo import geo_bp\n from .ppe import ppe_bp\n from .space import space_bp\n from .staff import staff_bp\n from .trans import trans_bp\n\n # Register Blueprints\n app.register_blueprint(main_bp)\n app.register_blueprint(auth_bp)\n app.register_blueprint(admin_bp)\n app.register_blueprint(snap_bp)\n app.register_blueprint(geo_bp)\n app.register_blueprint(ppe_bp)\n app.register_blueprint(space_bp)\n app.register_blueprint(staff_bp)\n app.register_blueprint(trans_bp)\n\n\n return app", "def _deferred_blueprint_init(self, setup_state):\n self.blueprint_setup = setup_state\n if setup_state.add_url_rule.__name__ != '_add_url_rule_patch':\n setup_state._original_add_url_rule = setup_state.add_url_rule\n setup_state.add_url_rule = MethodType(Api._add_url_rule_patch,\n setup_state)\n if not setup_state.first_registration:\n raise ValueError('flask-RESTEasy blueprints can only be registered once.')\n self._init_app(setup_state.app)", "def create_app(name, config, db_api):\n app = Flask(name)\n app.logger.addHandler(logging.StreamHandler(sys.stdout))\n app.logger.setLevel(logging.DEBUG)\n\n api = ramlfications.parse('etc/ramls/documents_api.raml')\n\n # 1. collect all methods for same endpoint pattern\n _resources = resource_restructure(api, replacer=FLASK_URI_PARAM_PATTERN)\n\n # 2. add url to app\n for rule, resources in _resources.items():\n\n view_kwargs = dict(resources=resources, api=api, db_api=db_api)\n ramlow_as_view = ramlow_view.as_view(rule, **view_kwargs)\n\n uri_kwargs = dict(view_func=ramlow_as_view, methods=resources.keys())\n app.add_url_rule(rule, **uri_kwargs)\n\n return app", "def _init_routes(self):\n before_hooks = [\n helpers.require_accepts_json,\n helpers.extract_project_id,\n\n # NOTE(kgriffs): Depends on project_id being extracted, above\n functools.partial(helpers.validate_queue_name,\n self._validate.queue_name)\n ]\n\n self.app = falcon.API(before=before_hooks)\n\n queue_controller = self._storage.queue_controller\n message_controller = self._storage.message_controller\n claim_controller = self._storage.claim_controller\n\n # Home\n self.app.add_route('/v1', v1.V1Resource())\n\n # Queues Endpoints\n queue_collection = queues.CollectionResource(self._validate,\n queue_controller)\n self.app.add_route('/v1/queues', queue_collection)\n\n queue_item = queues.ItemResource(queue_controller, message_controller)\n self.app.add_route('/v1/queues/{queue_name}', queue_item)\n\n stats_endpoint = stats.Resource(queue_controller)\n self.app.add_route('/v1/queues/{queue_name}'\n '/stats', stats_endpoint)\n\n # Metadata Endpoints\n metadata_endpoint = metadata.Resource(self._wsgi_conf, self._validate,\n queue_controller)\n self.app.add_route('/v1/queues/{queue_name}'\n '/metadata', metadata_endpoint)\n\n # Messages Endpoints\n msg_collection = messages.CollectionResource(self._wsgi_conf,\n self._validate,\n message_controller)\n self.app.add_route('/v1/queues/{queue_name}'\n '/messages', msg_collection)\n\n msg_item = messages.ItemResource(message_controller)\n self.app.add_route('/v1/queues/{queue_name}'\n '/messages/{message_id}', msg_item)\n\n # Claims Endpoints\n claim_collection = claims.CollectionResource(self._wsgi_conf,\n self._validate,\n claim_controller)\n self.app.add_route('/v1/queues/{queue_name}'\n '/claims', claim_collection)\n\n claim_item = claims.ItemResource(self._wsgi_conf, self._validate,\n 
claim_controller)\n self.app.add_route('/v1/queues/{queue_name}'\n '/claims/{claim_id}', claim_item)\n\n # Health\n self.app.add_route('/v1/health', health.HealthResource())", "def create_app():\n logging.basicConfig(\n level=REANA_LOG_LEVEL,\n format=REANA_LOG_FORMAT\n )\n app = Flask(__name__)\n app.config.from_object('reana_server.config')\n app.secret_key = \"hyper secret key\"\n\n # Register API routes\n from .rest import ping, secrets, users, workflows # noqa\n app.register_blueprint(ping.blueprint, url_prefix='/api')\n app.register_blueprint(workflows.blueprint, url_prefix='/api')\n app.register_blueprint(users.blueprint, url_prefix='/api')\n app.register_blueprint(secrets.blueprint, url_prefix='/api')\n\n app.session = Session\n CORS(app)\n return app", "def as_blueprint(self, **options):\n blueprint = self.create_blueprint(**options)\n\n for rule in self.create_url_rules():\n blueprint.add_url_rule(**rule)\n\n for exc_or_code, error_handler in self.create_error_handlers():\n blueprint.register_error_handler(exc_or_code, error_handler)\n\n return blueprint", "def create_app():\n app = Flask(__name__, instance_relative_config=True)\n app.config.from_object('config.Config')\n\n # Initialize Plugins\n db.init_app(app)\n login_manager.init_app(app)\n \"\"\" Initialize plugins \"\"\"\n\n login_manager.login_message = 'You must be logged in to access this page'\n login_manager.login_message_category = 'info'\n login_manager.session_protection = 'strong'\n login_manager.login_view = 'auth_bp.login'\n\n # from .modules.user.models import User\n from .modules.user.methods import UserMethod\n @login_manager.user_loader\n def load_user(session_token):\n # def load_user(user_id):\n print('load_user - user_id - session_token: ', session_token)\n print('loading auth...')\n # since the user_id is just the primary key of our auth table, auth it in the query for the auth\n return UserMethod.get_user_session_token(session_token)\n\n with app.app_context():\n \"\"\" Blueprints \"\"\"\n from .modules.auth.views import auth_bp\n \"\"\" Blueprint for Auth routes in App \"\"\"\n from .modules.catalog.views import catalog_bp\n \"\"\" Blueprint for Catalog routes in App \"\"\"\n from .modules.category.views import category_bp\n \"\"\" Blueprint for Category routes in App \"\"\"\n from .modules.item.views import item_bp\n \"\"\" Blueprint for Item routes in App \"\"\"\n from .modules.user.views import user_bp\n \"\"\" Blueprint for User routes in App \"\"\"\n\n \"\"\"\" Register Blueprints \"\"\"\n app.register_blueprint(auth_bp)\n app.register_blueprint(catalog_bp)\n app.register_blueprint(category_bp)\n app.register_blueprint(item_bp)\n app.register_blueprint(user_bp)\n\n from .modules.catalog.models import Catalog\n from .modules.category.models import Category\n from .modules.item.models import Item\n \"\"\"Import the models so that sqlalchemy can detect them and create the DB \"\"\"\n\n db.create_all()\n \"\"\" Create the DB \"\"\"\n return app", "def init(app):\n from sirepo import feature_config\n from sirepo import simulation_db\n\n if _uri_to_route:\n return\n global _app\n _app = app\n for n in _REQUIRED_MODULES + feature_config.cfg.api_modules:\n register_api_module(importlib.import_module('sirepo.' 
+ n))\n _init_uris(app, simulation_db)", "def create_app(config_name):\n app = Flask(__name__)\n app.config.from_object(app_config[config_name])\n app.register_blueprint(party_bluprint)\n app.register_blueprint(office_bluprint)\n app.register_blueprint(register_bluprint)\n app.register_blueprint(login_bluprint)\n app.register_blueprint(office_bluprint)\n app.register_blueprint(party_bluprint)\n app.register_error_handler(400, bad_request)\n app.register_error_handler(500, internal_server_error)\n app.register_error_handler(404, page_not_found)\n app.register_error_handler(405, method_not_allowed)\n # app.register_error_handler('Type error', type_error)\n return app", "def add_domain_routes(app):\n\n @app.route(\"/v1/list_agencies/\", methods=[\"GET\"])\n @get_dabs_sub_tier_agencies\n def list_agencies(cgac_sub_tiers, frec_sub_tiers):\n \"\"\" Get all agencies the current user has DABS access to.\n Args:\n cgac_sub_tiers - List of all CGAC SubTierAgencies generated by the get_dabs_sub_tier_agencies decorator,\n required to list only sub_tier_agencies that user has DABS permissions for\n frec_sub_tiers - List of all FREC SubTierAgencies generated by the get_dabs_sub_tier_agencies decorator,\n required to list only sub_tier_agencies that user has DABS permissions for\n \"\"\"\n return JsonResponse.create(StatusCode.OK, get_accessible_agencies(cgac_sub_tiers, frec_sub_tiers))\n\n @app.route(\"/v1/list_all_agencies/\", methods=[\"GET\"])\n def list_all_agencies():\n \"\"\" List all CGAC and FREC Agencies \"\"\"\n return JsonResponse.create(StatusCode.OK, get_all_agencies())\n\n @app.route(\"/v1/list_sub_tier_agencies/\", methods=[\"GET\"])\n @get_fabs_sub_tier_agencies\n def list_sub_tier_agencies(sub_tier_agencies):\n \"\"\" List all Sub-Tier Agencies user has FABS permissions for\n Args:\n sub_tier_agencies - List of all SubTierAgencies generated by the get_fabs_sub_tier_agencies decorator,\n required to list only sub_tier_agencies that user has FABS permissions for\n \"\"\"\n return JsonResponse.create(StatusCode.OK, organize_sub_tier_agencies(sub_tier_agencies))", "def register_blueprints(self, package, **options):\n prefix = package.__name__ + '.'\n for importer, name, is_pkg in iter_modules(package.__path__, prefix):\n module = importer.find_module(name).load_module(name)\n blueprint_name = name.rsplit('.')[-1]\n blueprint = getattr(module, blueprint_name, None)\n if blueprint and isinstance(blueprint, Blueprint):\n log.info(' * Registering blueprint {}'.format(name))\n blueprint.name = name\n self.register_blueprint(blueprint, **options)\n elif is_pkg:\n self.register_blueprints(module, **options)", "def init_app(self, app):\n # Avoid double initialization.\n if self._flask_app is app:\n return None\n if self._flask_app is not None:\n raise RuntimeError(\n \"This api has already been registered on a flask application.\"\n )\n\n self._flask_app = app\n\n # Add the url rule.\n app.add_url_rule(\n rule=self._uri + \"/<path:path>\",\n endpoint=\"jsonapi\",\n view_func=self.handle_request,\n methods=[\"get\", \"post\", \"patch\", \"delete\", \"head\"]\n )\n\n # Register the jsonapi extension on the flask application.\n app.extensions = getattr(app, \"extensions\", dict())\n app.extensions[\"jsonapi\"] = self\n\n # Add the api to the jinja environment\n app.jinja_env.globals[\"jsonapi\"] = current_api\n return None", "def init_app(app):\n\n # pylint: disable=import-outside-toplevel\n\n from flask_cors import CORS\n from flask_jwt_extended import JWTManager\n\n allowed_origins = 
app.config['ALLOWED_ORIGINS']\n CORS(app, supports_credentials=True, origins=allowed_origins)\n\n jwt = JWTManager()\n jwt.init_app(app)\n\n import json\n import firebase_admin\n\n creds = app.config['FIREBASE_CREDENTIAL']\n\n try:\n credential = firebase_admin.credentials.Certificate(cert=creds)\n except FileNotFoundError:\n creds_escaped = creds.encode().decode('unicode_escape')\n creds_dict = json.loads(creds_escaped, strict=False)\n credential = firebase_admin.credentials.Certificate(cert=creds_dict)\n\n firebase_admin.initialize_app(credential=credential)\n\n from .auth import Token, TokenRevoke, TokenRefresh\n from .quote import Quotes, Quote, Random as RandomQuote, Contributor\n from .like import Likes, Like\n from .user import User, CurrentUser\n from .quote_status import QuoteStatus\n\n api.init_app(bp)\n api.add_resource(Token, '/auth/token')\n api.add_resource(TokenRefresh, '/auth/refresh')\n api.add_resource(TokenRevoke, '/auth/revoke')\n api.add_resource(Quotes, '/quotes')\n api.add_resource(Quote, '/quotes/<int:quote_id>')\n api.add_resource(RandomQuote, '/quotes/random')\n api.add_resource(Likes, '/likes')\n api.add_resource(Like, '/likes/<int:quote_id>')\n api.add_resource(User, '/users/<int:user_id>')\n api.add_resource(CurrentUser, '/users/me')\n api.add_resource(Contributor, '/quotes/<int:quote_id>/contributor')\n api.add_resource(QuoteStatus, '/quote-statuses')\n\n app.register_blueprint(bp, url_prefix='/v1')", "def register_oauthhandlers(app):\n blueprint = get_auth_blueprint(app)\n if blueprint is not None:\n app.register_blueprint(blueprint, url_prefix=\"/oauth\")", "def create_app(app_name: str):\n\n app = Flask(app_name)\n app.json_encoder = CustomJSONEncoder\n\n app.config.update({\n 'SQLALCHEMY_DATABASE_URI': build_db_uri(),\n 'SQLALCHEMY_TRACK_MODIFICATIONS': os.environ.get('SQLALCHEMY_TRACK_MODIFICATIONS', False),\n 'APP_CONFIG': {\n 'HOSTNAME': os.environ.get('HOSTNAME', ''),\n 'GREETING': os.environ.get('GREETING', 'Hello'),\n }\n })\n\n db.init_app(app)\n api = Api(app)\n\n with app.app_context():\n api.add_resource(Index, '/')\n api.add_resource(Config, '/config')\n api.add_resource(StudentMany, '/student')\n api.add_resource(StudentOne, '/student/<int:student_id>')\n return app" ]
[ "0.7972771", "0.764559", "0.7619169", "0.7566443", "0.74812996", "0.74501944", "0.7441911", "0.7426413", "0.7388967", "0.73659635", "0.7289081", "0.7288449", "0.72654545", "0.724093", "0.7233655", "0.71914", "0.71732324", "0.71653473", "0.71482444", "0.71245396", "0.70734113", "0.70652753", "0.70514256", "0.6970904", "0.69564426", "0.6931496", "0.6920728", "0.6895274", "0.6838854", "0.6835451", "0.6832563", "0.6828872", "0.6820762", "0.6770979", "0.6767105", "0.67636377", "0.67353845", "0.6731451", "0.672934", "0.67063564", "0.669839", "0.66835403", "0.6675405", "0.66713196", "0.667051", "0.66292274", "0.6621837", "0.6580391", "0.6575231", "0.65595466", "0.65157324", "0.6502345", "0.6487238", "0.64848596", "0.6467438", "0.64537764", "0.6433519", "0.6432508", "0.6432299", "0.6396123", "0.6344051", "0.6329973", "0.6299438", "0.6279129", "0.62745184", "0.6274095", "0.62482584", "0.6248004", "0.6244444", "0.62434924", "0.62434924", "0.6203289", "0.6195641", "0.6172342", "0.61686414", "0.6166302", "0.6147437", "0.61446756", "0.61356014", "0.6124742", "0.61232966", "0.6123294", "0.6105067", "0.60937214", "0.60824174", "0.60630906", "0.60533595", "0.6045904", "0.60430586", "0.60409373", "0.6036198", "0.60324425", "0.60275215", "0.6019559", "0.601075", "0.6008299", "0.5999678", "0.59995985", "0.5988599", "0.59857196" ]
0.66193205
47
Generate samples from a mixture Gaussian distribution
def mix_gaussian(mu, sigma_list, weights, num_sample):
    """
    inputs:
    -------
        mu            mean list, numpy array of shape (num_components, dim)
        sigma_list    list of covariance matrices, one per component
        weights       weights corresponding to each component
        num_sample    the number of samples
    returns:
    --------
        samples       samples drawn from the mixture Gaussian distribution,
                      shape (num_sample, dim)
    """
    dim = mu.shape[1]
    num_components = mu.shape[0]
    assert (len(weights) == num_components) and (num_components == len(sigma_list))
    data = np.zeros((num_sample, dim))
    for i in range(num_sample):
        # pick a mixture component according to the weights, then draw one sample from it
        idx_component = np.random.choice(num_components, p=weights)
        mean = mu[idx_component]
        cov = sigma_list[idx_component]
        data[i, :] = np.random.multivariate_normal(mean, cov)
    return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_samples(mu1,cov,number_of_samples):\n samples = np.random.multivariate_normal(mu1, cov,number_of_samples)\n return samples", "def gen_mixture():\n npr.seed(0)\n num_exp = int(1e4)\n x_dim = 2\n z_dim = 2\n mu1 = [5, 5,]\n mu2 = [-5, -5]\n theta = np.array([[2,1],[-1,-2]])\n sigma = 0.1\n u = npr.uniform((num_exp,))\n z = np.zeros((num_exp, z_dim))\n cov = np.zeros((z_dim, z_dim))\n np.fill_diagonal(cov, 1)\n sz = int(num_exp/2)\n z[:sz, ]= npr.multivariate_normal(mu1, cov,sz)\n z[sz:, ] = npr.multivariate_normal(mu2,cov,sz)\n mu_x = theta@z.transpose()\n\n x = np.zeros((num_exp, x_dim))\n for i in range(num_exp):\n x[i,:] = npr.multivariate_normal(mu_x[:,i], sigma*cov)\n print(x.shape)\n np.save('data/syn_mixture.npy', x)", "def gaussian_gen(ngaussian, nd, batchSize, seed, random=True):\n torch.manual_seed(seed)\n np.random.seed(seed)\n\n means, covars, weights = get_means_covars(nd, ngaussian, random)\n while True:\n # yield multi_normal.sample((batchSize,))\n yield mm_gaussian(batchSize, means, covars, weights) / 1.414", "def resample_gmms(model_set):\n samples = np.zeros(iter_num)\n\n for i in range(iter_num):\n rand_num = random()\n # For each distribution in the model\n for gmm_distro in model_set:\n # If the random number is less than the distribution's weight, where the weight is the sum of all\n # distribution's weights so far\n if rand_num < gmm_distro[3]:\n # Then sample from the distribution and save it as the path cost, then skip to the next iteration\n samples[i] = gauss(gmm_distro[0], gmm_distro[1])\n break\n\n # plt.hist(samples, bins=50, density=True)\n # plt.show()\n\n return samples", "def generate_samples(self, config, num_samples):\n tic = time.time()\n\n generator = GMM(**config)\n weights = torch.rand(config.num_components)\n generator.component_weights.set_(weights / weights.sum())\n generator.gaussian.means.set_(torch.randn(config.num_components, config.num_features))\n\n if config.covariance == 'diag':\n generator.gaussian.covars.set_(torch.rand(config.num_components, config.num_features))\n\n samples = generator.sample(num_samples)\n\n toc = time.time()\n print(f\"Generated {num_samples:,} samples in {toc-tic:.2f} seconds.\")\n\n return samples", "def _generate_gmm_data(self):\n \n # randomly generate means and covariances of GMM if params not given\n if not self.params:\n self.params = generate_GMM_params(self.n_clusters, self.spread) \n\n # sample from the GMM\n return [np.random.multivariate_normal(mean, std, size=self.n_points) \\\n for mean, std in self.params]", "def mm_gaussian(nsample, means, covars, weights):\n assert len(means) == len(covars), \"Number of means or covariance matrices inconsistant with the number of gaussians\"\n ngaussian = len(means)\n nd = means[0].size(0)\n weights.div_(weights.sum())\n # weights = torch.tensor([0.5, 0.5])\n # means = torch.tensor([[-3, 0], [3, 0]], dtype=torch.float)\n samples = torch.zeros(ngaussian, nsample, nd)\n for i, (mean, covar) in enumerate(zip(means, covars)):\n # covar = I\n # covar.div_(covar.max())\n # corr = 0.01 * (R.t() + R) + 3*I # cross correletion matrix\n # covar = corr - torch.mm(mean.unsqueeze(1), mean.unsqueeze(1).t())\n multi_normal = MultivariateNormal(loc=mean, covariance_matrix=covar)\n samples[i] = multi_normal.sample((nsample,))\n indices = np.random.permutation(nsample) # the total range of indices\n range_idx = (0, 0)\n mm_sample = samples[0] # the mixture model for the gaussian\n for i in range(ngaussian):\n n = int(0.5 + weights[i] * nsample) # the number of samples 
belonging to this\n range_idx = range_idx[1], min(n+range_idx[1], nsample)\n idx = indices[range_idx[0]:range_idx[1]]\n mm_sample[idx] = samples[i, idx]\n return mm_sample.unsqueeze(2).unsqueeze(3)", "def Gaussian_sampling(mu=1,beta=1,size=None,rng=np.random.RandomState(100)):\n \n if (not np.isscalar(mu)):\n size=mu.shape\n if np.isscalar(beta):\n beta=np.repeat(beta,size[0])\n beta.shape=(beta.size,1)\n X=np.copy(mu)\n #for i in range(size[0]):\n # for j in range(size[1]):\n # X[i,j]=rng.normal(mu[i,j],1/math.sqrt(beta[i,0]),size=1)\n X=rng.normal(loc=0,scale=1,size=size)\n X=X/np.sqrt(beta) + mu\n if (np.isscalar(mu) and size is None):\n size=1\n X=rng.normal(mu,1/math.sqrt(beta),size=size)\n #tol=100\n #X[abs(X)>tol]=tol\n return X", "def _gaussian_distribution(self, x: ndarray, mu: float, sigma: float) -> ndarray:\n return 1 / (np.sqrt(2 * np.pi) * sigma) * np.exp(\n -np.power(\n (x - mu) / sigma, 2) / 2)", "def rand_gauss(n=100, mu=[1, 1], sigma=[0.1, 0.1]):\n d = len(mu)\n res = np.random.randn(n, d)\n return np.array(res * sigma + mu)", "def __init__(self, quantity, dist_weights, gauss_params, upper_bound, lower_bound):\n self.dist_weights = dist_weights\n self.lower_bound = lower_bound\n self.upper_bound = upper_bound\n if len(self.dist_weights) != len(gauss_params):\n print(\n \"Number of distribution weights do not match number of distributions!\"\n )\n diff = len(gauss_params) - len(dist_weights)\n if diff < 0:\n print(\"Ignoring trailing distribution weights\")\n self.dist_weights = self.dist_weights[: len(dist_weights) + diff]\n else:\n print(\"Assuming default weights of 1\")\n self.dist_weights.extend([1] * diff)\n # normalize weights\n self.dist_weights = np.array(\n [float(i) / sum(self.dist_weights) for i in self.dist_weights]\n )\n # create samples\n self.samples = []\n self.gauss_params = gauss_params\n sample_size = quantity\n self.sample_min, self.sample_max = [float(\"inf\"), -float(\"inf\")]\n while True:\n # determine the gaussian to sample from for each sample\n mixture_idx = np.random.choice(\n len(self.dist_weights),\n size=sample_size,\n replace=True,\n p=self.dist_weights,\n )\n # create the samples from the respective gaussian\n temp = np.fromiter(\n (ss.norm.rvs(*(gauss_params[i])) for i in mixture_idx), dtype=np.float64\n )\n # remember mixed sampled extremas for plotting\n self.sample_min = min(self.sample_min, temp.min())\n self.sample_max = max(self.sample_max, temp.max())\n # add those samples that are within the bounds\n self.samples = np.concatenate(\n [\n self.samples,\n np.fromiter(\n [x for x in temp if x <= upper_bound and x >= lower_bound],\n dtype=np.float64,\n ),\n ]\n )\n sample_size = quantity - len(self.samples)\n if sample_size == 0:\n break", "def gauss_sample(mean, covariance):\n\n return None", "def gaussianOneSample(self, mu=0., sig=1., size=1):\n\n thisSample = np.random.normal(mu, sig, size)\n\n return thisSample", "def generate_2D_gaussian(num_points, mu, sigma=np.eye(2)):\n\tdata_set = np.array([])\n\tfor i in range(len(mu)):\n\t\tdata = np.random.multivariate_normal(mean=mu[i], cov=sigma, size=num_points)\n\t\tif data_set.size == 0:\n\t\t\tdata_set = data\n\t\telse:\n\t\t\tdata_set = np.concatenate([data_set,data])\n\treturn data_set", "def create_model() -> sklearn.mixture.GaussianMixture:\n logger.info(\"Creating Gaussian Mixture model\")\n logger.debug(\n f\"Model: GaussianMixture, n_components={data.N_CENTERS}, \"\n + f\"covariance_type={COVARIANCE_TYPE}, n_init={N_INIT}, \"\n + f\"init_params={INIT_PARAMS}\"\n )\n return 
GaussianMixture(\n n_components=data.N_CENTERS,\n covariance_type=COVARIANCE_TYPE,\n n_init=N_INIT,\n init_params=INIT_PARAMS,\n )", "def gaussian_dataset(ngaussian, nd, nsample, random=True):\n means, covars, weights = get_means_covars(nd, ngaussian, random)\n return mm_gaussian(nsample, means, covars, weights)", "def add_gaussian_noise(self, samples):\n\n if 'sigma' in self.gaussian_component:\n sigma = self.gaussian_component['sigma']\n return samples + self.random_state.normal(size=samples.shape) * sigma\n if 'sigmas' in self.gaussian_component:\n sigmas = self.gaussian_component['sigmas']\n return samples + self.random_state.normal(size=samples.shape) * sigmas\n\n return samples", "def generate_gaussian():\n amp = 10 * numpy.random.chisquare(3)\n width = numpy.random.chisquare(3)\n mean = numpy.random.uniform(-10 + width, 10 - width)\n x = numpy.linspace(-10, 10, 500)\n y = amp * numpy.exp(- (x - mean) ** 2 / width ** 2)\n add_noise(y, 0.1)\n return x, y", "def gaussian_mutation(population, **kwargs):\r\n new_pop = []\r\n for indiv in population:\r\n mutation_mask = np.random.random(size=indiv.shape) < kwargs['mutation_prob']\r\n mutated = indiv + mutation_mask * np.random.randn(indiv.shape[0]) * kwargs['sigma']\r\n new_pop.append(mutated)\r\n return new_pop", "def generate_samples(self, n_samples):", "def generate_samples(self, n_samples):", "def Gaussian_sampling2(mu=1,beta=1,size=None,rng=np.random.RandomState(100)):\n tol=1000\n if (not np.isscalar(mu)):\n size=mu.shape\n if np.isscalar(beta):\n beta=np.tile(beta,size)\n X=np.copy(mu)\n# for i in range(size[0]):\n# for j in range(size[1]):\n# X[i,j]=rng.normal(mu[i,j],1/math.sqrt(beta[i,j]),size=1)\n X=rng.normal(loc=0,scale=1,size=size)\n X=X/np.sqrt(beta) + mu\n if (np.isscalar(mu) and size is None):\n size=1\n X=rng.normal(mu,1/math.sqrt(beta),size=size)\n X[abs(X)>tol]=tol\n return X", "def generate_mog_dataset():\n\n n_per_class = 100\n dim = 2\n n_gaussians = 4\n mus = [(0, 1), (-1, 0), (0, -1), (1, 0)]\n mus = [torch.tensor(m) for m in mus]\n var = 0.05\n\n inputs, labels = [], []\n\n for id in range(n_gaussians):\n # Generate input data by mu + x @ sqrt(cov)\n cov = np.sqrt(var) * torch.eye(dim)\n mu = mus[id]\n inputs.append(mu + torch.randn(n_per_class, dim) @ cov)\n\n # Labels\n labels.append(torch.tensor(n_per_class * [1.0 if id < 2 else 0.0]))\n\n return torch.cat(inputs, dim=0), torch.cat(labels, dim=0)", "def sample(self, n_samps):\n # print('gauss trying to sample '+str(n_samps)+' from '+str(self.dist))\n # xs = np.array([self.sample_one() for n in range(n_samps)])\n xs = np.array(self.dist.sample(n_samps))\n # print('gauss sampled '+str(n_samps)+' from '+str(self.dist))\n return xs", "def generate_distribution(p1=[1.0,0.1,10000],p2=[-1.0,0.6,30000]):\n mu1,sig1,num1 = p1\n mu2,sig2,num2 = p2\n g1 = scipy.stats.norm(mu1,sig1).rvs(num1)\n g2 = scipy.stats.norm(mu2,sig2).rvs(num2)\n\n samples = np.concatenate([g1,g2])\n return samples", "def sample(self, log_pi, mu, std, tau=1.0):\n # Get all shapes [batch size, number of densities, data dimension]\n N, K, D = log_pi.shape\n # Convert to [N*D, K], easy to use for Categorical probabilities\n log_pi = log_pi.permute(0, 2, 1).view(-1, K)\n mu = mu.permute(0, 2, 1).view(-1, K)\n std = std.permute(0, 2, 1).view(-1, K)\n \n # Get mixing coefficient\n if tau == 1.0: # normal sampling, no uncertainty control\n pi = torch.exp(log_pi)\n else: # use temperature\n pi = F.softmax(log_pi/tau, dim=1)\n # Create a categorical distribution for mixing coefficients\n pi_dist = 
Categorical(probs=pi)\n # Sampling mixing coefficients to determine which Gaussian to sample from for each data\n pi_samples = pi_dist.sample()\n # Convert \n # Iteratively sample from selected Gaussian distributions\n samples = []\n for N_idx, pi_idx in enumerate(pi_samples):\n # Retrieve selected Gaussian distribution\n mu_i = mu[N_idx, pi_idx]\n std_i = std[N_idx, pi_idx]\n # Create standard Gaussian noise for reparameterization trick\n eps = torch.randn_like(std_i)\n # Sampling via reparameterization trick\n if tau == 1.0: # normal sampling, no uncertainty control\n samples.append(mu_i + eps*std_i)\n else: # use temperature\n samples.append(mu_i + eps*std_i*math.sqrt(tau))\n \n # Convert sampled data to a Tensor and reshape to [N, D]\n samples = torch.stack(samples, dim=0).view(N, D)\n \n return samples", "def normal_sample(mu, sigma):\n return mu + sigma * torch.randn_like(sigma)", "def sample_1d_gmm(n_samples=200, n_components=3, sigma=.1, random_state=None):\n rng = check_random_state(random_state)\n\n means = np.arange(n_components)\n pi = np.ones(n_components) / n_components\n\n y = rng.choice(np.arange(n_components), p=pi, size=n_samples)\n\n X = np.random.normal(size=n_samples, scale=sigma)\n X += y\n\n params = {'means': means, 'sigma': sigma, 'pi': pi}\n\n return X.reshape(-1, 1), y, params", "def test_gaussian_em():\n fname = \"gmm-3-10-0.7.npz\"\n gmm = GaussianMixtureModel.generate( fname, 3, 3 )\n k, d, M, S, w = gmm.k, gmm.d, gmm.means, gmm.sigmas, gmm.weights\n N, n = 1e6, 1e5\n\n\n X = gmm.sample( N, n )\n\n algo = GaussianMixtureEM(k, d)\n\n def report( i, O_, lhood ):\n M_, _, _ = O_\n lhood, Z, O_ = algo.run( X, None, report )\n\n M_, S_, w_ = O_\n\n M_ = closest_permuted_matrix( M, M_ )\n w_ = closest_permuted_vector( w, w_ )\n\n print w, w_\n\n print norm( M - M_ )/norm(M)\n print abs(S - S_).max()\n print norm( w - w_ ) \n\n assert( norm( M - M_ )/norm(M) < 1e-1 )\n assert (abs(S - S_) < 1 ).all()\n assert( norm( w - w_ ) < 1e-2 )", "def sample_generator(self, x):\n\n assert isinstance(x, list)\n assert len(x) > 0\n assert isinstance(x[0], tuple)\n\n mean = self.mean(x)\n sigma = self.sigma(x)\n\n d, u = np.linalg.eig(sigma)\n assert np.allclose(u@np.diag(d)@u.T, sigma)\n\n d = np.real(d)\n d[d < FUZZ] = FUZZ\n d_sqrt = np.sqrt(d)\n\n while True:\n sample = np.random.normal(loc=0, scale=1, size=len(x))\n sample = mean + u@np.diag(d_sqrt)@sample\n\n yield sample", "def _build_mixture(self) -> None:\n for mu, sigma in zip(self.mus, self.sigmas):\n self.pdfs.append(norm(mu, sigma))", "def sample_gaussian(m, v, repeat=1):\n if repeat > 1:\n v = v.squeeze()\n m = m.squeeze()\n sqrt_v = torch.cat([torch.sqrt(v)] * repeat, dim=0)\n m = torch.cat([m] * repeat, dim=0)\n else:\n sqrt_v = torch.sqrt(v)\n sample = m + torch.randn_like(sqrt_v) * sqrt_v\n return sample", "def _multivariate_gaussian(self, x, mu_k, sigma_k):\n return multivariate_normal.pdf(x, mu_k, sigma_k)", "def estimate_multi_gaussian(X):\n m, n = X.shape\n mu = mean(X, axis=0)\n sigma = cov_matrix(X, mu)\n\n return mu, sigma", "def sample_from(self):\n return numpy.random.normal(self.mu, math.sqrt(self.sigma))", "def multi_gaussian(X, mu, sigma):\n m, n = X.shape\n X = X - mu\n\n factor = X.dot(inv(sigma))\n factor = multiply(factor, X)\n factor = - (1 / 2) * sum(factor, axis=1, keepdims=True)\n\n p = 1 / (power(2 * pi, n / 2) * sqrt(det(sigma)))\n p = p * exp(factor)\n\n return p", "def gmm_sample(key, resps_c, means_c, logvar_c, varmin=1e-16):\n keys = random.split(key, 2)\n # pick gaussian to sample\n u = 
random.uniform(keys[0])\n cum_resps_c = np.cumsum(softmax(resps_c))\n cidx = np.argmax(u <= cum_resps_c)\n # sample that gaussian\n return diag_gaussian_sample(keys[1], means_c[cidx], logvar_c[cidx], varmin)", "def GaussianPosteriorSample(bs, ls) :\n def gps(args) :\n mu, log_var = args\n eps = K.random_normal(shape=(bs, ls), mean=0.0, stddev=1.0) # 10 x 2\n return mu + K.exp(log_var / 2.) * eps\n return gps", "def sample_gssm(mu, sigma, cross_cov, size=1, random_state=None):\n if mu.ndim == 1:\n mu = mu.reshape(-1, 1)\n sigma = np.expand_dims(sigma.reshape(-1, 1), axis=-1)\n cross_cov = np.expand_dims(cross_cov.reshape(-1, 1), axis=-1)\n\n n_time_steps, n_features = mu.shape\n rng = check_random_state(random_state)\n\n x = np.zeros((size, n_time_steps, n_features))\n\n x[:, 0] = rng.multivariate_normal(mu[0], sigma[0], size=size)\n for t in range(1, n_time_steps):\n # calculate conditional means and covariances\n sigma_inv = np.linalg.pinv(sigma[t-1])\n mu_cond = (mu[t].reshape(-1, 1) +\n cross_cov[t-1].T @ sigma_inv @ (x[:, t-1] - mu[t-1]).T).T\n cov_cond = sigma[t] - cross_cov[t-1].T @ sigma_inv @ cross_cov[t-1]\n\n # sample from conditional\n cov_sqrt = scipy.linalg.sqrtm(cov_cond)\n x[:, t] = (mu_cond + (\n cov_sqrt @ rng.randn(n_features * size).reshape(n_features, size)).T)\n\n return np.squeeze(x)", "def generate_samples(self):\n self.analytic_probability()", "def sample(self, size):\n if len(self.observations) == 0:\n raise ValueError(\"need to fit KDE with observations\")\n\n samples = np.zeros(size)\n for i in range(size):\n \n # randomly select an anchor point for a gaussian\n anchor_point = np.random.choice(self.observations)\n\n # then sample from that gaussian\n sample = np.random.normal(loc=anchor_point, scale=self.bandwidth)\n samples[i] = sample\n return samples", "def rand_sample_gauss():\n mean = float(NUM_UNIQUE_VALUES + 1) / 2\n while True:\n r = random.normalvariate(mean, DIST_PARAM)\n value = int(round(r))\n # Rejection sampling to cut off Gaussian to within [1, NUM_UNIQUE_VALUES]\n if 1 <= value <= NUM_UNIQUE_VALUES:\n break\n\n return value # true client value", "def generate_x(number_dimensions, T_train, T_test, mu, feature_model):\n number_training_obeservations = T_train.shape[0]\n number_testing_obeservations = T_test.shape[0]\n\n X_train = np.zeros((number_training_obeservations,number_dimensions))\n X_test = np.zeros((number_testing_obeservations,number_dimensions))\n\n mixture_indicator_train = generate_mixture_indicator(number_training_obeservations)\n mixture_indicator_test = generate_mixture_indicator(number_testing_obeservations)\n\n G = np.random.normal(0,1,(number_dimensions,number_dimensions))\n q, r = np.linalg.qr(G)\n\n mu1 = mu*np.ones(number_dimensions)\n mu2 = -mu*np.ones(number_dimensions)\n\n if feature_model == \"A\":\n eigenvalues1 = np.random.uniform(0,1,(number_dimensions,1))\n eigenvalues1 = np.sort(eigenvalues1, axis = 0)/np.sum(eigenvalues1)\n lambda1 = np.identity(number_dimensions)\n np.fill_diagonal(lambda1,eigenvalues1)\n cov1 = q@lambda1@q.T\n\n for i in range(number_training_obeservations):\n if T_train[i] == 0:\n X_train[i,:] = np.random.multivariate_normal(mu1,cov1,1)\n else:\n X_train[i,:] = np.random.multivariate_normal(mu2,cov1,1)\n \n for i in range(number_testing_obeservations):\n if T_test[i] == 0:\n X_test[i,:] = np.random.multivariate_normal(mu1,cov1,1)\n else:\n X_test[i,:] = np.random.multivariate_normal(mu2,cov1,1)\n\n\n elif feature_model == \"B\":\n eigenvalues1 = np.random.uniform(0,1,(number_dimensions,1))\n 
eigenvalues1 = np.sort(eigenvalues1, axis = 0)/np.sum(eigenvalues1)\n lambda1 = np.identity(number_dimensions)\n np.fill_diagonal(lambda1,eigenvalues1)\n cov1 = q@lambda1@q.T\n\n eigenvalues2 = np.random.uniform(0,1,(number_dimensions,1))\n eigenvalues2 = np.sort(eigenvalues2, axis = 0)[::-1]/np.sum(eigenvalues2)\n lambda2 = np.identity(number_dimensions)\n np.fill_diagonal(lambda2,eigenvalues2)\n cov2 = q@lambda2@q.T\n\n\n for i in range(number_training_obeservations):\n if T_train[i] == 0:\n if mixture_indicator_train[i] == 0:\n X_train[i,:] = np.random.multivariate_normal(mu1,cov1,1)\n else:\n X_train[i,:] = np.random.multivariate_normal(mu1,cov2,1)\n else:\n if mixture_indicator_train[i] == 0:\n X_train[i,:] = np.random.multivariate_normal(mu2,cov1,1)\n else:\n X_train[i,:] = np.random.multivariate_normal(mu2,cov2,1)\n \n for i in range(number_testing_obeservations):\n if T_test[i] == 0:\n if mixture_indicator_test[i] == 0:\n X_test[i,:] = np.random.multivariate_normal(mu1,cov1,1)\n else:\n X_test[i,:] = np.random.multivariate_normal(mu1,cov2,1)\n else:\n if mixture_indicator_test[i] == 0:\n X_test[i,:] = np.random.multivariate_normal(mu2,cov1,1)\n else:\n X_test[i,:] = np.random.multivariate_normal(mu2,cov2,1)\n\n train_mean = np.mean(X_train, axis = 0)\n train_std = np.std(X_train, axis = 0)\n X_train = (X_train - train_mean)/train_std\n X_test = (X_test - train_mean)/train_std\n \n return X_train, X_test", "def _generate_distribution_samples(self, set_count, parameter_count):\n self._samples = numpy.zeros((set_count, parameter_count))\n for i, distribution in enumerate(self.parameter_distributions.values()):\n self._samples[:, i] = distribution.ppf(self._quantiles[:, i])", "def test_sample(self, normal_mock):\n # Setup\n instance = GaussianMultivariate(GaussianUnivariate)\n data = pd.DataFrame([\n {'A': 25, 'B': 75, 'C': 100},\n {'A': 30, 'B': 60, 'C': 250},\n {'A': 10, 'B': 65, 'C': 350},\n {'A': 20, 'B': 80, 'C': 150},\n {'A': 25, 'B': 70, 'C': 500}\n ])\n instance.fit(data)\n\n normal_mock.return_value = np.array([\n [0.1, 0.1, 0.1],\n [0.2, 0.2, 0.2],\n [0.4, 0.4, 0.4],\n [0.6, 0.6, 0.6],\n [0.8, 0.8, 0.8]\n ])\n\n expected_result = pd.DataFrame([\n {'A': 22.678232998312527, 'B': 70.70710678118655, 'C': 284.35270009440734},\n {'A': 23.356465996625055, 'B': 71.41421356237309, 'C': 298.7054001888146},\n {'A': 24.712931993250110, 'B': 72.82842712474618, 'C': 327.4108003776293},\n {'A': 26.069397989875164, 'B': 74.24264068711929, 'C': 356.116200566444},\n {'A': 27.425863986500215, 'B': 75.65685424949238, 'C': 384.8216007552586}\n ])\n\n # Run\n result = instance.sample(5)\n\n # Check\n assert result.equals(expected_result)\n\n assert normal_mock.called_once_with(\n np.zeros(instance.covariance.shape[0]),\n instance.covariance,\n 5\n )", "def sample(self):\n M = np.random.normal(self._mu.reshape(-1), self._sig).reshape(self.shape)\n return M", "def test_gaussian_basis_hon(self):\n def row_generator():\n return [random.gauss(0, 1) for i in range(self.d)]\n\n self._test_sample_basis_hon(row_generator)", "def sample_from_mixture(x, pred_weights, pred_means, pred_std, amount):\n samples = np.zeros((amount, 2))\n n_mix = len(pred_weights[0])\n to_choose_from = np.arange(n_mix)\n for j, (weights, means, std_devs) in enumerate(\n zip(pred_weights, pred_means, pred_std)):\n index = np.random.choice(to_choose_from, p=weights)\n samples[j, 1] = np.random.normal(means[index], std_devs[index], size=1)\n samples[j, 0] = x[j]\n\n if j == amount - 1:\n break\n return samples", "def 
_sample_schechter(x0, alpha, x_min, size=100, max_iter=1000):\n out = []\n n = 0\n num_iter = 0\n while (n<size) & (num_iter<max_iter):\n x = np.random.gamma(scale=x0, shape=alpha+2, size=size)\n x = x[x>x_min]\n u = np.random.uniform(size=x.size)\n x = x[u<x_min/x]\n out.append(x)\n n+=x.size\n num_iter += 1\n\n if num_iter >= max_iter:\n msg = (\"The maximum number of iterations reached.\",\n \"Random variates may not be representitive.\",\n \"Try increasing `max_iter`.\")\n print(msg)\n\n return np.concatenate(out)[:size]", "def make_gmm_dataset(config='random', classes=10,dim=2,samples=10,spread = 1,\n shift=None, rotate=None, diagonal_cov=False, shuffle=True):\n means, covs, distribs = [], [], []\n _configd = gmm_configs[config]\n spread = spread if (config == 'random' or not 'spread' in _configd) else _configd['spread']\n shift = shift if (config == 'random' or not 'shift' in _configd) else _configd['shift']\n\n for i in range(classes):\n if config == 'random':\n mean = torch.randn(dim)\n cov = create_symm_matrix(1, dim, verbose=False).squeeze()\n elif config == 'star':\n mean = gmm_configs['star']['means'][i]\n cov = gmm_configs['star']['covs'][i]\n if rotate:\n mean = rot(mean, rotate)\n cov = rot_evecs(cov, rotate)\n\n if diagonal_cov:\n cov.masked_fill_(~torch.eye(dim, dtype=bool), 0)\n\n means.append(spread*mean)\n covs.append(cov)\n distribs.append(MultivariateNormal(means[-1],covs[-1]))\n\n X = torch.cat([P.sample(sample_shape=torch.Size([samples])) for P in distribs])\n Y = torch.LongTensor([samples*[i] for i in range(classes)]).flatten()\n\n if shift:\n print(X.shape)\n X += torch.tensor(shift)\n\n if shuffle:\n indx = torch.arange(Y.shape[0])\n print(indx)\n X = X[indx, :]\n Y = Y[indx]\n return X, Y, distribs", "def gen_4_normal():\n return [mn(mean=np.array([1.0, 1.0]),\n cov=np.array([[1.0, 0.0], [0.0, 1.0]])),\n mn(mean=np.array([1.0, -1.0]),\n cov=np.array([[1.0, 0.0], [0.0, 1.0]])),\n mn(mean=np.array([-1.0, -1.0]),\n cov=np.array([[1.0, 0.0], [0.0, 1.0]])),\n mn(mean=np.array([-1.0, 1.0]),\n cov=np.array([[1.0, 0.0], [0.0, 1.0]]))]", "def test_gaussian_mixture_num_components(n_mixture_components):\n # Set random seed\n set_random_seed_from_args(\n \"test_gaussian_mixture_num_components\",\n n_mixture_components,\n )\n # Initialise input arguments\n output_dim = 4\n n_train = np.random.randint(10, 20)\n n_test = np.random.randint(10, 20)\n input_dim = np.random.randint(2, 5)\n # Initialise data set\n classification_data = data.MixtureOfGaussians(\n input_dim=input_dim,\n output_dim=output_dim,\n n_train=n_train,\n n_test=n_test,\n n_mixture_components=n_mixture_components,\n )\n assert classification_data.train.x.shape == (input_dim, n_train)\n assert classification_data.test.x.shape == (input_dim, n_test)\n assert classification_data.train.labels.shape == (n_train, )\n assert classification_data.test.labels.shape == (n_test, )\n assert classification_data.train.y.shape == (output_dim, n_train)\n assert classification_data.test.y.shape == (output_dim, n_test)", "def distribute_Gaussian(self):\n\n sigma_x = np.sqrt(self.emitx*self._betax)\n sigma_xp = np.sqrt(self.emitx*self._gammax)\n\n sigma_y = np.sqrt(self.emity*self._betay)\n sigma_yp = np.sqrt(self.emity*self._gammay)\n\n self.particles[:,0] = np.random.randn(self.npart)*sigma_x #set x-coordinates\n self.particles[:,1] = np.random.randn(self.npart)*sigma_xp #set xp-coordinates\n self.particles[:,2] = np.random.randn(self.npart)*sigma_y #set y-coordinates\n self.particles[:,3] = 
np.random.randn(self.npart)*sigma_yp #set yp-coordinates", "def sample(self, size=1):\n samples = scipy.stats.multivariate_normal.rvs(\n self.mu, self.sigma, size=size\n )\n return samples", "def create_gaussian_data(self, mean, std, nPoints, nClusters, nDimension):\n dataset = np.zeros((nClusters, nPoints, nDimension), dtype=float)\n for i in range(nClusters):\n cov = std[i] ** 2\n dataset[i, :, :] = np.random.multivariate_normal(mean[i], cov, nPoints)\n\n return dataset", "def generate_data(params, sigma):\n rng = random.PRNGKey(0)\n k = len(params) // 2\n a_array = params[:k]\n b_array = params[k:]\n n = 20 * k\n xs = sample_our_uniform(n, 1, rng).reshape((n,))\n ys = onp.zeros(n)\n all_indices = set(onp.arange(n))\n for i in range(k):\n i_idxs = onp.random.choice(list(all_indices), 20, replace=False)\n all_indices = set(all_indices) - set(i_idxs)\n ys[i_idxs] = xs[i_idxs] * a_array[i] + b_array[i] + onp.random.normal(0, sigma, size=(20,))\n return xs, ys", "def get_gauss_kernel(sigma, samples):\n p = ny.ceil (2*ny.sqrt(2*ny.log(2))*sigma)\n r = ny.linspace(-p, p, samples)\n x,y = ny.meshgrid(r, r)\n b=bivariate_normal(x,y,sigma,sigma)\n A=(1/ny.sum(b))\n B=A*b\n return x,y,B", "def build_random_gaussian_process(points_sampled, covariance, noise_variance=None, gaussian_process_type=GaussianProcess):\n if noise_variance is None:\n noise_variance = numpy.zeros(points_sampled.shape[0])\n\n gaussian_process = gaussian_process_type(covariance, HistoricalData(points_sampled.shape[1]))\n for i, point in enumerate(points_sampled):\n # Draw function value from the GP\n function_value = gaussian_process.sample_point_from_gp(point, noise_variance=noise_variance[i])\n # Add function value back into the GP\n sample_point = [SamplePoint(point, function_value, noise_variance[i])]\n gaussian_process.add_sampled_points(sample_point)\n\n return gaussian_process", "def process_0(self):\n raw_data = self.pull_data(self.sub_folder)\n\n prepped_data = self._prep_data(raw_data)\n\n print(len(prepped_data))\n\n\n gmm = GaussianMixture(5)\n\n gmm.fit(prepped_data)\n\n return gmm.means_", "def uni_gaussian(X, mu, sigma2):\n p = (1 / sqrt(2 * pi * sigma2))\n p = p * exp(-power(X - mu, 2) / (2 * sigma2))\n\n def prod(x, y):\n return x * y\n p = array([[reduce(prod, el)] for el in p])\n\n return p", "def sample(self, like_params):\n\t\tassert len(like_params) == 1, f\"SphericalGaussianLikelihood only takes\"\\\n\t\t\t\t+ f\" a single parameter. 
Found {len(like_params)}.\"\n\t\t# Unwrap the single parameter tuple.\n\t\tlike_params = like_params[0] # [b,s,m,m_dim]\n\t\t# Make a Gaussian distribution.\n\t\tdist = Normal(\n\t\t\t\tlike_params,\n\t\t\t\tself.std_dev*torch.ones_like(like_params),\n\t\t)\n\t\tsamples = dist.sample()\n\t\treturn (samples,)", "def sample_mixture(sample_M1, sample_M2, M1_to_M2_ratio): \n\n i = 0\n j = 0\n mixture_sample = np.random.randn(sample_M1.shape[0])[:,None]\n while (i < len(sample_M1.T)) and (j < len(sample_M2.T)):\n u = np.random.uniform()\n if u < (M1_to_M2_ratio / (M1_to_M2_ratio + 1)):\n sample_new = sample_M1[:,i] \n mixture_sample = np.concatenate((mixture_sample, sample_new[:,None]), axis = 1)\n i += 1\n else:\n sample_new = sample_M2[:,j] \n mixture_sample = np.concatenate((mixture_sample, sample_new[:,None]), axis = 1)\n j+=1\n return mixture_sample[:,1:]", "def sample_GP_prior(x_test, mean_func, cov_func, kernel_params, \n\t\t\t\t\tseed=42, n_samples=5):\n m = mean_func(x_test)\n k = cov_func(x_test, x_test, *kernel_params)\n prng = np.random.RandomState(int(seed))\n sample = prng.multivariate_normal(m, k, n_samples)\n return sample", "def eachDigitGMM(data, cfg):\r\n\r\n models = {}\r\n for j in range(len(data)):\r\n train_set = data[j][0]\r\n for i in range(1, len(data[j])):\r\n train_set = np.concatenate((train_set, data[j][i]), axis=0)\r\n\r\n estimator = GaussianMixture(n_components=cfg['components'], max_iter=cfg['max_iterations'],\r\n tol=cfg['tolerance'], covariance_type=cfg['covariance_type'])\r\n models[j] = estimator.fit(train_set)\r\n\r\n return models", "def sample_gaussian(self, batch_size):\n return Variable(t.randn([batch_size, self.params.latent_variable_size])) # Dimension [batch_size x latent_dim]", "def simple_sampler(fun, start, sigma, iterations, verbose=False):\n mean = np.zeros(len(start))\n cov = np.eye(len(start)) * sigma\n\n if isinstance(start, np.ndarray):\n previous = start\n else:\n previous = np.array(start)\n\n f_previous = fun(previous)\n\n samples = np.zeros((iterations, len(start)))\n acceptance = 0\n for i in range(iterations):\n proposal = previous + np.random.multivariate_normal(mean=mean, cov=cov)\n f_proposal = fun(proposal)\n fun(previous)\n if (np.log(np.random.rand())) < (f_proposal - f_previous):\n previous = proposal\n acceptance += 1\n samples[i] = np.array(previous)\n\n if verbose:\n print('sampler acceptance = {0:.3f}'.format(acceptance / iterations))\n\n return samples", "def bigaussian(\n n_particles: int,\n mean: Tuple[float, float, float, float, float],\n geometric_emittance_h: float,\n geometric_emittance_v: float,\n sigma_p: float,\n) -> np.array:\n cov = np.diag(\n (\n geometric_emittance_h,\n geometric_emittance_h,\n geometric_emittance_v,\n geometric_emittance_v,\n sigma_p ** 2,\n )\n )\n return np.random.multivariate_normal(mean, cov, n_particles).T", "def generate_normal_data(\n mu: float = 0,\n var: float = 1,\n n_features: int = 20,\n n_samples: int = 2000,\n seed: int = 42\n) -> np.array:\n x = []\n for i in range(n_features):\n np.random.seed(seed + i)\n x_ = np.random.normal(mu, var, n_samples).reshape(-1, 1)\n x.append(x_)\n x = np.hstack(x)\n x = StandardScaler().fit_transform(x)\n return x", "def _make_gaussian_matrix(\n data_count: int,\n feature_count: int,\n) -> np.ndarray:\n return np.random.randn(data_count, feature_count)", "def sampleGaussian(self, mu, log_sigma):\n # reparameterization trick\n epsilon = tf.random_normal(tf.shape(log_sigma), name=\"epsilon\")\n return mu + epsilon * tf.exp(log_sigma) # N(mu, I * 
sigma**2)", "def gaussianise_series(self, train_x):\n\n n_batches = train_x.shape[0]\n\n for batch in range(n_batches):\n train_x[batch, :, :] = gaussianise(train_x[batch, :, :], target_sigma=1.0)\n\n return train_x", "def gmm(X, k):\n mix = sklearn.mixture.GaussianMixture(n_components=k).fit(X)\n pi = mix.weights_\n m = mix.means_\n S = mix.covariances_\n clss = mix.predict(X)\n bic = mix.bic(X)\n\n return pi, m, S, clss, bic", "def sample(self, mean, std, n_samples, sample_seed):\n self.seed_samples(sample_seed)\n self.Y_mean = mean\n self.Y_std = std\n samples = torch.zeros([self.batch_size, n_samples, self.Y_dim],\n device=self.device)\n # Determine first vs. second Gaussian\n unif2 = torch.rand(self.batch_size, n_samples)\n second_gaussian = (self.w2 > unif2)\n # Sample from second Gaussian\n samples2 = self.sample_full_rank(n_samples, self.mu2,\n self.tril_elements2, as_numpy=False)\n samples[second_gaussian, :] = samples2[second_gaussian, :]\n # Sample from first Gaussian\n samples1 = self.sample_full_rank(n_samples, self.mu,\n self.tril_elements, as_numpy=False)\n samples[~second_gaussian, :] = samples1[~second_gaussian, :]\n samples = samples.data.cpu().numpy()\n return samples", "def multivariate_gauss_prob(observed, mean, covariance):\n\n return None", "def make_gaussian_dataset(batch_size: int,\n input_dim: int,\n seed: int = 0) -> base.BatchIterator:\n sample_fn = jax.jit(lambda x: jax.random.normal(x, [batch_size, input_dim]))\n def batch_iterator():\n rng = hk.PRNGSequence(seed)\n while True:\n x = sample_fn(next(rng))\n yield base.Batch(x, y=jnp.ones([x.shape[0], 1]))\n return batch_iterator()", "def sample(self, n_samples, sample_seed):\n self.seed_samples(sample_seed)\n samples = torch.zeros([self.batch_size, n_samples, self.Y_dim], device=self.device)\n # Determine first vs. 
second Gaussian\n unif2 = torch.rand(self.batch_size, n_samples)\n second_gaussian = (self.w2 > unif2)\n # Sample from second Gaussian\n samples2 = self.sample_full_rank(n_samples, self.mu2, self.tril_elements2, as_numpy=False)\n samples[second_gaussian, :] = samples2[second_gaussian, :]\n # Sample from first Gaussian\n samples1 = self.sample_full_rank(n_samples, self.mu, self.tril_elements, as_numpy=False)\n samples[~second_gaussian, :] = samples1[~second_gaussian, :]\n samples = samples.data.cpu().numpy()\n return samples", "def samples(self, gp):\r\n orig_shape = gp.shape\r\n gp = gp.flatten()\r\n Ysim = np.array([np.random.normal(self.gp_link.transf(gpj), scale=np.sqrt(self.variance), size=1) for gpj in gp])\r\n return Ysim.reshape(orig_shape)", "def sample_gaussian(self, probabilities):\n return tf.add(probabilities, tf.random.normal(probabilities.shape, mean=0.0, stddev=1.0))", "def model_gauss(xsigma, nx, ny=1, nz=1, ysigma=None, zsigma=None, xcenter=None, ycenter=None, zcenter=None):\n\te = EMData()\n\te.set_size(nx, ny, nz)\n\tif( ysigma == None ) : ysigma = xsigma\n\tif( zsigma == None ) : zsigma = xsigma\n\tif( xcenter == None ) : xcenter = nx//2\n\tif( ycenter == None ) : ycenter = ny//2\n\tif( zcenter == None ) : zcenter = nz//2\n\te.process_inplace(\"testimage.puregaussian\", {\"x_sigma\":xsigma,\"y_sigma\":ysigma,\"z_sigma\":zsigma,\"x_center\":xcenter,\"y_center\":ycenter,\"z_center\":zcenter} )\n\treturn e", "def create_gaussian_filter(size, sigma):\n h = size[0] #height of the template\n w = size[1] #width of the template \n if h % 2 == 0: h += 1 #add 1 if dimensions are even\n if w % 2 == 0: w += 1\n x = math.floor(h/2)\n y = math.floor(w/2) \n sum = 0\n #create our template\n template = np.zeros((h,w))\n #fill the template in with the numbers from Gaussian distribution\n for i in range(h):\n for j in range(w):\n template[i,j] = math.exp(-((((j-x)**2)+((i-y)**2))/(2*(sigma**2))))\n sum = sum + template[i,j]\n #normalise the numbers\n gaussian_filter = template/sum\n return gaussian_filter", "def sample_from(self):\n return numpy.random.lognormal(mean=self.mu, sigma=self.sigma)", "def test_gaussian():\n generator = SignalGenerator()\n data = generator.random_gaussian(means=[1, 0, -1], stds=[0.1, 0.1, 0.1])\n freq_features = FrequencyFeature(data, sr=50)\n freq_features.fft().peaks()\n top_n = range(1, 11)\n top_n_dominant_frequencies = np.concatenate(\n list(map(freq_features.dominant_frequency_power, top_n)), axis=0)\n std_top_n_dfs = np.std(top_n_dominant_frequencies, axis=0)\n assert np.all(std_top_n_dfs < 0.001)", "def sample(self, n_samples, sample_seed):\n self.seed_samples(sample_seed)\n samples = torch.zeros([self.batch_size, n_samples, self.Y_dim], device=self.device)\n # Determine first vs. 
second Gaussian\n unif2 = torch.rand(self.batch_size, n_samples)\n second_gaussian = (self.w2 > unif2)\n # Sample from second Gaussian\n samples2 = torch.Tensor(self.sample_low_rank(n_samples, self.mu2, self.logvar2, self.F2))\n samples[second_gaussian, :] = samples2[second_gaussian, :]\n # Sample from first Gaussian\n samples1 = torch.Tensor(self.sample_low_rank(n_samples, self.mu, self.logvar, self.F))\n samples[~second_gaussian, :] = samples1[~second_gaussian, :]\n samples = samples.data.cpu().numpy()\n return samples", "def samples(self, gp, Y_metadata=None, samples=1):\n raise NotImplementedError(\"\"\"May be possible to use MCMC with user-tuning, see\n MCMC_pdf_samples in likelihood.py and write samples function\n using this, beware this is a simple implementation\n of Metropolis and will not work well for all likelihoods\"\"\")", "def generate_sample_data(m=1, noise_level=0.005, gauss_sigma=0.1):\n from imtools import thresholding_functions\n\n data3d = np.zeros((100 * m, 100 * m, 100 * m), dtype=np.int)\n\n # size 8\n data3d_new = np.ones((100 * m, 100 * m, 100 * m), dtype=np.bool)\n data3d_new[0:30 * m, 20 * m, 20 * m] = 0\n data3d_new[scipy.ndimage.distance_transform_edt(data3d_new) <= 8 * m] = 0\n data3d[data3d_new == 0] = 1\n # size 7\n data3d_new = np.ones((100 * m, 100 * m, 100 * m), dtype=np.bool)\n data3d_new[31 * m:70 * m, 20 * m, 20 * m] = 0\n data3d_new[scipy.ndimage.distance_transform_edt(data3d_new) <= 7 * m] = 0\n data3d[data3d_new == 0] = 1\n # size 6\n data3d_new = np.ones((100 * m, 100 * m, 100 * m), dtype=np.bool)\n data3d_new[70 * m, 20 * m:50 * m, 20 * m] = 0\n data3d_new[31 * m, 20 * m, 20 * m:70 * m] = 0\n data3d_new[scipy.ndimage.distance_transform_edt(data3d_new) <= 6 * m] = 0\n data3d[data3d_new == 0] = 1\n # size 5\n data3d_new = np.ones((100 * m, 100 * m, 100 * m), dtype=np.bool)\n data3d_new[70 * m:95 * m, 20 * m, 20 * m] = 0\n data3d_new[31 * m:60 * m, 20 * m, 70 * m] = 0\n data3d_new[70 * m:90 * m, 50 * m, 20 * m] = 0\n data3d_new[70 * m, 50 * m, 20 * m:50 * m] = 0\n data3d_new[31 * m, 20 * m: 45 * m, 20 * m] = 0\n data3d_new[scipy.ndimage.distance_transform_edt(data3d_new) <= 5*m] = 0\n data3d[data3d_new == 0] = 1\n # size 4\n data3d_new = np.ones((100*m, 100*m, 100*m), dtype=np.bool)\n data3d_new[31*m, 20*m:50*m, 70*m] = 0\n data3d_new[scipy.ndimage.distance_transform_edt(data3d_new) <= 4*m] = 0\n data3d[data3d_new == 0] = 1\n # size 3\n data3d_new = np.ones((100*m, 100*m, 100*m), dtype=np.bool)\n data3d_new[31*m:50*m, 50*m, 70*m] = 0\n data3d_new[31*m:50*m, 45*m, 20*m] = 0\n data3d_new[70*m, 50*m:70*m, 50*m] = 0\n data3d_new[70*m:80*m, 50*m, 50*m] = 0\n data3d_new[scipy.ndimage.distance_transform_edt(data3d_new) <= 3*m] = 0\n data3d[data3d_new == 0] = 1\n\n data3d = data3d*3030 # 3030+5920 = vessel value\n data3d += 5920 # 5920 = background value\n\n if gauss_sigma > 0:\n sigma = np.round(gauss_sigma, 2)\n sigmaNew = thresholding_functions.calculateSigma([1, 1, 1], sigma)\n data3d = thresholding_functions.gaussFilter(data3d, sigmaNew)\n\n if noise_level > 0:\n noise = np.random.normal(1, noise_level, (100*m, 100*m, 100*m))\n data3d = data3d*noise\n\n return data3d", "def random_glove_generator(emb_mean, emb_stddev):\n x = np.random.normal(loc=0.0, scale=1.0, size=len(emb_mean))\n x_rand = np.multiply(x, emb_stddev) + emb_mean\n return x_rand", "def resample_from_mf(self):\n self.g = np.zeros((self.K, self.K, self.B))\n for k1 in xrange(self.K):\n for k2 in xrange(self.K):\n self.g[k1,k2,:] = np.random.dirichlet(self.mf_gamma[k1,k2,:])", "def 
gen_energies(n_muons):\r\n pdist, bounds = fit_energylaw()\r\n samples = monte_carlo_sample(pdist, bounds, n_muons)\r\n return samples", "def data_gauss(N0, N1=None, mu0=arr([0, 0]), mu1=arr([1, 1]), sig0=np.eye(2), sig1=np.eye(2)):\n\tif not N1:\n\t\tN1 = N0\n\n\td1,d2 = twod(mu0).shape[1],twod(mu1).shape[1]\n\tif d1 != d2 or np.any(twod(sig0).shape != arr([d1, d1])) or np.any(twod(sig1).shape != arr([d1, d1])):\n\t\traise ValueError('data_gauss: dimensions should agree')\n\n\tX0 = np.dot(np.random.randn(N0, d1), sqrtm(sig0))\n\tX0 += np.ones((N0,1)) * mu0\n\tY0 = -np.ones(N0)\n\n\tX1 = np.dot(np.random.randn(N1, d1), sqrtm(sig1))\n\tX1 += np.ones((N1,1)) * mu1\n\tY1 = np.ones(N1)\n\n\tX = np.row_stack((X0,X1))\n\tY = np.concatenate((Y0,Y1))\n\n\treturn X,Y", "def rand_tri_gauss(n1=100, n2=100, n3=100, mu1=[1, 1],\n mu2=[-1, -1], mu3=[1, -1], sigma1=[0.1, 0.1],\n sigma2=[0.1, 0.1], sigma3=[0.1, 0.1]):\n ex1 = rand_gauss(n1, mu1, sigma1)\n ex2 = rand_gauss(n2, mu2, sigma2)\n ex3 = rand_gauss(n3, mu3, sigma3)\n res = np.vstack([np.hstack([ex1, 1. * np.ones((n1, 1))]),\n np.hstack([ex2, 2. * np.ones((n2, 1))]),\n np.hstack([ex3, 3. * np.ones((n3, 1))])])\n ind = np.arange(res.shape[0])\n np.random.shuffle(ind)\n return np.array(res[ind, :])", "def sampling(args):\n z_mean, z_log_sigma = args\n epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim), mean = 0., stddev=0.1)\n return z_mean + K.exp(z_log_sigma) * epsilon", "def gnoise(mag, sigma, mu):\n noise = np.random.normal(mu,sigma,n)\n mag = mag + noise\n return mag, noise", "def multivariateGaussian(X, mu, sigma2):\n\tk = len(mu)\n\n\tif sigma2.ndim == 1:\n\t\t# convert sigma2 to a diagonal matrix\n\t\tsigma2 = np.diag(sigma2)\n\n\t# vectorized version of Multivariate Gaussian Distribution\n\tX = X - mu\n\t# p is a vector contains all probabilities of each examples\n\tp = (2 * np.pi) ** (- k / 2.0) * np.linalg.det(sigma2) ** (-0.5) * \\\n\t np.exp(-0.5 * np.sum(X.dot(np.linalg.pinv(sigma2)) * X, axis=1))\n\n\treturn p", "def gaussian(mu, wid, x):\n return np.exp(-((x - mu) / (0.6005612 * wid))**2)", "def combined_gaussian(amps, fwhms, means, x):\n if len(amps) > 0.:\n for i in range(len(amps)):\n gauss = gaussian(amps[i], fwhms[i], means[i], x)\n if i == 0:\n combined_gauss = gauss\n else:\n combined_gauss += gauss\n else:\n combined_gauss = np.zeros(len(x))\n return combined_gauss", "def create_gaussian_array(self):\n\n # Fill array of size l x w with Gaussian Noise.\n terrain_length = int(ceil(self.length/self.resolution))\n terrain_width = int(ceil(self.width/self.resolution))\n gaussian_array = np.random.normal(self.mu, self.sigma, (terrain_length,terrain_width))\n\n # Filter the array to smoothen the variation of the noise\n gaussian_array = gaussian_filter(gaussian_array, self.sigma_filter)\n\n return gaussian_array", "def apply_gaussian(X, sigma):\n return np.array([ndimage.gaussian_filter(x, sigma) for x in X])", "def generate_samples(self, num_samples=100):\n samples = np.random.random_sample((num_samples, self.input_size))\n samples[:, self.attribute_size:] *= (self.sample_range[1] - self.sample_range[0]) + self.sample_range[0]\n # convert the attribute vector part to 0/1 representation\n samples[:, :self.attribute_size] = np.rint(samples[:, :self.attribute_size])\n return samples", "def create_multivariat(mean, cov, n,show):\n\tif n==1:\n\t\tx=np.random.default_rng().multivariate_normal(mean, cov)\n\telse:\n\t\tx=np.random.default_rng().multivariate_normal(mean, cov, n)\n\tif show:\n\t 
\tdf=pd.DataFrame({'x':x[:,0],'y':x[:,1]})\n\t \tsns.jointplot(data=df,x='x',y='y')\n\treturn x", "def sample_one(self):\n # x = self.mean + self.sigma * np.random.normal()\n x = self.dist.sample(1)\n return x" ]
[ "0.71659434", "0.70966846", "0.7007388", "0.6865769", "0.677201", "0.67334753", "0.6702295", "0.6684492", "0.6668763", "0.66618276", "0.6634252", "0.6606611", "0.6601144", "0.648774", "0.6463612", "0.64621603", "0.6461179", "0.64429975", "0.6393839", "0.6344043", "0.6344043", "0.6343068", "0.6329106", "0.631779", "0.63107026", "0.62981284", "0.6278677", "0.62731946", "0.6270396", "0.626938", "0.62449247", "0.62349045", "0.6227573", "0.62014997", "0.6195601", "0.61948234", "0.6183793", "0.6177709", "0.61657274", "0.6162885", "0.6147579", "0.6146362", "0.61395574", "0.6134022", "0.6131083", "0.6119985", "0.61179143", "0.6115477", "0.60991704", "0.6098225", "0.60899913", "0.60893446", "0.60805136", "0.60733885", "0.60564333", "0.60557884", "0.60509574", "0.6045932", "0.6043845", "0.60329354", "0.6031446", "0.6026609", "0.60255516", "0.6024089", "0.6019554", "0.60132205", "0.6013032", "0.60119593", "0.60005033", "0.59999293", "0.5998947", "0.5989044", "0.5956168", "0.5954456", "0.59462804", "0.59456563", "0.59364945", "0.59334016", "0.5921941", "0.5921595", "0.5910338", "0.5904641", "0.5901129", "0.5899244", "0.58974075", "0.58804214", "0.5878164", "0.5869501", "0.58693796", "0.5865824", "0.5862511", "0.58575237", "0.5850643", "0.5843576", "0.583566", "0.5833949", "0.58310485", "0.58283114", "0.58230656", "0.5821011" ]
0.70466185
2
Show info to the user depending on verbosity level
def message(self, data, newline="\n"):
    # Are we logging to screen, file or both?
    if not self.quiet:
        print(data)

    if self.log_fo:
        self.log_fo.write(data + newline)
        self.log_fo.flush()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def verbosity(v):\n assert v in [0,1,2] # debug, warn, info\n GLOBAL['VERBOSITY'] = v", "def say(self, verbosity, msg):\n if self.verbosity >= verbosity:\n print(msg)", "def verbose():\n return Verbose.level()", "def _verbose(self,text):\n if self.verbose:\n print(text)", "def verbose(ctx, msg, *args):\n if ctx.verbose:\n info(msg, *args)", "def info(self, message):\n for_verbosity = 2\n if self.verbosity_level >= for_verbosity:\n self.logger.info(message)", "def is_verbose():\n return g_verbose", "def print_verbose(args, msg):\n if args.verbose:\n print(msg)", "def StatusUpdate(msg):\n if verbosity > 0:\n print msg", "def verbose ( self , message , *args , **kwargs ) :\n return self.logger.verbose ( message , *args , **kwargs )", "def _p(self, *args, level=2, **kwargs):\n if self._verbosity >= level:\n print(*args, **kwargs)", "def StatusUpdate(msg):\r\n if verbosity > 0:\r\n print msg", "def printMessage(Message, verbosity):\n if(verbosity == 1):\n print(Message)", "def verbose():\n GLOBAL['VERBOSE'] = True", "def message(self, message):\n for_verbosity = 1\n if self.verbosity_level >= for_verbosity:\n self.logger.info(message)", "def cli(verbose):\n level = (logging.WARNING, logging.INFO, logging.DEBUG)[min(verbose, 2)]\n logging.basicConfig(level=level)", "def __debugInfo(self, msg):\n\t\tif self.verbosity:\n\t\t\tprint(stylize(\"[*] DEBUG: {}\".format(msg), colored.fg(\"wheat_1\")))", "def verbose_print(text,verbose_level):\n if Args.verbose >= verbose_level:\n print '\\t' * (verbose_level-1) + text", "def v_action(option,opt_str,value,parser):\n cmdline_main.message(\"Enabling verbose message output.\")\n if hasattr(parameterized,'get_logger'):\n parameterized.get_logger().setLevel(parameterized.VERBOSE)\n else: # For versions of the param package before 9 May 2013\n parameterized.min_print_level=parameterized.VERBOSE", "def if_verbose(message):\n if args.verbose:\n logging.info(message)\n global_timer()", "def pprint(self, level: int, *values):\n if abs(self.max_verbosity - level + 1) < self.verbosity:\n print(*values)", "def status(msg, options):\n if options.verbose:\n sys.stderr.write(msg)", "def verbosity_for_session(request):\n return request.config.getoption(\"--verbosity-project\")", "def print_info(message: str):\n global verbose\n if verbose:\n print(\"%s%s%s\" % (KYEL, message, KNRM))", "def info(msg, *args):\n if args:\n msg %= args\n click.echo(msg, file=sys.stdout)", "def print_verbose(message:str):\n if params['verbose']:\n print(message)\n return", "def verbose():\n return _verbose", "def info(self, *lines):\n if self.__debug_level >= DEBUG_LEVELS['info']:\n self.print_lines(self.colored(('green', 'bold'), lines))", "def is_verbose() -> bool:\n return VERBOSE", "def output_debug_info(self):", "def turn_on_verbosity(self):\n self.m.setParam('OutputFlag', 1)", "def verbose(self, *args):\n self.mylog.log(logging.INFO - 1, *args)", "def verbosity(self):\n return self._verbosity", "def verbose(self):\n verbose = self.__class__.__name__ + \", alpha: \" + str(self.alpha)\n return verbose", "def verbosity(self):\n return self._get('verbosity')", "def show_info(title, message):\n\n pass", "def verbose( self ):\n return Verbose.__level", "def print_info(self, message: str=\"\", src_file: str=\"\") -> None:\n if self._verbosity_level >= int(VerbosityLevel.VERBOSITY_LEVEL2):\n _mes = src_file + \": \" + message\n if self._print_statements_enabled:\n print(\"INFO \\t\\t- \", src_file + \": \\t\" + message)\n logging.info(_mes)", "def Verbose(on_off=\"on\"):\n\n global verbose\n 
\n if on_off.isdigit():\n int_value = int(on_off)\n else:\n int_value = 1\n\n if on_off.lower() == \"off\":\n int_value = 0\n print \"Verbose disabled.\"\n elif on_off.lower() == \"on\":\n int_value = 1\n print \"Verbose enabled.\"\n \n if -1 < int_value < 3:\n verbose=int_value\n interface.VERBOSE=int_value\n else:\n raise TypeError", "def info(self, message):\n if self.show_info:\n print(message)", "def _do_option(self, line: str) -> None:\n if line.startswith(\"option verbosity\"):\n self._verbosity = Level(int(line[len(\"option verbosity \") :]))\n _write(\"ok\")\n else:\n _write(\"unsupported\")", "def gnupg_verbose():\n if LOGGER.getEffectiveLevel() == logging.DEBUG:\n return [\"--verbose\"]\n\n return [\"-q\"]", "def verbose(self, *args):\n\n if self.is_on(_Log.VERBOSE):\n self._write(self._out, *args)", "def verbose_print(msg: str = '') -> None:\n assert isinstance(msg, str)\n if __verbose:\n print(msg)", "def print_standout(info):\n sys.stdout.write(\"Info: %s\" % info)\n sys.stdout.write(\"\\n\")\n sys.stdout.flush()", "def v_print(msg):\n if (VERBOSE == 1):\n print(msg)", "def verbose(string, level, indent=None):\n if args.verbose:\n if args.verbose > level:\n if indent is None:\n if level <= LEVEL_4:\n indent = \" \" * level\n else:\n indent = \" \"\n print (indent + string)\n return", "def standard_status():\n errors, warnings, infos = THE_LOGGER.status()\n info(errors, \"errors\")\n info(warnings, \"warnings\")\n info(infos, \"infos\")", "def show_info(self):\n print(\"Problem number: \" + str(self.number))\n print(\"Problem name: \" + str(self.name))\n print(\"Problem description: \" + str(self.desc))", "def test():\n v_print(1, \"-vvv Verbose 1 - INFO\")\n v_print(2, \"-vv Verbose 2 - WARN\")\n v_print(3, \"-v Verbose 3 - ERROR\")", "def isVerbose(self):\n return self.opts.verbose", "def logg(msg):\n if VERBOSE: print msg", "def display_detail(msg, *args):\n msg = _concat_message(msg, *args)\n if verbose > 1:\n print \" %s\" % msg.encode(\"UTF-8\")\n sys.stdout.flush()\n if prefs.pref(\"LoggingLevel\") > 0:\n munkilog.log(u\" \" + msg)", "def vprint(msg):\n if defaults.verbose:\n print(msg)", "def log(self, msg, level=1):\n if self.verbosity >= level:\n print(msg)", "def print_info(msg, level=1):\n if config.cfg.verbosity >= level:\n if config.cfg.excessive_verbosity:\n # Get caller info\n fn, lineno, funcnm = inspect.stack()[1][1:4]\n colour.cprint(\"INFO (level: %d) [%s:%d - %s(...)]:\" %\n (level, os.path.split(fn)[-1], lineno, funcnm),\n 'infohdr')\n msg = msg.replace('\\n', '\\n ')\n colour.cprint(\" %s\" % msg, 'info')\n else:\n colour.cprint(msg, 'info')\n sys.stdout.flush()", "def summary(self, *args):\n if len(args) == 1:\n self.level = args[0].level\n self.message = args[0].message\n elif len(args) == 2:\n self.level = args[0]\n self.message = str(args[1])", "def show_status():\n\n pass", "def getVerbose(self):\n return self.__VERBOSE", "def info(msg):\n print(colored.green(\"[INFO]: {0}\".format(msg)))", "def _output(self, message, verbosity, exact, stream):\n if exact:\n if self.config.verbosity == verbosity:\n stream.write(message + \"\\n\")\n else:\n if self.config.verbosity >= verbosity:\n stream.write(message + \"\\n\")", "def verbose(module, message):\n if loggingLevel >= loggingLevelVerbose:\n ModuLog.log(\"V\", module, message)", "def _verboseHeader(self):\n\n if verbose:\n name = self._getName()\n methodName = self._getMethodName()\n\n title = f\"Running {name}.{methodName}\"\n print('{}\\n{}'.format(title, '-' * len(title)))", "def info(cls, message, 
level=0):\n\n if cls.print_level == 3 or (cls.print_level == 2 and level >= 1) or \\\n (cls.print_level == 1 and level >= 2):\n print(cls.marker_theme.info(level) + cls.time() + cls.parse(message))", "def tennis():\n print(\"The tennis option is a placeholder for testing. The option is not currently available. \\n\\n\")", "def go_loud(options):\n my_logger = logging.getLogger(__name__)\n if options['panzer']['quiet']:\n verbosity_level = 'WARNING'\n else:\n verbosity_level = 'INFO'\n my_logger.setLevel(LEVELS[verbosity_level])", "def info(msg):\n click.secho(msg, fg='blue')", "def info(self, msg, *args, **kwargs):\n pass", "def info(self, *args, **kwargs):", "def yap(self, msg):\n\t\tif self.verbose:\n\t\t\tprint msg", "def print_info(msg):\n print(msg)", "def info(self, zolo, module, args):\n print(f\"[Other] Version {module.version}\")", "def print_status(self):\r\n\t\tif VERBOSE:\r\n\r\n\t\t\tprint( 'Player : ')\r\n\t\t\tfor h in self.hands:\r\n\t\t\t\tprint('\\t' + str(h))\r\n\t\t\tprint( 'Dealer:\\n\\t' + str(self.dealer))\r\n\t\t\tprint( '-----------------------')", "def do_verbose(self, arg):\n global verbose\n if verbose == 1:\n verbose = 0\n # prtin and add to log file \n logmsg = \" INFO: verbose mode disable\"\n log(logmsg)\n else:\n verbose = 1\n # prtin and add to log file \n logmsg = \" INFO: verbose mode enable\"\n log(logmsg)", "def _verbose(self):\n return self._toBool(os.environ.get('VERBOSE', 0))", "def verbose_log(*args):\n\tif _verbose and logging.getLogger().isEnabledFor(logging.INFO):\n\t\tlogging.info(\" \".join(str(arg) for arg in args))\n\t\tlogging.getLogger().handlers[0].flush()", "def _print_status(self):", "def info(): # noqa: E501\n return 'do some magic!'", "def logecho(message, level='info'):\n if level == 'error':\n logger.error(message)\n click.echo(Fore.RED + level.upper() + ': ' + Fore.WHITE +\n message, err=True) if verbose else False\n elif level == 'warning':\n logger.warning(message)\n click.echo(Fore.YELLOW + level.upper() + ': ' +\n Fore.WHITE + message) if verbose else False\n elif level == 'debug':\n logger.debug(message)\n click.echo(Fore.GREEN + level.upper() + ': ' +\n Fore.WHITE + message) if debug else False\n else:\n logger.info(message)\n click.echo(message)", "def info(self):\n\n if self.engine_name == 'RAGE':\n self._add_argument('-help')\n self._log('info', 'command line arguments')\n else:\n self._log('info', 'not supported', True)", "def info(self, msg, *args):\n if self.lvl<=logging.INFO: return self._log(msg, *args)", "def verbose(value=None):\n global verbosity\n\n if value != None:\n verbosity = value\n \n try:\n rval = verbosity\n except NameError:\n verbosity = False\n rval = verbosity\n\n return rval", "def _set_verbosity_level1(self):\n logging.basicConfig( filename=LOGGING_FILE_NAME, \\\n filemode='w', \\\n format='%(asctime)s - %(levelname)s \\t- %(message)s', \\\n level=logging.WARNING )", "def verbose_str(self):\n return self.summary.verbose(self.results) or ''", "def v_flag():\n log.setLevel(logging.INFO)", "def print_info(self):\r\n self.system.print_to_log(\r\n f\"{self.__class__.__name__} model: Infection probability: {self.p}, Infectious period: {self.i}, Recovery period: {self.r}.\")", "def more_informations():\n print \"--help for more informations.\"\n sys.exit(1)", "def info() -> None:", "def _ansible_verbose(verbose_level=1):\n flag = ''\n if verbose_level > 1:\n flag = f'-{\"v\" * (verbose_level - 1)}'\n return flag", "def output_info(text):\n if conf.eval_output:\n info_dict = {'type':'info', 'text' : text}\n 
output_message_eval(info_dict)\n else:\n output_message('[INFO] ' + text)", "def info(level):\n if level == 'basic':\n string = _(\"Basic markup\")\n text = _(\"Only basic text tags are available in this input field.\")\n elif level == 'rich':\n string = _(\"Rich markup\")\n text = _(\"Rich and basic text tags are available in this input field.\") \n elif level == 'full':\n string = _(\"Full markup\")\n text = _(\"Every tags are available in this input field.\") \n elif level == 'none':\n string = _(\"No markup\")\n text = _(\"No tags are available in this input field.\") \n\n if level != 'none':\n text = text + \" \" + _(\"Check the markup reminder in related documentation for a description of these tags.\")\n\n return '<span class=\"help\" title=' + quoteattr(text) \\\n + '><img src=\"' + settings.STATIC_MEDIA_URL \\\n + 'images/savane/common/misc.default/edit.png' \\\n + ' border=\"0\" class=\"icon\" alt=\"\" />' \\\n + string + '</span>'", "def print_warning(verbose, message):\n if verbose:\n print(message)", "def info(self, text):\n\n debug_text = self._get_debug_text(text)\n super().info(debug_text)", "def info(cls, msg, debug=True):\n if debug:\n Console.info(msg)", "def printStatus(level,text):\n\tglobal t0\n\tpre = \"[{0:>7.2f}] \".format(time.time()-t0)\n\tfor x in range(0,level):\n\t\tpre += \"-\"\n\tpre += \"> \"\n\tprint(pre+text)", "def info(self, *args, **kwargs):\n self.msg(logging.INFO, *args, **kwargs)", "def command_info(fmt, *args, **kwargs):\n sys.stderr.write(fmt.format(*args, **kwargs))", "def gated_loginfo(quiet, msg):\n\n if(not quiet):\n loginfo(msg)", "def report(s):\n if opts[\"verbose\"]:\n print(\"%s: %s\" % (NAME, s))", "def test_increase_verbosity(self):\n # Start from a known state.\n set_level(logging.INFO)\n assert get_level() == logging.INFO\n # INFO -> VERBOSE.\n increase_verbosity()\n assert get_level() == logging.VERBOSE\n # VERBOSE -> DEBUG.\n increase_verbosity()\n assert get_level() == logging.DEBUG\n # DEBUG -> SPAM.\n increase_verbosity()\n assert get_level() == logging.SPAM\n # SPAM -> NOTSET.\n increase_verbosity()\n assert get_level() == logging.NOTSET\n # NOTSET -> NOTSET.\n increase_verbosity()\n assert get_level() == logging.NOTSET", "def enable_verbose(self):\n self.verbose = True" ]
[ "0.7465201", "0.7334355", "0.71003336", "0.69231784", "0.6840511", "0.6791418", "0.6718375", "0.6699678", "0.6693496", "0.66344607", "0.6631135", "0.66225713", "0.66161317", "0.6589693", "0.6587105", "0.65763825", "0.65573853", "0.6548502", "0.65454394", "0.6543679", "0.6530314", "0.64966387", "0.64682823", "0.64488435", "0.6441829", "0.64380515", "0.6432296", "0.64300776", "0.641969", "0.64167255", "0.64148796", "0.64106524", "0.6410337", "0.6364081", "0.63636446", "0.6347084", "0.6338188", "0.6330291", "0.63266945", "0.6323182", "0.6250819", "0.62465143", "0.62438893", "0.6229464", "0.61967313", "0.61922157", "0.6189785", "0.6179099", "0.6171273", "0.61701524", "0.6169509", "0.61616", "0.61423296", "0.6134458", "0.61327875", "0.6123496", "0.610877", "0.6107583", "0.6107288", "0.61040956", "0.6090223", "0.6082538", "0.6077381", "0.6073183", "0.60730755", "0.60721666", "0.60690546", "0.605808", "0.6055704", "0.6054978", "0.6047408", "0.6035446", "0.6030849", "0.60266924", "0.6019022", "0.5998644", "0.59983844", "0.59900004", "0.597526", "0.597429", "0.5972459", "0.5970509", "0.5951877", "0.5950832", "0.5932759", "0.59325385", "0.59302044", "0.5928259", "0.5926317", "0.59201294", "0.5915548", "0.5910968", "0.5895602", "0.5892796", "0.58751494", "0.5865794", "0.58608395", "0.5859665", "0.58565295", "0.5854158", "0.58475816" ]
0.0
-1
Wrapper to make an API GET request, return the response and handle errors
def __make_api_get(self, api_path):
    try:
        self.last_response = urllib2.urlopen(
            self.api_server + api_path, cafile=self.cacert_path)
        json_data = self.last_response.read()

    # Check for errors
    except urllib2.HTTPError as err:
        error = "API HTTP error [%s] - '%s'" % (err.code, err.read())
        raise EFIgyCliError(error, self.last_response)

    except urllib2.URLError as err:
        error = 'Problem calling API at location %s - %s' % (
            self.api_server + api_path, err)
        raise EFIgyCliError(error, self.last_response)

    # Decode json response into an object
    try:
        ret = json.loads(json_data)
    except ValueError as err:
        error = "Problem deserialising data, expecting JSON.\nError: %s\nData: %s" % (
            err, json_data)
        raise EFIgyCliError(error, self.last_response)

    # Return JSON deserialised object
    return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _http_get(self, url, params={}):\n if not self.token:\n self.get_token()\n headers = {'Authorization': self.token, 'Accept': 'application/json; indent=4'}\n url = self.server + '/api2' + url\n try:\n r = requests.get(url=url, headers=headers, params=params)\n except requests.exceptions.RequestException as e:\n return check_failed(e)\n # raise ClientHttpError(None, e)\n if r.status_code != 200:\n return check_failed(r.status_code)\n # return ClientHttpError(r.status_code, json.loads(r.text)['error_msg'])\n try:\n data = json.loads(r.text)\n except:\n data = r.text\n # TODO: check data\n return data", "def get(api, url, headers=None, auth=_KERBEROS_AUTH, proxies=None,\n retries=_NUM_OF_RETRIES, timeout=None, stream=None):\n return call(api, url, 'get',\n headers=headers, auth=auth, proxies=proxies, retries=retries,\n timeout=timeout, stream=stream)", "def api_get(self, *args, **kwargs):\n return self.api_get_with_response(*args, **kwargs)[0]", "def _get(self, url, **queryparams):\n url = urljoin(self.base_url, url)\n if len(queryparams):\n url += '?' + urlencode(queryparams)\n try:\n r = self._make_request(**dict(\n method='GET',\n url=url,\n auth=self.auth,\n timeout=self.timeout,\n hooks=self.request_hooks,\n headers=self.request_headers\n ))\n except requests.exceptions.RequestException as e:\n raise e\n else:\n if r.status_code >= 400:\n _raise_response_error(r)\n return r.json()", "def send_get(self, api_url, query=None):\n resp = requests.get(self.base_url + api_url, params=query)\n\n return resp", "def get(call,\n headers=None,\n base=cloudBase,\n no_headers=False,\n json_output=True,\n raw=False,\n **kwargs):\n return _call(method=requests.get,\n call='{0}{1}'.format(base, call),\n headers=headers,\n no_headers=no_headers,\n json_output=json_output,\n raw=raw,\n **kwargs)", "def _get(self, url: str) -> requests.Response:\n # todo: do some error checking here\n if url.startswith(API_PATH['base']):\n try:\n # logger.debug(f\"RestClient._get(): {url}\") # log in calling function\n response = requests.get(url, auth=self.auth)\n rest_code = response.json()['meta']['code']\n if rest_code not in [200, 201, 204]:\n raise RestException(f\"REST API Error: {rest_code}. 
{response.content}\")\n except RestException as e:\n logger.error(e)\n return None\n return response\n else:\n raise ValueError(f\"URL is invalid: {url}\")", "def _request_get(self, url):\n try:\n r = requests.get(url)\n except Exception:\n raise Exception('Cannot connect')\n if (r.status_code != 200):\n raise Exception('%d %s' % (r.status_code, r.text))\n if (not r.text) or (not r.text.strip()):\n raise Exception('Empty answer')\n try:\n response = json.loads(r.text)\n except Exception:\n raise Exception('Cannot parse response')\n return response", "def _get(url, *, verbose=False): \n r = get_from_api(url, verbose=verbose)\n return json.loads(r.content)", "def _api_request(self, endpoint, params=None):\n \n if params:\n response = requests.get(url=f\"{self.api_url}/{endpoint}\", headers={\"Authorization\":self.auth_header},\n params=params)\n else:\n response = requests.get(url=f\"{self.api_url}/{endpoint}\", headers={\"Authorization\":self.auth_header})\n code = response.status_code\n if 200 <= code < 300:\n logging.debug(f\"API call: {self.api_url}/{endpoint} | {code}\")\n encoding = response.encoding\n raw = response.content\n return json.loads(raw.decode(encoding))\n elif code > 500:\n raise APIAuthException\n else:\n logging.error(f\"ERROR: Bad API call: {self.api_url}/{endpoint} | {code}\")", "def get(apikey=None, action=None, version='2', _url=None, **kwargs):\n\n url = _url or _api_url()\n _raw = kwargs.pop('_raw', False) # remove _raw kwarg, default to False\n\n output = 'JSON' # don't allow XML, so we can parse JSON response\n apikey = apikey or resolve_apikey()\n\n # sanity checks\n if not apikey:\n raise MonitisError(\"get: apikey is required\")\n if not action:\n raise MonitisError(\"get: action is required\")\n\n # build the request\n req_params = [('apikey', apikey), ('action', action),\n ('output', output), ('version', version)]\n req_params.extend(kwargs.items())\n req_url = url + '?' 
+ urlencode(req_params)\n\n # make the request\n if Monitis.debug:\n print \"Request URL: \" + req_url\n req = Request(req_url)\n try:\n res = urlopen(req)\n except HTTPError, error:\n raise MonitisError('API Error: ' + error.read())\n if _raw:\n return res\n res_json = res.read()\n if Monitis.debug:\n print \"Response: \" + res_json\n\n # build a python object out of the result\n return decode_json(res_json)", "def call_api(url):\n\n req = requests.get(url)\n return req", "def call_api(url):\n\n req = requests.get(url)\n return req", "def api_call(endpoint, params, headers):\n\n api_response = get(BASE_URL.format(endpoint=endpoint), params=params,\n headers=headers)\n\n api_response.raise_for_status()\n json_resp = api_response.json()\n\n api_response.close()\n return json_resp", "def api_call():\n\tresponse = requests.get(URL_API)\n\treturn response", "def request_get(self, path, params=None):\n\tif params is None:\n\t\tparams = {}\n\t\trequest_url = self.host_url + path\n\t\ttry:\n\t\t\tresponse = self.session.get(request_url, auth=self.api_key, params=params)\n\t\texcept requests.RequestException as e:\n\t\t\traise self.DataUnavailable(\"Network exception\") from e\n\n\tif response.status_code != 200:\n\t\traise self.DataUnavailable(\n\t\t\t\"Unexpected response status (%s)\" % response.status_code\n\t\t)\n\n\treturn response.json()", "def get(self, url: str, params: Dict[str, Any] = None, headers: Dict[str, Any] = None) -> Response:\n return self._api_client._get(url, params=params, headers=headers)", "def get(self, api_path, *args, **kwargs):\n\n\t\treturn self._do_operation(u'get', api_path, *args, **kwargs)", "def api_request(self, api_url: str, params: dict = None, headers: dict = None) -> APIResponse:\n if headers is None:\n headers = {}\n if params is None:\n params = {}\n\n link = self._session.get(url=api_url, params=params, headers=headers)\n\n return APIResponse(link.text)", "def get_call_api(url, payload, headers):\n return requests.request(\"GET\", url, headers=headers, data=payload)", "def _api_request(*args, **kwargs):\n response = requests.request(*args, **kwargs)\n return APIResponse(response)", "def make_get_request(client, endpoint):\n return client.get(endpoint)", "def __get(self, url):\n\n res = requests.get(url, headers=self.auth_header)\n res.raise_for_status()\n return res", "async def _api_request(self,\n method: str,\n path_url: str,\n params: Dict[str, Any] = {}) -> Dict[str, Any]:\n base_url = f\"https://{global_config_map['gateway_api_host'].value}:\" \\\n f\"{global_config_map['gateway_api_port'].value}\"\n url = f\"{base_url}/{path_url}\"\n client = await self._http_client()\n if method == \"get\":\n if len(params) > 0:\n response = await client.get(url, params=params)\n else:\n response = await client.get(url)\n elif method == \"post\":\n response = await client.post(url, data=params)\n\n parsed_response = json.loads(await response.text())\n if response.status != 200:\n err_msg = \"\"\n if \"error\" in parsed_response:\n err_msg = f\" Message: {parsed_response['error']}\"\n raise IOError(f\"Error fetching data from {url}. 
HTTP status is {response.status}.{err_msg}\")\n if \"error\" in parsed_response:\n raise Exception(f\"Error: {parsed_response['error']}\")\n\n return parsed_response", "def __request(self,endpoint):\n apiRequest = requests.get(\"%s/%s\" % (self.baseurl,endpoint), \n auth=requests.auth.HTTPBasicAuth(self.api_id, self.api_secret))\n try:\n json = apiRequest.json()\n return json\n except JSONDecodeError:\n print(\"Failed to download or failed to parse JSON.\")\n print(apiRequest)\n return None", "def get(self, api_path, *args, **kwargs):\n\n return self._do_operation(u'get', api_path, *args, **kwargs)", "def api_get(api_url=PUPPETDB_HOST,\n api_version='v3',\n path='',\n method='get',\n params=None,\n verify=PUPPETDB_VERIFY_SSL,\n cert=PUPPETDB_CERTIFICATES):\n if not params:\n params = {}\n method = method.lower()\n headers = {\n 'Accept': 'application/json',\n 'Content-type': 'application/json',\n }\n methods = {\n 'get': requests.get,\n }\n\n if path[0] != '/':\n path = '/{0}'.format(path)\n\n if params:\n path += '?{0}'.format(urlparse.urlencode(params))\n\n url = '{0}{1}'.format(api_url + api_version, path)\n resp = methods[method](url,\n headers=headers,\n verify=verify,\n cert=cert)\n if 'X-records' in resp.headers:\n return json.loads(resp.text), resp.headers\n else:\n return json.loads(resp.text)", "def get_from_api(url, *, verbose=False):\n vprint = lambda *a, **kwa: print(*a, **kwa) if verbose else None\n\n with open(\"APIKey.txt\", \"r\") as keyFile:\n apiKey=keyFile.readline()\n if apiKey[-1] == '\\n':\n apiKey = apiKey[:-1]\n \n headers = {'X-API-Key': apiKey}\n vprint(\"getting\", url, \"with headers\", headers, \"...\")\n r = requests.get(url, headers=headers)\n vprint(\"...done\")\n return r", "def _get(self, endpoint):\n res = self._request(\"get\", endpoint)\n if not res.content:\n return {}\n try:\n res = res.json()\n except ValueError:\n raise ValueError(\"Cannot parse {} as JSON\".format(res))\n if \"error\" in res:\n raise AirthingsError(res[\"error\"])\n return res", "def _api_call(self, **kwargs):\n params = {\n 'format': 'json',\n }\n params.update(kwargs)\n r = requests.get(self.api_base_url, params=params)\n return r.json()", "def get(self, api_url, timeout=30):\n return self._request('GET', api_url, timeout=timeout)", "def get(self, endpoint, data=None):\n if endpoint.startswith(\"http\"):\n url = endpoint\n elif endpoint.startswith(\"/\"):\n url = \"{}{}\".format(api_endpoint, endpoint)\n else:\n url = \"{}/{}\".format(api_endpoint, endpoint)\n\n response = requests.get(\n url,\n data=data,\n headers= {\n \"Content-Type\": \"application/json\",\n \"Accept\": \"application/json\",\n \"Authorization\": \"Bearer \" + self.token,\n }\n )\n\n if response.status_code == 200:\n return response.json()\n elif response.status_code == 404:\n print(\"404: Probably invalid endpoint\")\n else:\n print(\"ERROR IN REQUEST: {}\".format(response.content))\n return response", "def http_get(call):\n\n verify_ssl = (\n True if \"verify_ssl\" not in call.data.keys() else call.data[\"verify_ssl\"]\n )\n\n headers = basic_headers\n if \"headers\" in call.data.keys():\n headers.update(call.data[\"headers\"])\n\n auth = None\n if \"auth_username\" in call.data.keys() and \"auth_password\" in call.data.keys():\n auth = (\n call.data[\"auth_username\"]\n if \"auth_username\" in call.data.keys()\n else None,\n call.data[\"auth_password\"]\n if \"auth_password\" in call.data.keys()\n else None,\n )\n\n resp = requests.get(\n url=call.data[\"url\"],\n params=call.data[\"get_params\"]\n if 
\"get_params\" in call.data.keys()\n else None,\n headers=headers,\n verify=verify_ssl,\n timeout=10,\n auth=auth,\n )\n\n return resp.status_code == 200", "def get(url, to_error=_default_to_error, **kwargs):\n\n return request('get', url, to_error=to_error, **kwargs)", "def _request(self, endpoint: str = \"/api/\", params: object = {}) -> dict:\n ret: dict = {}\n try:\n if not self.api_key:\n ret[\"error\"] = \"API key is empty\"\n raise APIError(ret['error'])\n\n r = requests.get(f\"{self.apibase}{endpoint}\",\n params=params,\n headers=self.headers,\n verify=self.verify_ssl)\n response_data = orjson.loads(r.text)\n except orjson.JSONDecodeError:\n ret[\"error\"] = \"Failed to parse response data to JSON\"\n if self.debug:\n ret[\"error\"] += \"\\nDescription: \" + r.reason\n ret[\"error\"] += \"\\nData: \" + r.text\n except requests.HTTPError:\n ret[\"error\"] = f\"{r.status_code}: {r.reason}\"\n if self.debug:\n ret[\"error\"] += \"\\nDescription: \" + r.reason\n ret[\"error\"] += \"\\nData: \" + r.text\n\n if ret.get('error', None):\n raise APIError(ret['error'])\n check_status_code(request=r, debug=self.debug, ret=ret)\n\n ret = response_data\n return ret", "def _get_request(url_root,api_key,path,response_type,params, ssl_verify):\n url = _url_builder(url_root,api_key,path,params)\n content = _fetch(url, ssl_verify)\n response = _dispatch(response_type)(content)\n return response", "def get(self, url):\n headers = {\"Authorization\": \"Bearer \" + self.token}\n full_url = self.api_url + starts_slash(url)\n logging.info(\"GET url: \" + str(full_url))\n logging.info(\"GET header: \" + str(headers))\n try:\n result = requests.get(full_url, headers=headers).json()\n except json.decoder.JSONDecodeError:\n result = \"error parsing JSON response\"\n logging.info(\"GET result: \" + str(result))\n return result", "def api_get_with_response(self, path, data={}, **kwargs):\n return self.api_call(client_http_method=self.client.get,\n path=path,\n data=data,\n **kwargs)", "def _get(self, *args, **kwargs):\n return self._request('get', *args, **kwargs)", "def get(self, *args, **kwargs):\n return Response({'foo': 'bar'})", "def _fetch(cls, *args, **kwargs):\n apikey = htpc.settings.get('plexpy_apikey')\n\n if apikey is None:\n raise\n\n url = '%sapi/v2?apikey=%s&%s' % (cls._build_url(), apikey, urlencode(kwargs))\n\n try:\n r = requests.get(url, verify=False)\n r.raise_for_status()\n # Lets just copy the headers for now.\n cherrypy.response.headers['Content-Type'] = r.headers.get('Content-Type', 'application/json;charset=UTF-8')\n resp = r.json()\n if resp.get('response', {}).get('result') == 'success':\n return resp['response']['data']\n except:\n log.exception('Failed to get %s' % url)\n return", "def requester(get_args: dict) -> dict:\n get_args.update(dict(apikey = apikey))\n response = requests.get(URL, params = get_args)\n return response.json()", "async def get(self, path, params=None, json_data=None):\n response = await self.request('GET', path, params, json_data)\n return response", "def get(self, params=None):\n params = self.parameters(additional_parameters=params)\n res = get(self.endpoint_url, params=params)\n return Response(res)", "def _genericGet(self,resource,**kwargs):\n requestUrl = self.apiRootUrls[0] + resource\n debugRequest(requestUrl)\n r = retry(self.session.get,requestUrl,params=kwargs)\n if r.json is None:\n debugError('not json. 
here is the actual body text:')\n debugRaw(r.text)\n return\n return r.json", "def get_rest_call(api_url, username, password):\n response = requests.get(api_url,\n auth=HTTPBasicAuth(username, password),\n verify=False,\n timeout=4)\n return response", "def _api_request(self, path, method, data=None, query=None):\n\n url = request_url(\n self.config['secure'],\n self.config['hostname'],\n self.config['port'],\n path,\n query,\n )\n\n try:\n resp = request(\n url,\n method,\n self._headers(),\n data,\n self.config['timeout'],\n )\n\n return Response(\n resp.get('meta', {}),\n # Response info may have 'object' or 'objects' key, depending\n # on whether there are 1 or multiple results.\n resp.get('object', resp.get('objects'))\n )\n except HTTPError as e:\n response = e.read()\n fallback = '{0} {1}'.format(e.code, e.msg)\n\n if isinstance(response, bytes):\n data = response.decode('utf8')\n else:\n data = response\n\n error = json.loads(data).get('error', {})\n message = error.get('message', fallback)\n raise HTTPResponseError(message, status_code=e.code, cause=e)", "def req_get(url, headers=None, params=None) -> Response:\n if params:\n url = \"{}?{}\".format(url, parse.urlencode(params))\n\n req = Request(url, headers=headers, method=\"GET\")\n\n with request.urlopen(req) as res:\n response = Response(res)\n return response", "def __apiRequest(self, url, parms={}):\n authparms = self.__addAuthParms(parms);\n request = self.http.request('GET', url, fields=authparms)\n if request.status != 200:\n raise ApiCommunicationError('Failed to retrieve data from Marvel, HTTP Status {}'.format(request.status))\n else:\n return json.loads( request.data.decode('utf-8') )", "def _get(self, url):\n response = requests.get(url)\n if response.status_code == requests.codes.ok:\n return response\n else:\n raise HTTPError", "def _do_get(self, uri, **kwargs):\n #TODO:\n # Add error handling. 
Check for HTTP status here would be much more conveinent than in each calling method\n scaleioapi_get_headers = {'Content-type':'application/json','Version':'1.0'}\n self.logger.debug(\"_do_get() \" + \"{}/{}\".format(self._api_url,uri))\n \n if kwargs:\n for key, value in kwargs.iteritems():\n if key == 'headers':\n scaleio_get_headersvalue = value\n\n try:\n #response = self._im_session.get(\"{}/{}\".format(self._api_url, uri), headers = scaleioapi_get_headers, payload = scaleio_payload).json()\n response = self._im_session.get(\"{}/{}\".format(self._api_url, uri), **kwargs).json()\n #response = self._session.get(url, headers=scaleioapi_post_headers, **kwargs)\n if response.status_code == requests.codes.ok:\n return response\n else:\n raise RuntimeError(\"_do_get() - HTTP response error\" + response.status_code)\n except:\n raise RuntimeError(\"_do_get() - Communication error with ScaleIO gateway\")\n return response", "def __signed_GET(self, api_url, params={}, timeout=5):\r\n sign_str = ''\r\n for key in sorted(params.keys()):\r\n _ = '&' + key + '=' + str(params[key])\r\n sign_str += _\r\n payload_str = 'GET' + '&' + api_url + sign_str\r\n signature = hmac.new(bytes(self.secret, encoding='utf-8'), bytes(payload_str, encoding='utf-8'), digestmod=hashlib.sha256).hexdigest()\r\n params['sign'] = signature\r\n url = self.__base_url + api_url\r\n try:\r\n r = requests.get(url, params=params, timeout=timeout)\r\n r.raise_for_status()\r\n except ReadTimeout:\r\n print(\"get timeout\")\r\n return\r\n except requests.exceptions.HTTPError as err:\r\n print(err)\r\n return\r\n if r.status_code == 200:\r\n return r.json()", "def api_get(self, path, query=None):\n return self._api_request(path, 'GET', query=query)", "def get(url_ext, query_params={}, custom_err=None, timeout=DEFAULT_TIMEOUT):\r\n url = get_url() + url_ext\r\n # get request headers\r\n headers = get_headers()\r\n\r\n r = requests.get(url, params=query_params, headers=headers, timeout=timeout)\r\n return handle_response(r, \"GET\", custom_err)", "def get(self, path):\n url = urljoin(self.api_endpoint, path)\n response = requests.get(url, headers=self.headers)\n if response.status_code == requests.codes.ok:\n return response.json()\n elif response.status_code == requests.codes.not_found:\n return None\n else:\n response.raise_for_status()", "def get(url, headers=None, json=None, parameters=None):\n log = log_utilities.create_logger()\n response = None\n http_code = None\n\n try:\n r = requests.get(url, headers=headers, json=json, params=parameters)\n log.info(\"[http_utilities.get] URL: {url}\".format(url=url))\n if parameters:\n log.info(\"[http_utilities.get] parameters: {parameters}\".format(parameters=parameters))\n if headers:\n log.info(\"[http_utilities.get] headers: {headers}\".format(headers=headers))\n if json:\n log.info(\"[http_utilities.get] json: {json}\".format(json=json))\n log.info(\"[http_utilities.get] Request sent for URL: {url}\".format(url=r.url))\n response = r\n http_code = r.status_code\n log.info(\"[http_utilities.get] HTTP status code: {http_code}\".format(http_code=http_code))\n except requests.exceptions.RequestException as e:\n log.exception(\"[http_utilities.get] Exception {type} = {exception}\".format(type=type(e), exception=e.message))\n\n return response, http_code", "def _get_request(self, url, payload=None):\n url = self.baseUrl + url\n logger.debug(\"GET %s\", url)\n with self.session.get(url, params=payload) as req:\n try:\n result = req.json()\n except json.decoder.JSONDecodeError as exc:\n 
raise errors.PluginError(\"no JSON in API response\") from exc\n if result[\"result\"] == \"success\":\n return result[\"data\"]\n if result[\"error\"][\"code\"] == \"not_authorized\":\n raise errors.PluginError(\"cannot authenticate\")\n raise errors.PluginError(\n \"error in API request: {} / {}\".format(\n result[\"error\"][\"code\"], result[\"error\"][\"description\"]\n )\n )", "def get(self, url, params=None):\n # TODO: handle params\n path = self.get_path(url)\n return self.build_response_for(path)", "def _get_api_request(url):\n req = requests.get(url)\n\n if not req.status_code == 200:\n print(\"Error getting API request:\", url)\n print(\"Status code:\", req.status_code)\n print(\"Error:\", req.text)\n exit(200)\n\n data = None\n try:\n data = req.json()\n except JSONDecodeError:\n print(\"WarcraftLogs did not return proper JSON, it is likely down for maintenance.\")\n print(\"Request response:\", req.text)\n exit(300)\n\n return data", "def get(self, url, params=None, headers=None, timeout=10):\r\n headers = headers or self.headers\r\n try:\r\n return self.request(url=url, method='GET', params=params, extra_headers=headers, timeout=timeout)\r\n except requests.exceptions.ProxyError:\r\n return None\r\n except requests.RequestException as error:\r\n print(error)\r\n if self._debug:\r\n logging.exception(\r\n ''.join(traceback.format_exception(etype=type(error), value=error, tb=error.__traceback__)))\r\n return None", "def _ext_get(self, url, key=None, status=200):\n\n resp, body = self.get(url)\n body = json.loads(body)\n self.expected_success(status, resp.status)\n\n if not key:\n return service_client.ResponseBody(resp, body)\n elif isinstance(body[key], dict):\n return service_client.ResponseBody(resp, body[key])\n elif isinstance(body[key], list):\n return service_client.ResponseBodyList(resp, body[key])\n\n return service_client.ResponseBodyData(resp, body[key])", "def make_get_request(url, headers=None):\n logger.info(\"Inside: make_get_request\")\n logger.debug(\"make_get_request: parameters - {}, {}\".format(url, headers))\n\n timeout = get_config(REQUEST_TIMEOUT,\"timeout\")\n\n if not headers:\n headers = {}\n\n resp = requests.get(url, verify=False, headers=headers, timeout=timeout)\n\n logger.debug('received status : {}'.format(resp.status_code))\n logger.debug('received text : {}'.format(resp.text))\n logger.info(\"Exit: make_get_request\")\n\n if RestClient.result_success(resp.status_code):\n return resp\n else:\n err_msg = 'ERROR, received {} code during API call {}'.format(resp.status_code, url)\n logger.error(err_msg)\n raise APIException(err_msg, resp.text)", "def get_api_result(self, url, params):\n return self.HANDLER_HTTP.send_get_request(url, params)", "def __get(self, url, headers=None):\n return self.__req(url, \"GET\", headers=headers)", "def get(self, limit=None, offset=None):\r\n\r\n params = base.get_params(None, locals())\r\n\r\n return http.Request('GET', self.get_url(), params), parsers.parse_json", "def get(self, limit=None, offset=None):\r\n\r\n params = base.get_params(None, locals())\r\n\r\n return http.Request('GET', self.get_url(), params), parsers.parse_json", "def handle_get(self, api, command):\n return self._make_request_from_command('GET', command)", "def request(host=API_HOST, path=SEARCH_PATH, api_key=API_KEY, url_params=params):\n url_params = url_params or {}\n url = '{0}{1}'.format(host, quote(path.encode('utf8')))\n headers = {\n 'Authorization': 'Bearer %s' % api_key,\n }\n\n response = requests.request('GET', url, headers=headers, 
params=url_params)\n\n return response.json()", "def GET(self, path, params={}):\n request_url = 'https://{0}:{1}/rest/{2}'.format(\n self.settings.api_host,\n self.settings.api_port,\n path\n )\n\n # Make the API request\n response = requests.get(request_url,\n auth = (self.settings.api_user, self.settings.api_password),\n verify = self.settings.verify_ssl,\n headers = self.settings.headers,\n params = params\n )\n\n # Request failed\n if not int(response.status_code) == 200:\n raise Exception('Failed to GET {0}: {1}'.format(request_url, response.json()))\n return response.json()", "def call_api(self, url, method='GET', headers=None, params=None, data=None):\n r = requests.request(method=method, url=url, headers=headers, params=params, data=data)\n \n self.log.debug(f'Called endpoint {url} with result {r}')\n\n try:\n jayson = json.loads(r.text)\n return jayson\n except:\n self.log.info(f'ERROR! Text of response object: {r.text}')", "def api_get(func, data=None):\n result = {}\n try:\n req = requests.get(FC_URL + func, params=data, verify=False)\n result.update({'status': req.status_code, 'result': req.json()})\n except requests.exceptions.RequestException as error:\n result.update({'status': 'ERROR', 'result': str(error)})\n except simplejson.errors.JSONDecodeError as error:\n print(str(req))\n print(str(error))\n return result", "def http_request(endpoint, data, method='POST'):\n url = BASE_API + endpoint\n data['authkey'] = AUTH_KEY\n\n response = requests.request(method, url=url, data=data, timeout=300, verify=VERIFY)\n if response.status_code == 200:\n try:\n return response.json()\n except Exception as e:\n return_error('Response JSON decoding failed due to {}'.format(str(e)))\n\n else:\n return_error('API Returned, {}:{}'.format(response.status_code, response.reason))", "def get(self, path, params=None):\n \n # prep\n get_url = self.url(path)\n \n # request\n response = requests.get(get_url, params=params, auth=self.auth, headers=API.HEADERS)\n\n # test and return\n self.raise_for_status(response)\n return response.json()", "def api_get(self, name):\n try:\n r = self._get(['apis', name])\n except requests.HTTPError:\n return None\n else:\n return r", "def _api_GET(config, function, param, value, token):\n if config.verbose:\n click.secho(\"\\nGETing {}={} from {} with {}\".format(param, value, function, token), fg='white')\n try:\n ret = requests.get(config.url + \"/api/get/{}?\".format(function) + \"{}={}\".format(param, value) + \"&token={}\".format(token))\n except:\n click.secho(\"\\nConnection Refused!...\\n\", fg='red', reverse=True)\n if config.verbose:\n click.secho(\"Server connection was denied. Check your internet connections and try again. 
Otherwise contact support.\", fg='cyan')\n else:\n click.secho(str(ret.status_code), fg='yellow')\n click.secho(ret.text, fg='yellow')\n return [ret.status_code, ret.text]", "def get(self, *args, **kwargs):\n return self._request('get', *args, **kwargs)", "async def request_api(url):\n\theaders = {\"User-Agent\": f\"Mozilla/5.0 aiotfm/{__version__}\"}\n\n\ttry:\n\t\tasync with aiohttp.ClientSession() as session:\n\t\t\tasync with session.get(url, headers=headers) as resp:\n\t\t\t\treturn await resp.json()\n\texcept aiohttp.ClientError:\n\t\treturn {}", "def _request(self, url, **kwargs):\n headers = {'PRIVATE-TOKEN': self.token}\n response = make_request(self.base_url + url, headers=headers, **kwargs)\n logging.info('Requested: {0}'.format(url))\n logging.info('Method: {0}'.format(kwargs.get('method', 'GET')))\n logging.info(response.content)\n return json.loads(response.content)", "def simple_get(url):\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n return resp.content\n else:\n return None\n except RequestException as e:\n print('The following error occurred during HTTP GET request to {0} : {1}'.format(url, str(e)))\n return None", "def simple_get(url):\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n return resp.content\n else:\n return None\n except RequestException as e:\n print('The following error occurred during HTTP GET request to {0} : {1}'.format(url, str(e)))\n return None", "def simple_get(url):\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n return resp.content\n else:\n return None\n except RequestException as e:\n print('The following error occurred during HTTP GET request to {0} : {1}'.format(url, str(e)))\n return None", "def httpGet(self, url, parameters=None):\r\n return self.auth.get(url, parameters)", "def generate_get_request(self, endpoint, parameters=None):\n try:\n if parameters == None:\n response = requests.get(endpoint)\n else:\n response = requests.get(endpoint, params=parameters)\n\n if response.status_code != 200:\n raise GetRequestException\n\n return response\n except GetRequestException as e:\n # Handling exception\n logging.error(\n \"Error with status code %d : %s \", response.status_code, response.text\n )\n except Exception as e:\n # Handling exception\n logging.exception(\"Error processing request\")", "def simple_get(url):\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n return resp.content\n else:\n return None\n except RequestException as e:\n print('The following error occurred during HTTP GET request \\\n to {0} : {1}'.format(url, str(e)))\n return None", "async def _api_call(self, url, payload={}, retry=False):\n timeout = aiohttp.ClientTimeout(total=self.api_timeout)\n try:\n async with self._client_session.get(\n API_URL + url, headers=self.headers, timeout=timeout, data=payload\n ) as resp:\n if not retry and resp.status == 401:\n await self.renew_auth()\n return await self._api_call(url, payload, True)\n\n # 4xx represents unauthenticated\n if resp.status == 401 or resp.status == 403 or resp.status == 404:\n raise SenseAuthenticationException(f\"API Return Code: {resp.status}\")\n\n if resp.status != 200:\n raise SenseAPIException(f\"API Return Code: {resp.status}\")\n\n return await resp.json()\n except asyncio.TimeoutError as ex:\n # timed out\n raise SenseAPITimeoutException(\"API call timed out\") from ex", "def get(self, url=\"\", query={}):\r\n qs = urllib.urlencode(query)\r\n if qs:\r\n qs 
= \"?%s\" % qs\r\n \r\n url = \"%s%s%s\" % (self.base_url, url, qs)\r\n log.debug(\"GET %s\" % (url))\r\n \r\n self.__connection.connect()\r\n request = self.__connection.request(\"GET\", url, None, self.__headers)\r\n response = self.__connection.getresponse()\r\n data = response.read()\r\n self.__connection.close()\r\n \r\n log.debug(\"GET %s status %d\" % (url,response.status))\r\n result = {}\r\n \r\n # Check the return status\r\n if response.status == 200:\r\n log.debug(\"%s\" % data)\r\n parser = DetailsToDict()\r\n parseString(data, parser)\r\n return parser.data\r\n \r\n elif response.status == 204:\r\n raise EmptyResponseWarning(\"%d %s @ https://%s%s\" % (response.status, response.reason, self.host, url))\r\n \r\n elif response.status == 404:\r\n log.debug(\"%s returned 404 status\" % url)\r\n raise HTTPException(\"%d %s @ https://%s%s\" % (response.status, response.reason, self.host, url))\r\n \r\n elif response.status >= 400:\r\n _result = simplejson.loads(data)\r\n log.debug(\"OUTPUT %s\" % _result)\r\n raise HTTPException(\"%d %s @ https://%s%s\" % (response.status, response.reason, self.host, url))\r\n \r\n return result", "def call_api_get(\n self, method: str, params: Optional[Dict] = None, headers: Optional[Dict] = None\n ) -> Union[Dict, List]:\n return self.__api_call(\n self.__get_url(method), \"GET\", headers=headers, params=params\n )", "def httpapi_request(client, **params) -> 'Response':\n return requests.get(\n _HTTPAPI,\n params={\n 'client': client.name,\n 'clientver': client.version,\n 'protover': 1,\n **params\n })", "def _make_request(self, url: str, parameters: dict = None,\n method: str = 'GET', *args, **kwargs):\n response = requests.request(\n method=method,\n url=build_url(\n self.BASE_API_URL, url, parameters\n ),\n headers={\n 'Authorization': 'Bearer {}'.format(self._access_token)\n }, **kwargs\n )\n if response.ok:\n return response.json()\n raise MondoApiException(response.json()['message'])", "def get_response(url, auth_token, add_headers=None, verb=\"GET\", status_ok=200):\n headers = {\"Authorization\": \"Basic {}\".format(auth_token)}\n if add_headers:\n headers.update(add_headers)\n\n if verb == \"GET\":\n response = requests.get(url, headers=headers)\n elif verb == \"HEAD\":\n response = requests.head(url, headers=headers)\n elif verb == \"DELETE\":\n response = requests.delete(url, headers=headers)\n else:\n raise Exception(\"Unknown verb [{}].\".format(verb))\n\n if not response.status_code == status_ok:\n raise response.raise_for_status()\n\n try:\n resp_json = response.json()\n except Exception:\n resp_json = {}\n\n return (resp_json, response.headers)", "def make_request(self, url):\n try:\n response = requests.get(url)\n if response.status_code != 200:\n return None\n return response.json()\n except requests.ConnectionError:\n return None", "def make_get_request(url:str, **kwargs) -> (bool, dict):\n\n print(\"Making call to '{}'...\".format(url))\n resp = requests.get(url, **kwargs)\n print(\"Received response.\")\n\n if not resp.ok:\n return False, resp.status_code, json.loads(resp.content)\n\n return True, resp.status_code, json.loads(resp.content)", "def _get(self, url):\n return self._request(url)", "def fetch(url: str, raise_for_status: bool=True, **query_params) -> Tuple[int, Dict[str, Any]]:\n # HINT carefully read the requests documentation to figure out the cleanest way to raise a requests error.\n #query_params is a dict, **query_params is keyword arg \n res_obj = requests.get(url, params=query_params)\n if raise_for_status:# 
default to True \n res_obj.raise_for_status()# if 200 result is None \"All is well\"\n return (res_obj.status_code, res_obj.json()) # returns code and dict ", "def request(host, path, api_key, url_params=None):\n url_params = url_params or {}\n url = '{0}{1}'.format(host, quote(path.encode('utf8')))\n headers = {\n 'Authorization': 'Bearer %s' % api_key,\n }\n\n response = requests.request('GET', url, headers=headers, params=url_params)\n\n return response.json()", "def simple_get(url):\n\n def is_good_response(resp):\n \"\"\"\n Checks if a response is good.\n \"\"\"\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200 and content_type is not None\n and content_type.find('html') > -1)\n\n def log_error(err):\n \"\"\"\n Simple error logging wrapper\n \"\"\"\n print(err)\n\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n return resp.content\n return None\n\n except RequestException as err:\n log_error(\"Error during requests to {0} : {1}\".format(url, str(err)))", "def request(host, path, api_key, url_params=None):\r\n url_params = url_params or {}\r\n url = '{0}{1}'.format(host, quote(path.encode('utf8')))\r\n headers = {\r\n 'Authorization': 'Bearer %s' % api_key,\r\n }\r\n\r\n print(u'Querying {0} ...'.format(url))\r\n \r\n response = requests.request('GET', url, headers=headers, params=url_params)\r\n\r\n return response.json()", "def req(url, headers=None):\n if headers is None:\n headers = {}\n response = requests.get(url, verify=False, headers=headers)\n if response.status_code == 200:\n response = json.loads(response.text)\n return response\n return None", "def get_response(request_url):\n return requests.get(request_url)", "def get(self, url, authenticate=True, headers=None, query_params=None, expected_status_codes=None, retry=0):\n\n # Initialize headers if not provided.\n if headers is None:\n headers = {}\n\n # Set expected status codes to default value if not provided.\n if expected_status_codes is None:\n expected_status_codes = [200, 204]\n\n # If request is authenticated, add authorization header.\n if authenticate:\n headers[\"Authorization\"] = self._get_authorization_header()\n\n # Append query params to URL if provided.\n if query_params is not None:\n url = f\"{url}?\"\n for key, value in query_params.items():\n url = f\"{url}{key}={value}&\"\n\n # If max number of retries is exceeded, abort.\n if retry > consts.MAX_RETRIES:\n log.abort_and_exit(\"GHUB\", f\"Request to {url} with headers {headers} failed after {retry} retries.\")\n\n # Sleep before making request to ensure proper delay.\n time.sleep(consts.API_REQUEST_DELAY_SEC)\n\n # Before making a request, check for rate limiting. Wait if necessary.\n if self.is_rate_limited():\n self._handle_rate_limit()\n\n # Make request and update rate limit status from response headers.\n response = requests.get(url, headers=headers)\n self._rate_limit_status = self._parse_rate_limit_headers(response.headers)\n status = response.status_code\n\n retry_after_header = response.headers.get(\"Retry-After\")\n if retry_after_header is not None:\n # Retry-After header found, indicates abuse rate limiting. 
Discard response, wait and retry.\n retry_sec = int(retry_after_header)\n log.warning(\"GHUB\",\n f\"Received Retry-After (abuse rate limiting), trying again after '{retry_sec}' seconds.\")\n self.update_rate_limit_status()\n self.get(url, headers, expected_status_codes, retry + 1)\n\n if (status == 403) or (status not in expected_status_codes):\n # Check for rate limiting in case of unexpected status code.\n if self.is_rate_limited():\n # Wait until the rate limit should be lifted.\n self._handle_rate_limit()\n else:\n # It was not a rate limiting issue - log a warning.\n log.warning(\"GHUB\", f\"Unexpected status code {status} for request {url}.\")\n\n # Rate limit should now be lifted if there was one. Retry, update number of retries.\n self.get(url, headers, expected_status_codes, retry + 1)\n\n return status, response.json(), self._parse_link_header(response.headers.get(\"Link\"))" ]
[ "0.778995", "0.768153", "0.7418646", "0.739191", "0.73206294", "0.72995013", "0.7262301", "0.7259874", "0.7223338", "0.7208629", "0.71811944", "0.7158694", "0.7158694", "0.71570796", "0.7130304", "0.7127936", "0.7122294", "0.709202", "0.7063909", "0.7061155", "0.70599645", "0.7015708", "0.6999518", "0.69958556", "0.6961935", "0.6959945", "0.69523793", "0.6951484", "0.6949693", "0.69470805", "0.69457114", "0.6942209", "0.6931668", "0.6924481", "0.691494", "0.6910765", "0.6905366", "0.69011617", "0.68886346", "0.6887114", "0.6880268", "0.68434125", "0.68409896", "0.6836408", "0.68351525", "0.68283033", "0.68271446", "0.6789979", "0.6779164", "0.6770816", "0.6766349", "0.6760421", "0.6757423", "0.67428774", "0.67382646", "0.6734402", "0.6728647", "0.67111164", "0.6710592", "0.6702306", "0.67012113", "0.6693957", "0.66889006", "0.6682268", "0.6679991", "0.6679991", "0.667017", "0.66683096", "0.666001", "0.6654671", "0.66461104", "0.6640715", "0.663489", "0.6628294", "0.6625782", "0.66183114", "0.66140693", "0.6609067", "0.65989244", "0.65989244", "0.65989244", "0.6595867", "0.65942657", "0.6588054", "0.6585901", "0.65811753", "0.6577047", "0.6576532", "0.6576403", "0.6569968", "0.6566178", "0.65519583", "0.6551381", "0.6548323", "0.6538921", "0.65305376", "0.6512455", "0.6510521", "0.65098226", "0.6507617" ]
0.72182447
9
Wrapper to make an API POST request, return the response and handle errors
def __make_api_post(self, api_path, data=None):
    headers = {
        "Content-type": "application/json",
        "Accept": "application/json"}
    x = json.dumps(data)
    try:
        req = urllib2.Request(self.api_server + api_path, x, headers)
        self.last_response = urllib2.urlopen(req, cafile=self.cacert_path)
        json_data = self.last_response.read()

    # Check for errors
    except urllib2.HTTPError as err:
        error = "API HTTP error [%s] - '%s'" % (err.code, err)
        raise EFIgyCliError(error, err)

    except urllib2.URLError as err:
        error = 'Problem calling API at location %s - %s' % (
            self.api_server + api_path, err)
        raise EFIgyCliError(error, self.last_response)

    # Decode json response into an object
    try:
        ret = json.loads(json_data)
    except ValueError as err:
        error = "Problem deserialising data, expecting JSON.\nError: %s\nData: %s" % (
            err, json_data)
        raise EFIgyCliError(error, self.last_response)

    # Return JSON deserialised object
    # print "DEBUG - %s"%(ret), type(ret)
    return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def post(self, *args, **kwargs):\n return self._requests_call(util.requests_post, *args, **kwargs)", "def make_post_request(api_endpoint: str, data: dict):\n response = requests.post(api_endpoint, data=data)\n logprint_request(api_endpoint)\n\n logprint_response(response)\n log.debug(\"Response dict:\\n{}\".format(response.__dict__))\n\n # Split response text into list of strings.\n response_lines = response.text.split('\\n')\n log.info(response_lines)\n\n # Pop the first entry from the lines as it is a status code, and save it in its own variable.\n status_code = response_lines.pop(0)\n\n if status_code != \"0\":\n error_info = \"Got non-zero return code {}\".format(status_code)\n\n # If status code is known, append its meaning.\n if status_code in API_STATUS_CODES:\n error_info += \": {}\".format(API_STATUS_CODES[status_code])\n\n raise Exception(\"{}, aborting!\".format(error_info))\n\n # Remove empty entries\n for i in range(len(response_lines)):\n if response_lines[i] == \"\":\n response_lines.pop(i)\n\n return response_lines", "def _http_post(self, url, params={}):\n url = self.server + '/api2' + url\n try:\n r = requests.post(url=url, data=params)\n except requests.exceptions.RequestException as e:\n return check_failed(e)\n # raise ClientHttpError(None, e)\n if r.status_code != 200:\n return check_failed(r.status_code)\n # return ClientHttpError(r.status_code, r.text)\n data = json.loads(r.text)\n # TODO: check data\n return data", "def api_post(self, *args, **kwargs):\n return self.api_post_with_response(*args, **kwargs)[0]", "def _post(self, url, data=None):\n url = urljoin(self.base_url, url)\n try:\n r = self._make_request(**dict(\n method='POST',\n url=url,\n json=data,\n auth=self.auth,\n timeout=self.timeout,\n hooks=self.request_hooks,\n headers=self.request_headers\n ))\n except requests.exceptions.RequestException as e:\n raise e\n else:\n if r.status_code >= 400:\n _raise_response_error(r)\n\n if r.status_code == 204:\n return None\n return r.json()", "def make_post_request(url:str, post_params:dict, **kwargs):\n\n print(\"Making call to '{}'...\".format(url))\n resp = requests.post(url, data=post_params, **kwargs)\n print(\"Received response.\")\n\n if not resp.ok:\n return False, resp.status_code, json.loads(resp.content)\n\n return True, resp.status_code, json.loads(resp.content)", "def _api_request_post(self, endpoint, data, headers=None):\n\n all_headers = {\"Authorization\": self.auth_header}\n\n if headers:\n for header in headers:\n all_headers[header] = headers[header]\n\n response = requests.post(url=f\"{self.api_url}/{endpoint}\", headers=all_headers, data=data)\n code = response.status_code\n if 200 <= code < 300:\n logging.debug(f\"API POST call: {self.api_url}/{endpoint} | {code}\")\n encoding = response.encoding\n raw = response.content\n return json.loads(raw.decode(encoding))\n elif code > 500:\n raise APIAuthException\n else:\n logging.error(f\"ERROR: Bad API POST call: {self.api_url}/{endpoint} | {code}\")", "def make_post_request(url, headers=None, json_data=None, data=None):\n logger.info(\"Inside: make_post_request\")\n logger.debug(\"make_post_request: parameters - {}, {}, {}, {}\".format(url, headers, json_data, data))\n\n timeout = get_config(REQUEST_TIMEOUT,\"timeout\")\n\n if not headers:\n headers = {}\n\n if json_data:\n resp = requests.post(url, verify=False, headers=headers, json=json_data, timeout=timeout)\n elif data:\n resp = requests.post(url, verify=False, headers=headers, data=data, timeout=timeout)\n\n logger.debug('received 
status : {}'.format(resp.status_code))\n logger.debug('received text : {}'.format(resp.text))\n logger.info(\"Exit: make_post_request\")\n if RestClient.result_success(resp.status_code):\n return resp\n else:\n err_msg = 'ERROR, received {} code during API call {}'.format(resp.status_code, url)\n logger.error(err_msg)\n raise APIException(err_msg, resp.text)", "def post(api, url, payload, headers=None, auth=_KERBEROS_AUTH, proxies=None,\n retries=_NUM_OF_RETRIES, timeout=None):\n return call(api, url, 'post', payload=payload,\n headers=headers, auth=auth, proxies=proxies, retries=retries,\n timeout=timeout)", "def post_resource(**kwargs) -> dict:\n\n response = requests.post(**kwargs)\n print(f\"HTTP {response.request.method}: {response.url}\")\n print(f'HTTP Status code: {response.status_code}')\n\n # Raise an exception if the response is not OK\n if not response.ok:\n print(response.text)\n response.raise_for_status()\n\n # Convert the reply to JSON\n response_json = response.json()\n\n # Return json parsed data as a python dictionary\n return response_json", "def make_post_request(client, endpoint, data):\n return client.post(endpoint, data=data)", "def _post(self, *args, **kwargs):\n return self._request('post', *args, **kwargs)", "def post(url, to_error=_default_to_error, data=None, json=None, **kwargs):\n\n return request('post',\n url, to_error=to_error, data=data, json=json, **kwargs)", "def _post(self, api_uri, data={}):\n url = r'{0}{1}'.format(self._protocol_domain, api_uri)\n \n headers = copy.deepcopy(self._headers)\n headers['UTCTimestamp'] = self._generate_UTCTimestamp()\n headers['nonce'] = self._generate_nonce()\n \n msg = self._generate_header_body_str(api_uri, headers, data)\n headers['signature'] = self._generate_signature(self.access_key, msg)\n \n # http_body = urllib.urlencode(data)\n http_headers = json.dumps(headers)\n http_body = json.dumps(data)\n \n if self.need_log:\n self.log_obj('POST: {0}'.format(url))\n self.log_obj('HTTP Headers: {0}'.format(http_headers))\n self.log_obj('HTTP Body: {0}'.format(http_body))\n\n # add handler HTTPErrorProcessorNew to get HTTP400's response\n opener = urllib2.build_opener(HTTPErrorProcessorNew)\n\n request = urllib2.Request(url, data=http_body.encode('utf-8'), headers=headers)\n \n try:\n response = opener.open(request, timeout=8)\n except Exception as e:\n return_message = e\n raise APIError(400, return_message, url) # Bad request\n\n content = response.read().decode('utf-8')\n r = json.loads(content)\n if self.need_log:\n self.log_obj('HTTP Response: {0}'.format(content))\n\n return_code = r.get('code', -1)\n return_message = r.get('message', 'No message!!!')\n return_data = r.get('data', None)\n\n if return_code != 200:\n raise APIError(return_code, return_message, url)\n return return_data", "def send_post(url, data, headers, return_output=False):\n req = requests.post(url=url, data=json.dumps(data), headers=headers)\n if return_output:\n return req\n if str(req.status_code).startswith('2'):\n print 'SUCCESS! {0} {1} {2}'.format(req.status_code, req.reason, req.content)\n else:\n print 'FAIL! 
{0} {1} {2}'.format(req.status_code, req.reason, req.content)\n exit(77)", "def send_post(self, api_url, query=None, body=None, data=None):\n\n if body is not None:\n resp = requests.post(self.base_url + api_url, params=query, data=body)\n else:\n resp = requests.post(self.base_url + api_url, params=query, json=data)\n\n return resp", "def post(self, api_path, *args, **kwargs):\n\n\t\treturn self._do_operation(u'post', api_path, *args, **kwargs)", "def post(self, url, body):\n full_url = self.api_url + starts_slash(ends_slash(url))\n headers = {\"Authorization\": \"Bearer \" + self.token}\n logging.info(\"POST url: \" + str(full_url))\n logging.info(\"POST header: \" + str(headers))\n logging.info(\"POST body \" + str(body))\n result = requests.post(url=full_url, headers=headers,\n json=body).json()\n logging.info(\"POST result: \" + str(result))\n return result", "def _post_request(self, url, payload):\n url = self.baseUrl + url\n logger.debug(\"POST %s\", url)\n with self.session.post(url, data=payload) as req:\n try:\n result = req.json()\n except json.decoder.JSONDecodeError as exc:\n raise errors.PluginError(\"no JSON in API response\") from exc\n if result[\"result\"] == \"success\":\n return result[\"data\"]\n raise errors.PluginError(\n \"error in API request: {} / {}\".format(\n result[\"error\"][\"code\"], result[\"error\"][\"description\"]\n )\n )", "def __post(self, url, params, data):\n\n try:\n response = self.session.post(\n url,\n params=params,\n data=data,\n headers=self.__post_headers,\n timeout=(5, 25),\n )\n response.raise_for_status()\n except requests.ConnectionError as error:\n logging.exception(error, exc_info=True)\n raise error from None\n except requests.HTTPError as error:\n try:\n error_data = json.loads(response.text)\n except ValueError:\n logging.error(error)\n raise error from None\n else:\n if \"errors\" in error_data:\n logging.error(\n \"post api call bad params \"\n + error_data[\"errors\"][0][\"message\"]\n )\n raise EntsoeApiBadParams(\n error_data[\"errors\"][0][\"message\"]\n ) from None\n else:\n return json.loads(response.text)", "def _api_post(self, query, data):\n r = requests.post(self._url + query,\n headers={'Content-Type': 'application/json', 'Accept': 'application/json'},\n auth=self._auth, data=json.dumps(data), timeout=self._request_timeout_secs)\n r.raise_for_status()\n return r", "def post(url, headers=None, data=None, json=None, parameters=None):\n log = log_utilities.create_logger()\n response = None\n http_code = None\n\n try:\n r = requests.post(url, data=data, headers=headers, json=json, params=parameters)\n log.info(\"[http_utilities.post] URL: {url}\".format(url=url))\n if parameters:\n log.info(\"[http_utilities.post] parameters: {parameters}\".format(parameters=parameters))\n if headers:\n log.info(\"[http_utilities.post] headers: {headers}\".format(headers=headers))\n if json:\n log.info(\"[http_utilities.post] json: {json}\".format(json=json))\n if data:\n log.info(\"[http_utilities.post] data: {data}\".format(data=data))\n log.info(\"[http_utilities.post] Request sent for URL: {url}\".format(url=r.url))\n response = r\n http_code = r.status_code\n log.info(\"[http_utilities.post] HTTP status code: {http_code}\".format(http_code=http_code))\n except requests.exceptions.RequestException as e:\n log.exception(\"[http_utilities.post] Exception {type} = {exception}\".format(type=type(e), exception=e.message))\n\n return response, http_code", "def post(url, data=None, json=None, **kwargs):\n\n return request('post', url, 
data=data, json=json, **kwargs)", "def post(self, api_path, *args, **kwargs):\n\n return self._do_operation(u'post', api_path, *args, **kwargs)", "def make_post_request(self, url, data):\n auth = (self.AUTH_ID, self.AUTH_TOKEN)\n headers = {'content-type': 'application/json'}\n return requests.post(url, data=data, auth=auth, headers=headers)", "def _post(self, url, **kwargs):\n return self._call('POST', url, kwargs)", "def __post(self, url, payload=None, headers=None):\n if headers is None:\n headers = {\"Content-Type\": \"application/json\"}\n return self.__req(url, \"POST\", body=payload, headers=headers)", "def http_post(self, **kwargs):\n return self.rabjcallable.post(**kwargs)", "def api_post_with_response(self, path, data={}, expected_status=201,\n **kwargs):\n return self.api_call(client_http_method=self.client.post,\n path=path,\n data=data,\n expected_status=expected_status,\n **kwargs)", "def post(self, data=None, params=None):\n params = self.parameters(additional_parameters=params)\n res = post(self.endpoint_url, data=data, params=params)\n return Response(res)", "def post_algorithm():\n try:\n request_json = request.get_json()\n result = json.dumps([])\n response = app.response_class(\n response=result,\n status=200,\n mimetype='application/json')\n except ValueError as e:\n response = app.response_class(\n status=400,\n response=str(e)\n )\n return response", "def post(url, data={}, verify=True):\n data = requests.post(url=url, data=json.dumps(data), verify=verify)\n if data.status_code == 201:\n return data", "def post(self, *args, **kwargs):\n headers = self.post_headers\n headers.update(kwargs.get('headers', {}))\n kwargs['headers'] = headers\n return self._request('post', *args, **kwargs)", "async def post(self, path, params=None, json_data=None):\n response = await self.request('POST', path, params, json_data)\n return response", "def post(call,\n headers=None,\n data=None,\n params=None,\n base=cloudBase,\n no_headers=False,\n raw=False,\n **kwargs):\n return _call(method=requests.post,\n call='{0}{1}'.format(base, call),\n headers=headers,\n data=data,\n params=params,\n no_headers=no_headers,\n raw=raw,\n **kwargs)", "def make_request(payload):\r\n\r\n log.info(\"make_request\\n{0}\\n{1}\".format(settings.API_URL,\r\n payload['method']))\r\n\r\n # log.debug(\"make_request\\n{0}\\n{1}\".format(settings.API_URL,\r\n # json.dumps(payload)))\r\n\r\n response = requests.post(settings.API_URL,\r\n data=json.dumps(payload),\r\n headers={\"content-type\": \"application/json\"},\r\n auth=(settings.API_USER, settings.API_PASS))\r\n\r\n #log.debug(response.content)\r\n response_json = response.json()\r\n\r\n if 'error' in response_json:\r\n log.error(response_json['error'])\r\n\r\n return response_json", "def post(self, api_url, payload, timeout=30):\n return self._request('POST', api_url, data=json.dumps(payload), timeout=timeout)", "def post(self, url, data=None, headers=None, timeout=10):\r\n try:\r\n return self.request(url=url, method='POST', data=data, extra_headers=headers, timeout=timeout)\r\n except requests.exceptions.ProxyError:\r\n return None\r\n except requests.RequestException as error:\r\n if self._debug:\r\n logging.exception(\r\n ''.join(traceback.format_exception(etype=type(error), value=error, tb=error.__traceback__)))\r\n return None", "def api_post(self, path, data):\n return self._api_request(path, 'POST', data)", "def _api_POST(config, function, data_dict):\n if config.verbose:\n click.secho(\"\\nPOSTing to /{}\".format(function), fg='white')\n try:\n ret 
= requests.post(config.url + \"/api/{}\".format(function), data=data_dict)\n except:\n click.secho(\"\\nConnection Refused!...\\n\", fg='red', reverse=True)\n if config.verbose:\n click.secho(\"Server connection was denied. Check your internet connections and try again. Otherwise contact support.\", fg='cyan')\n else:\n click.secho(str(ret.status_code), fg='yellow')\n click.secho(ret.text, fg='yellow')\n return [ret.status_code, ret.text]", "def httpPost(self, url, post_parameters=None):\r\n return self.auth.post(url, post_parameters)", "async def __api_post(self, api: str, data) -> aiohttp.ClientResponse:\n return await self._session.post(f\"{await self._api_url()}/cxrestapi/{api}\", data=data)", "def post_rest_call(api_url, data_dict, username, password, print_output=False):\n headers = {\n 'Content-Type': 'application/json'\n }\n\n response = requests.post(api_url,\n auth=HTTPBasicAuth(username, password),\n data=json.dumps(data_dict),\n headers=headers,\n verify=False,\n timeout=4)\n\n if print_output:\n if response.status_code == 201:\n print(\"POST OK %s (code %d)\" % (api_url, response.status_code))\n elif response.status_code == 200:\n print(\"POST OK %s (code %d)\" % (api_url, response.status_code))\n elif response.status_code == 204:\n print(\"POST OK %s (code %d)\" % (api_url, response.status_code))\n else:\n print(\"POST Failed for: %s (code %d)\" % (api_url, response.status_code))\n print(\" - Data: %s\" % json.dumps(data_dict))\n print(\" - Text: %s\" % response.text)\n return response", "def post(self, **kwargs):\n post_args = deepcopy(kwargs)\n post_args['apikey'] = self.apikey\n post_args['version'] = self.version\n post_args['timestamp'] = timestamp()\n\n # calculate a checksum based on the values and secret key\n post_checksum = self.checksum(**post_args)\n\n # use urllib to post the values\n post_args['checksum'] = post_checksum\n\n params = urlencode(post_args)\n try:\n if Monitis.debug is True:\n print \"Request URL: \" + self.url\n print \"Request params: \" + str(post_args)\n result = urlopen(self.url, params)\n except HTTPError, error:\n raise MonitisError('API Error: ' + error.read())\n ret = result.read()\n if Monitis.debug is True:\n print \"Response: \" + ret\n result.close()\n return ret", "def httpPost(self, url='', data='', params={}, headers={}):\n\n return self.httpRequest('POST', url, data, params, headers)", "def api_post(func, data=None):\n result = {}\n try:\n req = requests.post(FC_URL + func, data=json.dumps(data), verify=False)\n result = req.json()\n result.update({'status': req.status_code})\n except requests.exceptions.RequestException as error:\n result.update({'status': 'ERROR : ' + str(error)})\n except simplejson.errors.JSONDecodeError as error:\n print(str(req))\n print(str(error))\n return result", "def handle_post(self, api, command):\n return self._make_request_from_command('POST', command)", "def _post(self, url, data=None):\n if data is not None:\n data = urllib.urlencode(data)\n return self._request(url, method='POST', payload=data)", "def post(self, url_or_path, *args, **kwargs):\n return self.request.post(url_or_path, *args, **kwargs).json()", "def post_call(url, params=None, json=None, headers=None):\n if params is None:\n params = {}\n\n response = requests.post(url, data=params, json=json, headers=headers)\n\n print response.status_code\n if response.status_code == 401:\n raise RedirectException(reverse('logout'))\n\n return response", "def post(self, *args, **kwargs):\n return self._hit(\"POST\", *args, **kwargs)", "def 
json_post(method):\n def wrap(*args, **kwargs):\n # idx is the position of the data\n idx = 0\n if not isinstance(args[0], webob.Request):\n idx = 1\n\n json_data = json.loads(args[idx].body)\n kwargs['post_data'] = json_data\n\n #print \"JP:\", repr(args), repr(kwargs)\n\n return method(*args, **kwargs)\n \n return json_return(wrap)", "def do_POST(self,):\n self.http_method = 'POST'\n self.response()", "def post(self, url, body):\n return self._query(url, 'POST', json=body)", "def _post_request(url, params):\n data = dumps(params).encode(\"utf-8\")\n request = requests.post(url, data=data)\n return request", "def raw_post(\n self, uri: str, data: Optional[Dict] = None, json: Optional[Dict] = None, **kwargs\n ) -> requests.Response:\n return self.session.post(url=self._url(uri), data=data, json=json, **kwargs)", "def post(self, url, data=None):\r\n response = self.requestHelper.post(url, data=data)\r\n return self.process(response)", "def http_post(call):\n\n verify_ssl = (\n True if \"verify_ssl\" not in call.data.keys() else call.data[\"verify_ssl\"]\n )\n\n headers = basic_headers\n if \"headers\" in call.data.keys():\n headers.update(call.data[\"headers\"])\n\n auth = None\n if \"auth_username\" in call.data.keys() and \"auth_password\" in call.data.keys():\n auth = (\n call.data[\"auth_username\"]\n if \"auth_username\" in call.data.keys()\n else None,\n call.data[\"auth_password\"]\n if \"auth_password\" in call.data.keys()\n else None,\n )\n\n data = None\n\n if \"data\" in call.data.keys():\n data = call.data[\"data\"]\n\n resp = requests.post(\n url=call.data[\"url\"],\n data=data,\n headers=headers,\n verify=verify_ssl,\n timeout=10,\n auth=auth,\n )\n\n return resp.status_code == 200", "def post(self, url, body=None, headers=None, serialize=True):\n return self._request('POST', url, body, headers, serialize)", "async def _perform_api_post_request(self, url, **kwargs):\n error = ''\n json = {}\n max_retries = 5\n for retries in range(max_retries):\n async with self._session.post(url, **kwargs) as resp:\n self.request_count += 1\n status = resp.status\n if resp.status == 504:\n error = 'API timeout'\n self.retry_count += 1\n continue\n try:\n resp.raise_for_status()\n except ClientResponseError:\n error = f'{resp.status}: {resp.reason}'\n continue\n try:\n json = await resp.json()\n except ContentTypeError:\n error = 'Unable to decode JSON'\n self.retry_count += 1\n status = 0\n continue\n json['request_datetime'] = datetime.now()\n break\n\n if retries == max_retries - 1 and error:\n logger.warning(error)\n\n return json, status", "def test_post(self):\n return self.doRequest(self.url, method=\"POST\", body=self.input)", "def simulate_post(app, path, **kwargs) -> _ResultBase:\n return simulate_request(app, 'POST', path, **kwargs)", "def post(\n self, url: str, json: Dict[str, Any], params: Dict[str, Any] = None, headers: Dict[str, Any] = None\n ) -> Response:\n return self._api_client._post(url, json=json, params=params, headers=headers)", "def _post(self, path=\"\", body=None, **kwargs):\n uri = force_json(self.uri + path)\n return self.client.request(uri, method=\"POST\", d=body, **kwargs)", "def _post(self, path, data=None):\n headers = {'content-type': 'application/json'}\n if data:\n data = json.dumps(data)\n r = requests.post(self._url(path), data=data, headers=headers)\n assert r.status_code == 200\n return r", "def http_request(endpoint, data, method='POST'):\n url = BASE_API + endpoint\n data['authkey'] = AUTH_KEY\n\n response = requests.request(method, url=url, 
data=data, timeout=300, verify=VERIFY)\n if response.status_code == 200:\n try:\n return response.json()\n except Exception as e:\n return_error('Response JSON decoding failed due to {}'.format(str(e)))\n\n else:\n return_error('API Returned, {}:{}'.format(response.status_code, response.reason))", "def call_api_post(\n self,\n method: str,\n data: Dict,\n params: Optional[Dict] = None,\n headers: Optional[Dict] = None,\n ) -> Union[Dict, List]:\n return self.__api_call(\n self.__get_url(method), \"POST\", headers=headers, json=data, params=params\n )", "def do_post_json_to_le(url, payload, x_api_key):\n LOGGER.debug(\"Making post request with payload: %s to the url: %s\", payload, url)\n headers = generate_headers(x_api_key)\n return requests.post(url, headers=headers, json=payload)", "def post(self, endpoint, params=None, data=None):\n\n return self._call(requests.post, endpoint, params=params, data=data)", "def post(self, *args, **kwargs):\n return self.handle_post_request()", "def post_via_meraki_api(api_uri, payload):\n url = API_URL + api_uri\n post_data = json.dumps(payload)\n make_post = requests.post(url, headers=api_headers, data=post_data, verify=False)\n if make_post.status_code in [200, 201, 202, 203, 204]:\n data = json.loads(make_update.text)\n logger.info(\"Meraki POST operation suceeded : %s \", api_uri)\n else:\n data = {}\n logger.info(\"Meraki POST Operation failed : %s\", make_update.status_code)\n return data", "def _request_post(self, path, method='POST', body=None, headers=None):\n url = '{}{}'.format(self._url_base, path)\n headers = self._headers() if headers is None else headers\n \n response, content = super(DSBaseService, self)._request(url,\n method=method,\n body=str(body).replace(\"'\", '\"'),\n headers=headers)\n if int(response['status']) in (200, 204):\n if content != \"\":\n res_text = json.loads(content)\n else:\n res_text = \"\"\n post_response = {\n 'status': response['status'],\n 'message': 'SUCCESS',\n 'content': []\n }\n post_response['content'].append(res_text)\n return post_response\n else:\n raise RuntimeError('{} responded with status code {}'.format(url, response['status']))", "def api_post(dev, api_call):\r\n import xmltodict\r\n import pdb\r\n try:\r\n r = requests.post(dev + ':8060' + api_call, timeout=10)\r\n except Exception as exc:\r\n response = [\"ERR\", exc]\r\n return response[0]\r\n except ConnectionError as connerr:\r\n response = [\"ERR\", connerr]\r\n return response[0]\r\n except TimeoutError as toerr:\r\n response = [\"ERR\", toerr]\r\n return response[0], toerr\r\n r_code = r.status_code\r\n if r_code == 200:\r\n print(\"REQUEST WAS A SUCCESS. DEVICE {} RETURNED: {} \".format(n.get(), str(r)))\r\n r2 = r.text\r\n response = f'{r_code} - OK'\r\n return msg_box(response)", "def post(self):\n created = post_tool(request.json)\n return created, 201", "def post(self, url, payload={}):\n response = self._make_request(\"POST\", url, payload)\n\n return response", "def _do_post(self, url, **kwargs):\n #TODO:\n # Add error handling. 
Check for HTTP status here would be much more conveinent than in each calling method\n scaleioapi_post_headers = {'Content-type':'application/json','Version':'1.0'}\n self.logger.debug(\"_do_post()\")\n\n if kwargs:\n for key, value in kwargs.iteritems():\n if key == 'headers':\n scaleio_post_headers = value\n print \"Adding custom POST headers\"\n if key == 'files':\n upl_files = value\n print \"Adding files to upload\"\n try:\n response = self._session.post(url, headers=scaleioapi_post_headers, verify_ssl=self._im_verify_ssl, files=upl_files)\n self.logger.debug(\"_do_post() - Response: \" + \"{}\".format(response.text))\n if response.status_code == requests.codes.ok:\n return response\n else:\n self.logger.error(\"_do_post() - Response Code: \" + \"{}\".format(response.status_code))\n raise RuntimeError(\"_do_post() - HTTP response error\" + response.status_code)\n except:\n raise RuntimeError(\"_do_post() - Communication error with ScaleIO gateway\")\n return response", "def __sign_POST(self, api_url, params, timeout):\r\n sign_str = ''\r\n for key in sorted(params.keys()):\r\n _ = '&' + key + '=' + str(params[key])\r\n sign_str += _\r\n payload_str = 'POST' + '&' + api_url + sign_str\r\n signature = hmac.new(bytes(self.secret, encoding='utf-8'), bytes(payload_str, encoding='utf-8'), digestmod=hashlib.sha256).hexdigest()\r\n params['sign'] = signature\r\n url = self.__base_url + api_url\r\n try:\r\n r = requests.post(url,data=params, timeout=timeout)\r\n r.raise_for_status()\r\n except ReadTimeout:\r\n print(\"post timeout\")\r\n return\r\n except requests.exceptions.HTTPError as err:\r\n print(err)\r\n return\r\n if r.status_code == 200:\r\n return r.json()", "def post(self, api_uri):\n request_body = self.doc.toxml('utf-8')\n\n if self.debug:\n print 'Connecting to %s/%s' % (self.api_host, api_uri)\n\n if self.api_ssl:\n if self.special_ssl:\n kwargs = self.special_ssl\n api = httplib.HTTPSConnection(self.api_host, **kwargs)\n else:\n api = httplib.HTTPSConnection(self.api_host)\n else:\n api = httplib.HTTPConnection(self.api_host)\n\n api.connect()\n api.putrequest('POST', api_uri, skip_host=True)\n api.putheader('Host', self.api_host)\n api.putheader('Content-type', 'text/xml; charset=\"utf-8\"')\n api.putheader(\"Content-length\", str(len(request_body)))\n api.putheader('User-Agent', 'yourdomain.net')\n api.endheaders()\n api.send(request_body)\n\n resp = api.getresponse()\n resp_data = resp.read()\n\n # debug request \n if self.debug:\n print '*** REQUEST:\\n%s' % self.doc.toprettyxml()\n\n # parse API call response\n if not resp.status == 200:\n raise RequestError('Gateway returned %i status' % resp.status)\n #debugging\n if self.debug:\n print 'Full response text: %s' % resp_data\n\n # parse XML response and return as dict\n try:\n resp_dict = self.parse_xml(resp_data)\n except:\n try:\n resp_dict = self.parse_xml('<?xml version=\"1.0\"?><response>%s</response>' % resp_data)\n except:\n raise RequestError('Could not parse XML into JSON')\n\n # optional debug output\n if self.debug:\n print '*** RESPONSE:\\n%s' % resp_data\n\n return resp_dict", "def post(self, *args, **kw):\n kw['method'] = 'POST'\n return self.open(*args, **kw)", "def post_store(url, data, only_status_code=False):\n import requests\n from requests.exceptions import ConnectionError\n logger.debug('POST url %s' % url)\n if isinstance(data, dict):\n try:\n r = requests.post(url, json=data)\n except ConnectionError as e:\n logger.error(e)\n return None\n else:\n try:\n r = requests.post(url, data=data)\n except 
ConnectionError as e:\n logger.error(e)\n return None\n logger.info('Request POST %s returned %s', url, r)\n if only_status_code:\n return r.status_code\n return r", "def _post(service, _params={'f': 'pjson'}, ret_json=True):\n proxies={\n 'http':'http://madara.inei.gob.pe:3128',\n #'http': 'http://kira.inei.gob.pe:3128',\n # 'https': 'https://kira.inei.gob.pe:3128',\n\n }\n\n auth=requests.auth.HTTPProxyAuth('fsoto', 'MBs0p0rt303')\n r = requests.get(service, params=_params, proxies=proxies)\n\n # make sure return\n if r.status_code != 200:\n raise NameError('\"{0}\" service not found!\\n{1}'.format(service, r.raise_for_status()))\n else:\n if ret_json:\n return r.json()\n else:\n return r", "def do_POST(self):\n self.send_response(httplib.OK)\n self.send_header('Content-Type', 'application/octet-stream')\n self.end_headers()\n\n response = remote_api_pb.Response()\n try:\n request = remote_api_pb.Request()\n\n\n\n request.ParseFromString(\n self.rfile.read(int(self.headers['content-length'])))\n api_response = _ExecuteRequest(request).Encode()\n response.set_response(api_response)\n except Exception, e:\n logging.debug('Exception while handling %s\\n%s',\n request,\n traceback.format_exc())\n response.set_exception(pickle.dumps(e))\n if isinstance(e, apiproxy_errors.ApplicationError):\n application_error = response.mutable_application_error()\n application_error.set_code(e.application_error)\n application_error.set_detail(e.error_detail)\n self.wfile.write(response.Encode())", "def post(cls: Type[U], **kwargs) -> U:\n res = Resource(cls.endpoint).post(**kwargs)\n return cls(res)", "def send_post(url):\n HEADERS['accept'] = 'application/vnd.yang.data+json'\n if not url.startswith('/'):\n url = \"/{}\".format(url)\n url = BASE_URL + url\n resp = requests.post(url, headers=HEADERS)\n return resp", "async def post(url, session=None, **kwargs):\n\n method = 'POST'\n resp = await _request(method, url, session=session, **kwargs)\n return resp", "def post_required(func):\n def post_wrapper(request,*args,**kwds):\n res = http.ResponseBuilder()\n if request.method != 'POST':\n return res.error(\"post is required\").build_json()\n return func(request,*args,**kwds)\n return post_wrapper", "def call(self):\n # if this is a POST request, process data\n if self.data:\n post_json = json.dumps(self.data)\n values = {'json': post_json, 'apikey': API_KEY}\n post = urllib.parse.urlencode(values)\n\n else:\n post = None\n\n req = urllib.request.Request(self.url, post)\n\n try:\n self.response = urllib.request.urlopen(req, timeout=self.timeout)\n\n except (URLError, HTTPError, timeout) as error:\n self.response = error", "def post(self, path, body):\n url = urljoin(self.api_endpoint, path)\n response = requests.post(url, json=body, headers=self.headers)\n return self._check_response(response)", "def post(self, *path, **data):\n\t\treturn self.request('POST', *path, **data)", "def apost(url, **kwargs):\n return requests.post(url, **kwargs)", "def post(self, endpoint, data, raw=False):\n url = f\"{self.url}/{endpoint}\"\n self.log.info(f\"creating: {endpoint!r}\")\n response = self.session.post(url, data=convert.dict_to_xml(data))\n \n # return raw requests.Response object before Exception can be raised\n if raw:\n return response\n \n # raise Exception if POST failed\n if response.status_code != 201:\n err = f\"POST failed: {url}: {response}\"\n self.log.error(err)\n self.log.debug(f\"TEXT: {response.text}\")\n raise APIError(response, err)\n \n # return succesful response data (usually {'id': jssid})\n return 
convert.xml_to_dict(response.text)", "def __http_post(self, data, url_path, with_authentication = True):\n\n res = requests.post(self.__http_build_url(url_path), json = data, headers = self.__http_build_headers(with_authentication))\n res.raise_for_status()\n return res.json()", "def mocked_requests_post(*args, **kwargs):\n response = {'message': 'Success!',\n 'data': {}}\n return MockResponse(json.dumps(response), 200, HEADERS)", "def test_api_use_method_post(self):\n body = Body()\n response = self.client.open(\n '/api/use/{method}/'.format(method='method_example'),\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def post(self, path, **post_args):\n return self.request(path, data=post_args, method='POST')", "def post(url_ext, query_params={}, payload={}, custom_err=None, timeout=DEFAULT_TIMEOUT):\r\n url = get_url() + url_ext\r\n headers = get_headers()\r\n r = requests.post(url, headers=headers, params=query_params, data=payload, timeout=timeout)\r\n return handle_response(r, \"POST\", custom_err)", "def post_request(\n self,\n alias,\n uri,\n data=None,\n json=None,\n params=None,\n headers=None,\n files=None,\n allow_redirects=None,\n timeout=None):\n session = self._cache.switch(alias)\n if not files:\n data = utils.format_data_according_to_header(session, data, headers)\n # XXX workaround to restore library default behaviour. Not needed in new keywords\n redir = True if allow_redirects is None else allow_redirects\n\n response = self._common_request(\n \"post\",\n session,\n uri,\n data=data,\n json=json,\n params=params,\n files=files,\n headers=headers,\n allow_redirects=redir,\n timeout=timeout)\n return response", "def post(self, url, obj):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n return self.session.post(url, json=obj, verify=False)", "async def post(self, url, params=None, json_data=None):\n if self._authenticated:\n return await self.__open(url, method=POST, headers=self._head, params=params, json_data=json_data)", "def post(url, data=None, **_):\n # Checks input parameters\n assert '/process' in url\n if has_src:\n stream = data.fields['datafile'][1]\n stream.seek(0)\n assert stream.read() == file_content\n else:\n assert 'datafile' not in data.fields\n assert data.fields['configuration'] == dummy_url\n assert json.loads(data.fields['parameters']) == (\n client._process_parameters)\n\n # Returns fake response\n response = requests.Response()\n response._content = response_json\n response.status_code = 200\n return response" ]
[ "0.7550158", "0.72697294", "0.72025406", "0.7184173", "0.7127111", "0.7112929", "0.7065529", "0.7061984", "0.70534045", "0.7048021", "0.69344455", "0.6925657", "0.68850005", "0.68809766", "0.68611604", "0.6811874", "0.67909265", "0.6754902", "0.6729885", "0.67012155", "0.66943026", "0.66931474", "0.66680205", "0.6667124", "0.66576546", "0.66387516", "0.66087466", "0.659528", "0.65942526", "0.65917677", "0.6568101", "0.65667737", "0.65589434", "0.6552635", "0.65327114", "0.6529524", "0.6528625", "0.6503127", "0.6477651", "0.6476691", "0.64313805", "0.6426087", "0.6411676", "0.64089197", "0.640711", "0.6389375", "0.63724864", "0.637114", "0.6360011", "0.63461053", "0.6343775", "0.634205", "0.63414127", "0.63384604", "0.6329407", "0.6325967", "0.6323198", "0.6320876", "0.63161194", "0.63142884", "0.63117045", "0.6298353", "0.6297754", "0.62952733", "0.62938565", "0.62892604", "0.62776405", "0.6242042", "0.62320864", "0.6218204", "0.62040985", "0.6202643", "0.6201084", "0.61844295", "0.6181762", "0.61754984", "0.6175134", "0.61659116", "0.6160525", "0.6153855", "0.61480486", "0.61425376", "0.61371976", "0.61249995", "0.6109839", "0.610331", "0.6096908", "0.60959643", "0.6085637", "0.60850435", "0.60789704", "0.607863", "0.60691315", "0.60540545", "0.6040644", "0.6038663", "0.6035802", "0.6034752", "0.603379", "0.6032751" ]
0.6983514
10
Validate the response that came back from the API, return True if it's good, False if bad
def _validate_response(self, response): # Check for unexpected response - all should be JSON dicts that have # already been deserialised if not isinstance(response, types.DictionaryType): self.message( "\t\t[!] ERROR - Unexpected value returned from the API: '%s'" % (response)) return False # Check for valid errors if "error" in response and "msg" in response: self.message( "\t\t[!] ERROR - %s (%s)" % (response["msg"], response["timestamp"])) return False # Is this a valid response message if "msg" in response: return True # Catch all...dictionary returned but does not contain expected keys? # Who know's what's going on here?! else: self.message( "\t\t[!] ERROR - Unexpected dictionary response returned from the API: '%s'" % (response)) return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate(self, response):\n return response[\"status_code\"] == 1", "def is_valid_response(self, response):\r\n if response.status_code in VALID_CODES:\r\n return True\r\n return False", "def validate_response(self, response):\n pass", "def validate_response(response: json):\n if \"error\" in response:\n print(\"ERROR: Request returned error\")\n print_request_response(response)\n exit(1)", "def response_check(response):\n print(response)\n print(response.text)\n return response.status_code == 201", "def check_response_errors(self, resp):\n return True", "def validate_response(response):\n assert response.ok\n rpcdict = response.json()\n assert rpcdict['jsonrpc'] == '2.0'\n assert rpcdict['id']\n assert 'error' in rpcdict.keys() or 'result' in rpcdict.keys()", "def user_should_get_an_ok_response():\n assert web_app.validate_reponse()", "def check_response_invalid_fields(response: HTTPResponse) -> bool:\n return response.status_code == 422", "def is_okay(self) -> bool:\n if getattr(self.api_response, \"status_code\", 200) != 200:\n return False\n\n return self.result_code in [0, 810, 820]", "def response_check(response):\n print(response)\n print(response.text)\n return response.status_code == 200", "def check_http_response(self, response, url):\n try:\n response.raise_for_status()\n success = True\n except (requests.HTTPError, ValueError):\n success = False\n excpt_msg = \"Invalid API response.\"\n try:\n excpt_msg = response.headers[\"cause-message\"]\n except:\n try:\n excpt_msg = response.json()[\"error\"][\"message\"][\"value\"]\n except:\n excpt_msg = \"Unknown error ('{0}'), check url in a web browser: '{1}'\".format(response.reason, url)\n api_error = EODataDownResponseException(excpt_msg, response)\n api_error.__cause__ = None\n raise api_error\n return success", "def checkResponseOK(response):\n assert response['result'] == 'OK'", "def check_no_data_response(response: HTTPResponse) -> bool:\n return response.status_code == 422", "def check_response_no_data(response: HTTPResponse) -> bool:\n return response.status_code == 422", "def check_response(response):\n status = response.get('status')\n ret = status and status == 'OK'\n if not ret:\n logging.error('Received unexpected failure response from polyswarmd: %s', response)\n return ret", "def check_response_valid_update(response: HTTPResponse) -> bool:\n return response.status_code == 200", "def verif_response(response):\n if response.status_code >= 200 and response.status_code <= 299:\n logging.debug(\"response server OK::{}\".format(response.text))\n return True\n\n logging.error(\"response server KO::{}\".format(response.text))\n return False", "def validate_response(self, response: requests.Response) -> None:\n if 400 <= response.status_code < 500:\n msg = (\n f\"{response.status_code} Client Error: \"\n f\"{response.reason} for path: {self.path}. 
\"\n f\"Request payload: {response.request.body}\"\n )\n raise FatalAPIError(msg)\n\n elif 500 <= response.status_code < 600:\n msg = (\n f\"{response.status_code} Server Error: \"\n f\"{response.reason} for path: {self.path}\"\n )\n raise RetriableAPIError(msg)", "def is_success(self):\r\n if self.status_code < 400:\r\n return True\r\n return False", "def is_success(self):\r\n if self.status_code < 400:\r\n return True\r\n return False", "def _check_response(self, response):\n if response.status_code == requests.codes.ok:\n # Since the ZenHub REST API does not send back 204 when there is\n # no content, we have to check the Content-Length for 0 :(\n if int(response.headers['Content-Length']):\n return response.json()\n elif response.status_code == requests.codes.not_found:\n return None\n else:\n return response.raise_for_status()", "def is_response_valid(response):\n valid_keys = ('action', 'time', 'data', 'code', 'address')\n if all(key in response for key in valid_keys):\n return True\n raise ValueError", "def the_response_should_be_result(result):\n assert web_app.check_response(result)", "def _is_successful(response) -> bool:\n return response.status_code == 200", "def is_response_ok(cls, data):\n if data is None:\n cls.notify_widget.show_message(\"一个不好的消息\", \"网络出现一点问题\")\n return False\n\n if not isinstance(data, dict):\n return True\n\n if data['code'] == 200:\n return True\n\n cls.notify_widget.show_message(\"一个不好的消息\", \"网络出现一点问题\")\n return False", "def check_response(self, resp):\n\n # For successful API call, response code will be 200 (OK)\n if resp.ok:\n json = resp.json()\n self.response = ResponseHolder()\n self.response.response = json\n\n # Check the code\n if 'status' not in json:\n raise InvalidResponse('No status field')\n\n self.response.status = self.field_to_long(json['status'])\n if self.response.status != EBConsts.STATUS_OK:\n txt_status = self.get_text_status(json)\n raise InvalidStatus('Status is %s (%04X)'\n % (txt_status if txt_status is not None else \"\", self.response.status))\n\n if self.response_checker is not None:\n self.response_checker(self.response)\n\n return self.response\n\n else:\n # If response code is not ok (200), print the resulting http error code with description\n resp.raise_for_status()\n pass", "def is_raw_response(self, response: object) -> bool:", "def fusion_api_validate_response(self, respDict, valDict):\n success = True\n returnDict = {}\n keys = []\n for key in valDict:\n if not valDict[key]:\n continue\n # logger._log_to_console_and_log_file('key: %s' % (key))\n keyDict = {'key': key, 'expected': valDict[\n key], 'actual': respDict[key], 'success': True}\n if key in respDict:\n pattern = re.compile(str(valDict[key]))\n # if not re.search(str(valDict[key]), str(respDict[key])):\n # t = re.compile('(?i)Warning|Unknown|Terminated|Killed|Error|Completed')\n\n if not re.search(pattern, str(respDict[key])):\n\n success = False\n keyDict['success'] = False\n else:\n success = False\n keyDict['success'] = False\n keys.append(keyDict)\n\n returnDict['success'] = success\n returnDict['keys'] = keys\n return returnDict", "def check(self):\n invalid = []\n\n if not self.route:\n invalid.append(('route', 'missing'))\n elif not self.route[1] in ['GET', 'POST', 'PUT']:\n invalid.append(('route', 'invalid method: %s' % self.route[1]))\n\n has_2xx = False\n for rcode in self.return_codes:\n code = rcode[0]\n if code >= 200 and code < 300:\n has_2xx = True\n break\n if not has_2xx:\n invalid.append(('return_codes', 'Missing succes return code 
doc'))\n\n if self.client_auth is None:\n invalid.append(\n ('client_auth', 'Please provide client auth requirement'))\n\n if self.user_auth is None:\n invalid.append(\n ('user_auth', 'Please provide user auth requirement'))\n\n if invalid:\n msgs = []\n for error in invalid:\n msgs.append(\"%s: %s\" % error)\n raise ValueError(\n \"APIFunc for %s is invalid: %s\"\n % (self.viewfunc.__name__,\n ', '.join(msgs)))", "def validate_response(response):\n\n r = response\n try:\n r.raise_for_status()\n except HTTPError as e:\n message = dict(status_code=r.status_code, exception=e)\n\n try:\n response = r.json()\n message['response'] = response\n except JSONDecodeError as e:\n message['response'] = r.content\n\n raise HTTPError(message)", "def _check_response(self, res: requests.Response, token: str) -> None:\n return", "def validate_get_response(self, response):\n\n self.validate_response(response)\n if response.status_code not in self.model._meta['valid_get_status']:\n raise InvalidStatusError(\n self.model._meta['valid_get_status'], response\n )", "def check_status(response):\n if response.status_code == 200:\n return True", "def is_success_response(resp: Response) -> bool:\n return \\\n resp and \\\n is_dict(resp) and \\\n resp.get(\"success\", False) is True", "def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200 \n and content_type is not None)", "def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None)", "def has_error(self, response):\n return response.find(' Matched') == -1 and response.find(' Failed') == -1", "def error_invalid_response(self):\r\n return self.type() == 0x00", "def _is_error_call(self, response):\n status = response.get('ResponseMetadata', {}).get('HTTPStatusCode')\n return status != 200", "def is_error(response: str) -> bool:\n return \"ERROR\" in response", "def validation_check():\n is_valid = blockchain.is_chain_valid(blockchain.chain)\n if is_valid:\n response = {'message': 'Blockchain is valid',\n 'chain': blockchain.chain,\n 'length': len(blockchain.chain)}\n else:\n response = {'error': 'There are errors in the Blockchain',\n 'chain': blockchain.chain,\n 'length': len(blockchain.chain)}\n return jsonify(response), 200", "def is_good_response(res):\n content_type = res.headers['Content-Type'].lower()\n return (res.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)", "def _check_200(self, response):\n if response.code != 200:\n raise YubiKeyVerificationError(\n \"Received {0} response.\".format(response.code))\n return response", "def is_good_response(resp):\n #Headers is a method in requests library\n content_type = resp.headers['Content-Type'].lower()\n \n return (resp.status_code == 200 # Returns true if the website is online and available (statuscode=200)\n #Returns true if content_type exists\n and content_type is not None\n #Returns true if it is an html document or a json document.\n and (content_type.find('json') > -1 or content_type.find('html')))", "def verify(self, response):", "def is_good_response(resp) -> bool:\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)", "async def test_is_error_response() -> None:\n assert not is_error_response(\"True\")\n assert not is_error_response(True)\n assert not is_error_response(\"\")\n assert is_error_response(\n 
json.loads(\n '{\"errorCode\": \"INVALID_NUMBER_PARAMETER_VALUE\", \"minValue\": 0.0, \"maxValue\": 1.01}'\n )\n )\n assert not is_error_response(json.loads('{\"errorCode\": \"\"}'))", "def is_error(self):\r\n if self.status not in [STATUS_CODES['200'], ]:\r\n return True\r\n else:\r\n return False", "def _process_unsuccessful_response(\n self,\n response: Response,\n case: Literal['validate_api_key', 'balances', 'trades', 'asset_movements'],\n ) -> Union[\n list,\n tuple[bool, str],\n ExchangeQueryBalances,\n ]:\n try:\n response_list = jsonloads_list(response.text)\n except JSONDecodeError as e:\n msg = f'{self.name} {case} returned an invalid JSON response: {response.text}.'\n log.error(msg)\n\n if case in ('validate_api_key', 'balances'):\n return False, msg\n if case in ('trades', 'asset_movements'):\n self.msg_aggregator.add_error(\n f'Got remote error while querying {self.name} {case}: {msg}',\n )\n return []\n\n raise AssertionError(f'Unexpected {self.name} response_case: {case}') from e\n\n error_data = self._get_error_response_data(response_list)\n if error_data.error_code == API_ERR_AUTH_NONCE_CODE:\n message = API_ERR_AUTH_NONCE_MESSAGE\n # Errors related with the API key return a human readable message\n elif case == 'validate_api_key' and error_data.error_code == API_KEY_ERROR_CODE:\n message = API_KEY_ERROR_MESSAGE\n else:\n # Below any other error not related with the system clock or the API key\n reason = error_data.reason or response.text\n message = (\n f'{self.name} query responded with error status code: {response.status_code} '\n f'and text: {reason}.'\n )\n log.error(message)\n\n if case in ('validate_api_key', 'balances'):\n return False, message\n if case in ('trades', 'asset_movements'):\n self.msg_aggregator.add_error(\n f'Got remote error while querying {self.name} {case}: {message}',\n )\n return []\n\n raise AssertionError(f'Unexpected {self.name} response_case: {case}')", "def assert_valid_responses(response) -> None:\n assert valid_resp_name in response.text\n assert valid_resp_addr in response.text\n assert response.status_code == 200", "def valid_in_response(self):\n return self._repeatable[1] is not None", "def _check_response(response: requests.Response) -> None:\n logger.debug('Received response:\\n%s', response.content)\n try:\n response.raise_for_status()\n if not response.json()['status']:\n _report_failure('your e-mail address appears to be invalid')\n except requests.exceptions.HTTPError:\n _report_failure()\n except (ValueError, KeyError):\n _report_failure('there was a problem with the server response')", "def check_status():\n try:\n return HTTPClient().fetch(\"https://api.random.org/\").code == 200\n except Exception: # pylint: disable=broad-except\n return False", "def is_good_response(self, resp):\r\n\t\tcontent_type = resp.headers['Content-Type'].lower()\r\n\t\treturn (resp.status_code == 200 and content_type is not None and content_type.find('html') > -1)", "def check_response(rv):\n if rv != 'OK':\n print \"No message found\"\n return False\n return True", "def IsOk(self):\r\n \r\n return True", "def is_good_response(self, resp):\r\n content_type = resp.headers['Content-Type'].lower()\r\n return (resp.status_code == 200 and content_type is not None and content_type.find('html') > -1)", "def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200 and content_type is not None\n and content_type.find('html') > -1)", "def check(self, response, payload):\n # check response body\n if not 
response.text:\n return False\n\n # check for output\n # uid=user gid=group groups=groups\n regex = r\"(uid=\\d+[\\(\\)\\w\\-]+)(\\s+gid=\\d+[\\(\\)\\w\\-]+)(\\s+groups=\\d+[\\(\\)\\w\\-,]+)?\"\n if re.search(regex, response.text):\n return True\n else:\n return False", "def isGoodResponse(resp):\r\n content_type = resp.headers['Content-Type'].lower()\r\n return (resp.status_code == 200\r\n and content_type is not None\r\n and content_type.find('html') > -1)", "def test_generating(resp):\n errors = []\n if not check_int(resp[\"tightness\"]):\n errors.append(\"Invalid type for Itinerary response's 'tightness' field.\")\n\n if not isinstance(resp, bool):\n errors.append(\"Invalid type for Itinerary response's 'start_from_airport' field.\")", "def check_event_registration_response_valid(\n response: HTTPResponse, user_id: user_models.UserId) -> bool:\n try:\n assert response.status_code == 201\n assert response.json()\n event_id = response.json().get(\"event_id\")\n assert check_event_id_added_to_user(event_id, user_id)\n assert check_event_status_set_properly(event_id)\n return True\n except AssertionError as assert_error:\n debug_msg = f\"failed at: {assert_error}. resp json: {response.json()}\"\n logging.debug(debug_msg)\n return False", "def is_good_response(resp):\r\n content_type = resp.headers['Content-Type'].lower()\r\n return (resp.status_code == 200 \r\n and content_type is not None \r\n and content_type.find('html') > -1)", "def test_response_ok():\n\t\n\t# Send GET request to API given endpoint and store the response.\n\tresponse = get_items()\n\n\t# Confirm that the request-response cycle completed successfully.\n\t#assert_true(response.ok)\n\tif ('None' in response): print(\"Failed calling REST API: {}\".format(response))\n\telse: print(\"TC Passed, Response OK: {}\".format(response))", "def __CheckResponse(self, response):\n\n status = response.status\n if (status == httplib.OK or status == httplib.CREATED\n or status == httplib.NO_CONTENT):\n return\n elif (status == httplib.UNAUTHORIZED):\n raise BadCredentialsException\n elif (status == httplib.SERVICE_UNAVAILABLE):\n raise ServerBusyException\n elif (status == httplib.BAD_REQUEST\n or status == httplib.UNPROCESSABLE_ENTITY):\n raise BadArgumentsException\n elif (status == httplib.NOT_FOUND):\n raise NotFoundException\n else:\n raise BadOperationException", "def validate_token(self):\n r = requests.get(urljoin(self._url, Client._token_resource),\n params={\"tokenid\": self._token_id})\n\n if r.status_code == requests.status_codes.codes.unauthorized:\n raise ClientUnauthorized()\n elif r.status_code != requests.status_codes.codes.ok:\n error_messages = self._parse_invalid_request(r.text)\n raise ClientException(r.status_code, error_messages)\n\n try:\n type_, value = r.text.split(\"=\")\n value = value.strip(\" \\r\\n\")\n except Exception, e:\n raise ClientException(r.status_code,\n \"Some error has ocurred getting the result value from %s\"\n % r.text)\n\n return value == \"true\"", "def is_good_response(resp):\n\tcontent_type = resp.headers['Content-Type'].lower()\n\treturn (resp.status_code == 200 \n\t\tand content_type is not None \n\t\tand content_type.find('html') > -1)", "def validate_response(self, response):\n crypted = response[-0x100:]\n # check that not all values are the same\n if all(v == crypted[0] for v in crypted):\n return False\n # return if chunks of 0x10 repeat\n return (len([True for i in range(0x10, len(crypted), 0x10)\n if crypted[:0x10] == crypted[i:i+0x10]])) == 0xf", "def 
_validate_response(request, response, schema_data, schema_resolver):\n try:\n validate_outgoing_response(\n request,\n response,\n schema_data,\n schema_resolver\n )\n except jsonschema.exceptions.ValidationError as exc:\n # This will alter our stack trace slightly, but Pyramid knows how\n # to render it. And the real value is in the message anyway.\n raise HTTPInternalServerError(str(exc))", "def is_good_response(resp):\n\tcontent_type = resp.headers['Content-Type'].lower()\n\treturn (resp.status_code == 200 and content_type is not None and content_type.find('html') > -1)", "def check_no_header_response(response: HTTPResponse) -> bool:\n return response.status_code == 422", "def is_good_response(resp):\r\n content_type = resp.headers['Content-Type'].lower()\r\n return (resp.status_code == 200\r\n and content_type is not None\r\n and content_type.find('html') > -1)", "def check_string():\n\n # Forcing check for valid json and headers with Content-Type:application/json\n content = request.get_json(silent=False, force=True)\n\n payload = content.get('data', None)\n \n if not payload:\n return response_handler(\n {\"error\": \"'data' key missing from JSON payload.\"},\n 400\n )\n if not isinstance(payload, basestring):\n return response_handler(\n {\"error\": \"Value of 'data' key is not of type 'string'.\"},\n 400\n )\n \n pangram = analyze_string(payload)\n if not pangram:\n return response_handler(\n {\"error\": False},\n 400\n )\n\n return response_handler(\n {\"success\": True},\n 200\n )", "def assertValidJSONResponse(self, resp):\r\n self.assertHttpOK(resp)\r\n self.assertTrue(resp['Content-Type'].startswith('application/json'))\r\n self.assertValidJSON(resp.content)", "def verify_object(self, data):\n rv = self.get(data[self.id_field])\n result = not is_404(rv)\n if result:\n for key, value in data:\n if not in_response(rv, value):\n return False\n return result", "def is_ok(r) -> bool:\n\tif r.status_code == 200:\n\t\treturn True", "def mora_assert(response):\n assert response.status_code in (200, 201, 400, 404), response.status_code\n if response.status_code == 400:\n # Check actual response\n assert (\n response.text.find(\"not give raise to a new registration\") > 0\n ), response.text\n logger.debug(\"Request had no effect\")\n return None", "def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200 \n and content_type is not None \n and content_type.find('html') > -1)", "def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200 \n and content_type is not None \n and content_type.find('html') > -1)", "def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200 \n and content_type is not None \n and content_type.find('html') > -1)", "def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200 \n and content_type is not None \n and content_type.find('html') > -1)", "def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)", "def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)", "def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and 
content_type is not None\n and content_type.find('html') > -1)", "def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)", "def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)", "def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)", "def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)", "def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)", "def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)", "def _is_valid_result(result):\n return result.get(\"version\", \"\") != \"\"", "def _assert_bad_request(self, response, field, zendesk_mock_class, datadog_mock):\r\n self.assertEqual(response.status_code, 400)\r\n resp_json = json.loads(response.content)\r\n self.assertTrue(\"field\" in resp_json)\r\n self.assertEqual(resp_json[\"field\"], field)\r\n self.assertTrue(\"error\" in resp_json)\r\n # There should be absolutely no interaction with Zendesk\r\n self.assertFalse(zendesk_mock_class.return_value.mock_calls)\r\n self.assertFalse(datadog_mock.mock_calls)", "def is_good_response(resp):\n content_type = resp.headers[\"Content-Type\"].lower()\n return (\n resp.status_code == 200\n and content_type is not None\n and content_type.find(\"html\") > -1\n )", "def CheckSuccess(self, response):\n if 'error' in response:\n raise ComputeEngineApiError('An operation completed with errors:\\n%s'\n % response)", "async def get_invalid(self, **kwargs: Any) -> bool:\n error_map = {\n 401: ClientAuthenticationError,\n 404: ResourceNotFoundError,\n 409: ResourceExistsError,\n 304: ResourceNotModifiedError,\n }\n error_map.update(kwargs.pop(\"error_map\", {}) or {})\n\n _headers = kwargs.pop(\"headers\", {}) or {}\n _params = kwargs.pop(\"params\", {}) or {}\n\n cls: ClsType[bool] = kwargs.pop(\"cls\", None)\n\n request = build_bool_get_invalid_request(\n headers=_headers,\n params=_params,\n )\n request.url = self._client.format_url(request.url)\n\n _stream = False\n pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access\n request, stream=_stream, **kwargs\n )\n\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n if _stream:\n await response.read() # Load the body in memory and close the socket\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n raise HttpResponseError(response=response)\n\n if response.content:\n deserialized = response.json()\n else:\n deserialized = None\n\n if cls:\n return cls(pipeline_response, cast(bool, deserialized), {})\n\n return cast(bool, deserialized)", "def assert_response_correct(self, response, expected_status, expected_content):\n assert response.status_code == expected_status\n parsed_content = json.loads(response.content.decode('utf-8'))\n assert parsed_content == 
expected_content", "def assert_json_failure_response_is_missing_social_auth(self, response):\r\n self.assertEqual(200, response.status_code) # Yes, it's a 200 even though it's a failure.\r\n payload = json.loads(response.content)\r\n self.assertFalse(payload.get('success'))\r\n self.assertIn('associated with your %s account' % self.PROVIDER_CLASS.NAME, payload.get('value'))", "def successful(self) -> bool:\n return self._unparsed_response is not None", "def verify_response_dict(api_key, response):\n LOGGER.debug('Verifying WSAPI response signature')\n\n # Remove signature from the response\n r = dict(response)\n del r['h']\n\n # Convert to HTML query as that is used by Yubico to sign the response\n query = sorted_urlencode(list(r.iteritems()))\n\n # We unquote it because it's not the HTTP quoted version\n query = urllib.unquote_plus(query)\n\n status = sign(api_key, query) == response['h']\n LOGGER.debug('Signature result ' + str(status))\n return status" ]
[ "0.8024064", "0.78625363", "0.77986264", "0.7515456", "0.74777824", "0.74465626", "0.73384", "0.7265737", "0.7262182", "0.72441804", "0.7198832", "0.712405", "0.7028976", "0.701251", "0.69981617", "0.69959635", "0.6970569", "0.6948191", "0.6897628", "0.68025744", "0.68025744", "0.6800186", "0.67974865", "0.67858255", "0.6780485", "0.676281", "0.67326874", "0.6712715", "0.67036015", "0.66709816", "0.66679287", "0.66633785", "0.66413915", "0.6637445", "0.6618039", "0.6610584", "0.66000175", "0.6586345", "0.65496165", "0.6531632", "0.65289193", "0.6482799", "0.646065", "0.6457958", "0.6457733", "0.6433299", "0.6422639", "0.64125013", "0.6410823", "0.6407977", "0.6402173", "0.6393354", "0.6390981", "0.6384058", "0.6378157", "0.6358686", "0.6338603", "0.63369715", "0.633359", "0.6324581", "0.6315461", "0.6308886", "0.63048613", "0.6301798", "0.62915355", "0.6285658", "0.6281826", "0.62785673", "0.62781996", "0.62777996", "0.6270371", "0.6267701", "0.6242745", "0.6234679", "0.62320304", "0.62277687", "0.6223658", "0.6216222", "0.62107384", "0.62107384", "0.62107384", "0.62107384", "0.62092763", "0.62092763", "0.62092763", "0.62092763", "0.62092763", "0.62092763", "0.62092763", "0.62092763", "0.62092763", "0.6208142", "0.6202829", "0.619694", "0.61769086", "0.61766976", "0.6169507", "0.61694765", "0.6152311", "0.61425745" ]
0.76538676
3
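Aside (illustrative, not one of the dataset records): the negative examples above are all variations on the same response check, namely that the HTTP status is 200 and the Content-Type header advertises HTML. A minimal, self-contained Python 3 sketch of that recurring pattern follows; the use of the requests library and the placeholder URL are assumptions made for the demonstration only, not taken from any record.

import requests


def is_good_response(resp):
    # Case-insensitive header lookup; fall back to "" if the header is missing.
    content_type = resp.headers.get("Content-Type", "").lower()
    return resp.status_code == 200 and "html" in content_type


if __name__ == "__main__":
    response = requests.get("https://example.com")  # placeholder URL
    print(is_good_response(response))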
Validate the supplied json file to make sure it is json in the expected format
def _validate_json(self):
    # Do we find valid json?
    try:
        with open(self.batch_json_path, "rb") as fd:
            batch_json = json.loads(fd.read())
    except Exception as err:
        raise self.message(
            "[-] Error reading JSON batch file '%s' : '%s'" %
            (self.batch_json_path, err))
        return False

    # Does the json represent a dictionary of the expected form?
    if not isinstance(batch_json, types.DictionaryType):
        self.message(
            "[-] JSON batch file '%s' deserialises to unexpected object type '%s'" %
            (self.batch_json_path, type(batch_json)))
        return False

    # If it is a dictionary does it have the expected characteristics?
    for endpoint, sys_info in batch_json.items():

        # Endpoint should be a hostname, IP or some other string
        # identifier, difficult to validate much beyond 'string'
        if type(endpoint) not in [types.StringType, types.UnicodeType]:
            self.message(
                "[-] Element within JSON batch file '%s' conatins unexpected object type for an endpoint element '%s'. %s : %s" %
                (self.batch_json_path, type(endpoint), endpoint, sys_info))
            return False

        # Does the sys_info dict contain the expected keys?
        if set(sys_info.keys()).symmetric_difference(
                set(self.json_batch_template)):
            self.message(
                "[-] Unexpected sys_info structure within JSON batch file %s, expected keys '%s' %s : %s" %
                (self.batch_json_path, self.json_batch_template, endpoint, sys_info))
            return False

        # Create a psuedononymised hash of the uuid using MAC addr as salt
        mac_repr = "0x" + sys_info["mac_addr"].lower().replace(":", "")
        sys_info["hashed_uuid"] = hashlib.sha256(
            mac_repr + sys_info["sys_uuid"]).hexdigest()

        # Remove both the real sys_uuid and the mac_addr from the structure so they do not get submitted to the API
        # and remain confidential to the submitter
        del sys_info["sys_uuid"]
        del sys_info["mac_addr"]

    # Set the read in json structure as the structure of system data to
    # walk and send to the API
    self.endpoints_to_check = batch_json

    self.message("[+] Batch JSON file validated")
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_input(update_file):\n try:\n json.load(open(update_file))\n print \"\\nValid JSON\"\n return True\n except ValueError:\n print \"\\nInvalid JSON\"\n exit(-1)\n return False", "def validate_input(update_file):\n try:\n json.load(open(update_file))\n #print \"Valid JSON\"\n return True\n except ValueError:\n print \"Invalid JSON. Exiting.\"\n exit(-1)\n return False", "def validate_json(self):\n pass", "def test_valid_json():\n invalid_json = False\n for filename in os.listdir(\"../networking\"):\n if filename.endswith(\".cfn.json\"):\n print(\"Validating json file: %s\" % filename)\n with open(f\"../networking/{filename}\", encoding=\"utf-8\") as f:\n try:\n json.load(f)\n print(\"SUCCESS: Valid json.\")\n except ValueError as e:\n print(\"ERROR: Invalid json: %s\" % e)\n invalid_json = True\n\n assert not invalid_json", "def validate_json_file(namespace):\n if namespace.json_file:\n try:\n with open(namespace.json_file) as file_handle:\n json.load(file_handle)\n except EnvironmentError:\n raise ValueError(\"Cannot access JSON request file: \" + namespace.json_file)\n except ValueError as err:\n raise ValueError(\"Invalid JSON file: {}\".format(err))\n # other_values = [arg_name(n) for n in vars(namespace).keys() if getattr(namespace, n)]\n # if other_values:\n # message = \"--json-file cannot be combined with:\\n\"\n # raise ValueError(message + '\\n'.join(other_values))", "def valid_is_json(self):\n return self.file_name.endswith('.json')", "def is_json(filename):\n try:\n with open(filename, 'r') as f:\n dstore = json.load(f)\n except JSONDecodeError:\n return False # In case the file is invalid json file\n return True # In case the file is a valid json file", "def test_validate_file_extension_json(self):\n data_locations = open(self.test_dir + 'mannheim_short.json',\n encoding='utf-8')\n data_locations_false = open(self.test_dir + 'contacts.csv',\n encoding='utf-8')\n a = validate_file_extension_json(data_locations)\n self.assertEqual(a, None)\n with self.assertRaises(ValidationError) as context:\n validate_file_extension_json(data_locations_false)\n data_locations.close()\n data_locations_false.close()\n self.assertTrue(\"Kein gültiges JSON-File\" or \"No valid JSON file\" in\n str(context.exception))", "def validate_json(data: dict) -> bool:\n try:\n assert \"data\" in data.keys()\n assert isinstance(data[\"data\"], str)\n assert \"command\" in data.keys()\n assert isinstance(data[\"command\"], str)\n assert \"time\" in data.keys()\n assert isinstance(data[\"time\"], str)\n assert \"origin\" in data.keys()\n assert isinstance(data[\"origin\"], str)\n return True\n except AssertionError:\n return False", "def test_verifies_token_file_contains_json(self):\n\n with open(self.sample_token_file, 'w',\n encoding=\"utf8\", errors=\"surrogateescape\") as stf_h:\n stf_h.write(\"Bad JSON\")\n\n with self.assertRaises(json.decoder.JSONDecodeError):\n badgr = BadgrLite(token_filename=self.sample_token_file)\n badgr.load_token()", "def validate(self, config_json):\n pass", "def validate_api_resp(actual_json_resp, json_schema_path: str, json_schema_file_name):\n with open(os.path.join(JSON_SCHEMA_ROOT, json_schema_path, json_schema_file_name), 'r') as f:\n json_schema = json.loads(f.read())\n actual_json = json.loads(str(actual_json_resp.data, 'utf-8'))\n jsonschema.validate(actual_json, json_schema)", "def assertValidJSON(self, data):\r\n # Just try the load. 
If it throws an exception, the test case will fail.\r\n self.serializer.from_json(data)", "def parse_json_format(file_path=None):\n is_file_res = check_is_file(file_path)\n if is_file_res['result']:\n with open(file_path) as f:\n if f.readline().strip().startswith('['):\n return generate_response(result='jsonl')\n return generate_response(result='json')\n else:\n return is_file_res", "def validateJSON(jsonData):\n try:\n json.loads(jsonData)\n validate(instance=json.loads(jsonData), schema=read_json_schema(schema_file_path))\n except Exception as err:\n logging.error(err)\n logging.info(\" Message received is not correct \")\n logging.info(\" Message sent to Pulsar Rejection Topic for reprocessing\")\n # IF a message is not correct, I prefer to stop the consumer and fix the problem. Another way will be to\n # Send message to another to topic if the message is not valid and change raise below by pass.\n raise\n return False\n\n return True", "def is_valid_file(parser, arg):\n if not os.path.isfile(arg):\n parser.error(\"The file %s does not seem to be a file at all! Exiting for safety reasons.\" %arg)\n sys.exit(1)\n else:\n if validate_input(arg):\n return True\n else:\n print \"Invalid JSON. Exiting\"\n sys.exit(1)", "def from_json_file(cls, json_file:str):\n with open(json_file) as file:\n data = json.load(file)\n validate(data, schema)\n instance = cls.from_dict(data)\n return instance", "def loadFromFile(self, filename):\n with open(filename, 'r') as file:\n raw_data = file.read()\n # data = json.loads(raw_data, encoding='utf-8') # python 3.9 suppression de encoding\n try:\n data = json.loads(raw_data)\n self.deserialize(data)\n self.has_been_modified = False\n except json.JSONDecodeError:\n raise InvalidFile(f'{os.path.basename(filename)} is not a valid JSON file')\n except Exception as e:\n dumpException(e)", "def is_valid_json(json_str):\n assert json_str is not None\n try:\n json.loads(json_str)\n return True\n except (ValueError, TypeError):\n return False", "def is_valid_json(j):\n try:\n json.dumps(j)\n return True\n except json.JSONDecodeError:\n print(\"not valid json\")\n return False", "def test_loads_a_non_object_json_file(self):\n from test.resources import simple_json\n self.assertEqual(simple_json._data, 'test')", "def test_validate_json(self):\n # Lifted directly from the python-jsonschema docs\n test_schema = {\"type\": \"object\",\n \"properties\": {\n \"price\": {\"type\": \"number\"},\n \"name\": {\"type\": \"string\"},\n }}\n valid = {\"name\": \"Eggs\", \"price\": 34.99}\n invalid = {\"name\": \"Eggs\", \"price\": \"Invalid\"}\n\n test_model = RecordSchema(schema=test_schema)\n\n self.assertIsNone(test_model.validate_json(valid))\n\n with self.assertRaises(jsonschema.exceptions.ValidationError):\n test_model.validate_json(invalid)", "def validate_json() -> bool:\n with Path(ROOT_DIR, \"seals\", \"seals.json\").open() as f:\n seals = json.load(f)\n\n seals_in_json = [k for k, v in seals.items() if v[\"has_seal\"]]\n\n seals = [\n x.split(\"/\")[-1][:-4] for x in glob.glob(f\"{ROOT_DIR}/seals/orig/*\")\n ]\n missing_seals = sorted(list(set(seals_in_json) ^ set(seals)))\n if not missing_seals:\n return True\n\n raise Exception(f\"Missing entry for: {' '.join(missing_seals)}\")", "def process_json(path):\n path = os.path.abspath(os.path.expanduser(path))\n try:\n with open(path) as f:\n return json.load(f, object_hook=ascii_encode_dict)\n except ValueError as e:\n logging.error(\"File: %s\\nInvalid JSON:\\n%s\", str(path), str(e))\n raise\n except IOError as io:\n 
logging.error(\"Provided json file path does not exist %s\", str(path))\n raise", "def _loadJson(self, file):\n # TODO : Is it paranoid checking?\n if os.path.isfile(file):\n try:\n with open(file, 'r') as f:\n data = json.load(f)\n return data\n except ValueError:\n msg = \"Corrupted JSON file => %s\" % file\n # logger.error(msg)\n self._exception(200, msg)\n # return -2 # code for corrupted json file\n else:\n msg = \"File cannot be found => %s\" % file\n self._exception(201, msg)", "def test_json(self, input_file_path: str, answer_file_path: List[Dict]):\n with open(attach_path(input_file_path), 'r') as input_file:\n with open(attach_path(answer_file_path), 'r') as answer_file:\n assert str(read_json(input_file.read().strip())) == answer_file.read().strip()", "def _check_json(json_data: Any, clean: bool) -> Any:\n try:\n json.loads(json_data)\n except ValueError:\n return \"unknown\" if clean else False\n return \"success\" if clean else True", "def _CheckJson(input_api, output_api):\n for affected_file in input_api.AffectedFiles(include_deletes=False):\n filename = affected_file.AbsoluteLocalPath()\n if os.path.splitext(filename)[1] != '.json':\n continue\n try:\n input_api.json.load(open(filename))\n except ValueError:\n return [output_api.PresubmitError('Error parsing JSON in %s!' % filename)]\n return []", "def validate_json(d):\n if d['type'] != 'FeatureCollection':\n raise Exception('JSON file is not a \\\"FeatureColleciton\\\".')\n\n if len(d['features']) != 1:\n raise Exception('JSON file should contain excactly one feature.')\n\n f = d['features'][0]\n\n if 'reference' not in f['properties'].keys():\n raise Exception('Feature property dictionary should contain '\n '\\\"referencey\\\" key.')\n\n if f['type'] != 'Feature':\n raise Exception('Feature type should be \\\"Feature\\\".')\n\n geom = f['geometry']\n\n if geom['type'] != 'MultiPolygon':\n raise Exception('Geometry type should be \\\"MultiPolygon\\\".')\n\n if 'coordinates' not in geom.keys():\n raise Exception('Geometry dictionary should contain \\\"coordinates\\\" '\n 'key.')\n\n polygons = geom['coordinates'][0]\n\n n_polygons = len(polygons)\n for i in range(n_polygons):\n p = polygons[i]\n n_points = len(p)\n if n_points % 2 == 0:\n raise Exception('Number of points in polyon must be odd.')\n\n if p[0] != p[-1]:\n raise Exception('First and last points in polygon must be '\n 'identical.')\n\n n_pairs = int((n_points - 1) / 2)\n for j in range(n_pairs):\n #------------------------------------------------------------------\n # Points are paired and in each pair the top is first, as in:\n #\n # _.-P1-._\n # P0' 'P2---P3\n # | \\\n # P7---P6----P5-------P4\n #\n # Pairs: P0-P7, P1-P6, P2-P5, P3-P4\n #------------------------------------------------------------------\n top_depth = p[j][2]\n bot_depth = p[-(j + 2)][2]\n if top_depth > bot_depth:\n raise Exception(\n 'Top points must be ordered before bottom points.')", "def file_jsoncheck(filename):\n with open(filename, 'r') as jsontable:\n try:\n json_object = json.load(jsontable)\n except ValueError, e:\n return False\n\n # DQLL.json number of lines should be 35\n # Will change with table version\n nlines = 35\n \n with open(filename, 'r') as f:\n l = [x for x in f.readlines()]\n # Default number of lines should be 35\n if len(l) != nlines:\n print \"Number of lines in DQLL.json is not default {} but {}\".format(nlines, len(l))\n return False\n\n return True", "def test_validate_json_validates_schema(self):\n invalid_schema = {\"type\": \"any\"}\n valid_json = {}\n 
test_model = RecordSchema(schema=invalid_schema)\n\n with self.assertRaises(jsonschema.exceptions.SchemaError):\n test_model.validate_json(valid_json)", "def is_json_valid(json_data: dict, json_schema: dict) -> bool:\r\n try:\r\n validate(instance=json_data, schema=json_schema)\r\n except jsonschema.exceptions.ValidationError as err:\r\n return False\r\n return True", "def parse_json(json_file):\n try:\n with open(json_file, \"r\") as file_reader:\n file_contents = file_reader.read()\n return json.loads(file_contents)\n except FileNotFoundError:\n raise FileNotFoundError(\"File not found.\")", "def parse_json():\n parsed = None\n try:\n path = sys.argv[1]\n except IndexError as idx_err:\n try:\n return json.load(sys.stdin)\n except ValueError as err:\n raise (ValueError,'Malformed JSON via stdin. Should have keys incomes, expenses. You can also pass a json file path as an argument')\n else:\n try:\n with open(path, 'r') as data:\n return json.load(data)\n except ValueError as val_err:\n raise(ValueError, 'Malformed JSON! Should have keys incomes, expenses')", "def read_json_file(json_file, project_logger, json_not_found_error_code, json_bad_format_error_code):\n try:\n # Open the file\n with open(json_file) as F:\n # Read the file's contents as a string\n json_str = F.read()\n # Return the data as a Python dictionary\n return json.loads(json_str)\n except FileNotFoundError:\n project_logger.log_error(\n json_not_found_error_code,\n 'Could not open json file \"' + str(json_file) + '\": JSON file could not be found'\n )\n sys.exit(1)\n except json.decoder.JSONDecodeError:\n project_logger.log_error(\n json_bad_format_error_code,\n 'Could not open json file \"' + str(json_file) + '\": JSON file not formatted properly'\n )\n sys.exit(1)", "def isjson(filepath):\n return filepath.lower().endswith('.json')", "def sniff( self, filename ):\r\n try:\r\n json.load( open(filename) )\r\n return True\r\n except Exception:\r\n return False", "def test_tap_config_json_validation_retry_with_invalid_state_and_then_fix(self):\n self._assert_retry_validation_of_json_file(config=self.valid_json_file,\n properties=self.valid_json_file,\n state=self.invalid_json_file)", "def test_tap_config_json_validation_retry_with_invalid_config_and_then_fix(self):\n self._assert_retry_validation_of_json_file(config=self.invalid_json_file,\n properties=self.valid_json_file,\n state=self.valid_json_file)", "def test_tap_config_json_validation_retry_with_invalid_properties_and_then_fix(self):\n self._assert_retry_validation_of_json_file(config=self.valid_json_file,\n properties=self.invalid_json_file,\n state=self.valid_json_file)", "def validator(data_json):\n fields = spec[\"fields\"]\n data = json.loads(data_json, object_pairs_hook=collections.OrderedDict)\n for k, v in fields.items():\n if v.get(\"required\"):\n found = False\n if k in data:\n found = True\n elif \".\" in k:\n # Dotted keys could be nested, like ecs.version\n subkeys = k.split(\".\")\n subval = data\n for subkey in subkeys:\n subval = subval.get(subkey, {})\n if subval:\n found = True\n if not found:\n raise ValidationError(\"Missing required key {}\".format(k))\n if k in data:\n if v[\"type\"] == \"string\" and not (\n isinstance(data[k], str) or isinstance(data[k], basestring)\n ):\n raise ValidationError(\n \"Value {0} for key {1} should be string, is {2}\".format(\n data[k], k, type(data[k])\n )\n )\n if v[\"type\"] == \"datetime\":\n try:\n datetime.datetime.strptime(data[k], \"%Y-%m-%dT%H:%M:%S.%fZ\")\n except ValueError:\n raise ValidationError(\n 
\"Value {0} for key {1} doesn't parse as an ISO datetime\".format(\n data[k], k\n )\n )\n if v.get(\"index\") and list(data.keys())[v.get(\"index\")] != k:\n raise ValidationError(\"Key {0} is not at index {1}\".format(k, index))\n\n return data_json", "def isJson(data):\r\n try:\r\n json.loads(data)\r\n return True\r\n except ValueError:\r\n return False", "def load_from_json(self, file_name: str) -> bool:\n try:\n with open(file_name, 'r') as f:\n data = json.loads(f.read())\n self.__g = DiGraph.from_dict(data)\n return True\n except:\n traceback.print_exc()\n return False", "def test_load_unsupported_type(self):\n expected = {\n \"name\": \"Kevin\",\n \"age\": 21,\n \"pet\": {\n \"name\": \"Trippy Jack\",\n \"age\": 20762,\n \"__type__\": \"hyperdimensional.hamster\"\n }\n }\n with open('tests/unsupported_type.json', 'r') as json_file:\n self.assertEqual(expected, morejson.load(json_file))", "def read_json(file_name):\n try:\n with open(file_name, \"rt\") as input_file:\n return json.loads(input_file.read())\n except TypeError:\n print(\"No valid JSON data!\")\n raise\n except IOError:\n print(\"Could not read file from disk!\")\n raise", "def validate_insert_json(request_json):\n try:\n jsonschema.validate(request_json, schema_input)\n except (jsonschema.exceptions.ValidationError, jsonschema.exceptions.SchemaError, JSONDecodeError) as e:\n current_app.logger.info(\"Invalid json:{}\".format(str(e)))\n raise (InvalidJSONError(\"Invalid json:{}\".format(str(e))))", "def validate(json_resp, schema, validictory_path, schema_base=None):\n # assumes /extern/validictory exists (see /cm for instructions)\n if not validictory_path in sys.path:\n sys.path.append(validictory_path)\n import validictory\n\n try:\n if schema_base and not json_resp[\"$schema\"].startswith(schema_base):\n print \"Warning: JSON schema is \", json_resp[\"$schema\"], \"instead of \", schema_base\n validictory.validate(json_resp, schema, required_by_default=False)\n return True\n except Exception as e:\n print \"Received exception %s while trying to validate: %s\" % (\n str(e), json_resp)\n return False", "def load_json(jsonfile):\n try:\n with open(os.path.join(sys.path[0], jsonfile), \"r\") as f:\n try:\n return json.load(f)\n except json.JSONDecodeError:\n raise json.JSONDecodeError(f\"{jsonfile} is not valid JSON\")\n except IOError:\n raise IOError(f\"{jsonfile} does not exist. 
Please export it from the 'Daily' app.\")", "def read_json(file: str) -> Dict[str, Dict[str, Dict[str, str]]]:\n if os.path.exists(file):\n with open(file, 'r') as input_file:\n return json.load(input_file)\n else:\n print('Invalid signature file.')\n sys.exit(-1)", "def check_json_file(file_name: str, excel_file: str, sheet_name: str) -> list:\n try:\n with open(file_name) as json_file:\n data = json.load(json_file)\n return data\n except FileNotFoundError:\n period_index = excel_file.index(\".\")\n json_file_name = excel_file[:period_index] + \".json\"\n write_json_file(json_file_name, convert_excel_file(excel_file, sheet_name))\n return check_json_file(file_name, excel_file, sheet_name)", "def validate(self, json_data):\n try:\n self.process_json(json_data)\n except ValueError as e:\n # self.process_errors.append(e.args[0])\n self.process_errors = [e.args[0]]\n\n self.errors = list(self.process_errors)\n\n # Run validators\n if not self.errors:\n chain = itertools.chain(self.validators)\n self._run_validation_chain(chain)\n\n return len(self.errors) == 0", "def validate_against_schema(self, json_doc):\n if self.uri not in self.se.validation:\n raise RuntimeError(\"$validation is not defined for {} field; thus the json document could not be validated\".format(self.name))\n else:\n validate(json_doc, self.se.validation[self.uri])\n print('The JSON document is valid')", "def loadJSON(jsonData):\n\n if hasattr(jsonData, 'read'):\n loadedjson = json.load(jsonData)\n elif isinstance(jsonData, str):\n if os.path.exists(jsonData):\n with open(jsonData) as jsonFile:\n loadedjson = json.load(jsonFile)\n else:\n try:\n loadedjson = json.loads(jsonData)\n except JSONDecodeError as e:\n raise ValueError(f\" {str(e)}: Got {jsonData}, either bad format of file does not exist\")\n\n elif isinstance(jsonData, dict):\n loadedjson = jsonData\n else:\n err = f\"workflow type: {type(jsonData)} is unknonw. Must be str, file-like or dict. 
\"\n raise ValueError(err)\n\n\n return loadedjson", "def test_load_json_good_to_dictionary(self):\n self.assertIsInstance(LoadJsonConfig.read_config_file(LoadJsonConfig(),'data/json/conf_ok.json'),OrderedDict)", "def test_input_loadjson(self, fname, groups, hosts):\n with open(fname,'r') as fd:\n fcon = json.loads(fd.read())\n inventory = Inventory()\n inventory.load_inventoryjson(fcon)\n assert inventory.count_groups() == len(groups)\n assert inventory.count_hosts() == len(hosts)", "def is_json(json_str: str) -> bool:\n try:\n json.loads(json_str)\n except ValueError:\n return False\n return True", "def test_metadata_schema_json_invalid(invalid_schema_file, mock_irods):\n\n schema_file_path = 'pytest/assets/{}'.format(invalid_schema_file)\n file_size = os.stat(schema_file_path).st_size\n assert file_size > 0\n file_to_upload = UploadedFile(file=open(schema_file_path, 'rb'),\n name=os.path.basename(schema_file_path), size=file_size)\n files = {\"mi_json_schema_file\": file_to_upload}\n metadata_validation_form = ModelProgramMetadataValidationForm(files=files)\n assert not metadata_validation_form.is_valid()", "def test_load_an_object_json_file(self):\n from test.resources import object_json\n self.assertEqual(object_json._data, {'answer': 42})\n self.assertEqual(len(object_json), 1)\n self.assertEqual(object_json['answer'], 42)", "def is_correct_json(string):\n if len(string) == 0:\n return False\n\n if string[0] is not '{' and string[0] is not '[':\n return False\n\n try:\n json.loads(string)\n except ValueError:\n return False\n\n return True", "def test_load_json_file_not_found_error() -> None:\n fname = \"invalid_file.json\"\n\n assert load_json(fname) == {}\n assert load_json(fname, default=\"\") == \"\"\n assert load_json_object(fname) == {}\n assert load_json_object(fname, default={\"Hi\": \"Peter\"}) == {\"Hi\": \"Peter\"}\n assert load_json_array(fname) == []\n assert load_json_array(fname, default=[\"Hi\"]) == [\"Hi\"]", "def validate_json_schema(self, json_schema):\n cls = validators.validator_for(json_schema)\n cls.check_schema(json_schema)", "def example_json(example_json_file):\n return json.loads(example_json_file)", "def read_json(json_file):\n with open(json_file) as schema:\n val = json.load(schema)\n\n return val", "def load_from_json_file(filename):\n if type(filename) is not str:\n return\n\n with open(filename, mode=\"r\") as file:\n return json.loads(file.read())", "def _read_json(cls, input_file):\n with open(input_file, 'rb') as f:\n return json.load(f)", "def _read_json(cls, input_file):\n with open(input_file, 'rb') as f:\n return json.load(f)", "def _read_json(cls, input_file):\n with open(input_file) as f:\n return json.load(f)", "def _read_json(cls, input_file):\n with open(input_file) as f:\n return json.load(f)", "def test_badFormat(self):\n with open(os.path.join(self.directory, \"service2.json\"), \"w\") as f:\n f.write(\"this is not json\")\n self.pump()\n self.assertNodesEqual(knownNodes(self.disco, \"service2\", \"staging\"), [])", "def read_json_breakdown(cls, fname):\n if not os.path.exists(fname):\n raise RuntimeError\n\n with open(fname, 'r') as data_file:\n return cls.fixup_from_json(data_file.read())", "def test_validation_error_json():\n error = ValidationError(\n type=\"Syntax Error\",\n data={\"data\": [1, 2, 3]},\n )\n\n assert ValidationError(**json.loads(error.json())) == error", "def load_json(file_path):\n try:\n with open(file_path, \"r\", encoding=\"utf-8\") as f:\n data = json.load(f)\n except json.JSONDecodeError as e:\n raise 
ValueError(f\"Invalid JSON format in file {file_path}\") from e\n except FileNotFoundError as e:\n raise ValueError(f\"File not found: {file_path}\") from e\n return data", "def test_metadata_schema_json_valid(mock_irods):\n\n schema_file_path = 'pytest/assets/mi_schema.json'\n with open(schema_file_path, 'r') as file_obj:\n json_schema = file_obj.read()\n assert len(json_schema) > 0\n form_data = {\"mp_program_type\": \"Test Model Program\", \"mi_json_schema\": json_schema}\n metadata_validation_form = ModelProgramMetadataValidationForm(data=form_data)\n assert metadata_validation_form.is_valid()", "def test_data_parse_invalid_json(self):\n lines = ['{\"a\": \"val\" \"b\": \"val2\"}']\n self.assertRaises(TypeError, parser._parse_data, lines)", "def check_job_json(job_info):\n job_type_list = [job_type.value for _, job_type in JobType.__members__.items()]\n if 'source_id' not in job_info:\n raise ValueError(\"Json string Errors, key:source_id not found.\")\n if 'job_id' not in job_info:\n raise ValueError(\"Json string Errors, key:job_id not found.\")\n if 'job_type' not in job_info or not job_info['job_type']:\n raise ValueError(\"Json string Errors, key:job_type not found.\")\n if job_info['job_type'] not in job_type_list:\n raise ValueError(\"Invalid job type: {}.\".format(job_info['job_type']))\n if 'job_content' not in job_info:\n raise ValueError(\"Json string Errors, key:job_content not found.\")", "def test_get_users_from_invalid_json():\n with pytest.raises(ValueError):\n Users.from_json(file_path='{0}/json_input/invalid.json'.format(os.path.dirname(os.path.abspath(__file__))))", "def load_from_json(self, json_fp: str):\n # TODO:\n pass", "def test_tap_config_json_raise_exception_on_invalid_content_for_state_file(self):\n self._assert_raise_exception_on_invalid_file_content(\n test_case_invalid='state',\n invalid_file_contents=(' ', 'foo', '{\"foo\": 1')\n )", "def example_json42(example_json_file42):\n return json.loads(example_json_file42)", "def _read_json(self, filename):\n path = os.path.join(self.script_path, filename)\n try:\n with open(path) as file:\n return json.load(file)\n except Exception as exception:\n print('Error parsing {}: {}'.format(path, str(exception)))\n raise", "def _load_json_schema(filename):\n\n relative_path = join('schemas', filename)\n absolute_path = join(dirname(__file__), relative_path)\n\n with open(absolute_path) as schema_file:\n return json.loads(schema_file.read())", "def test_load_json_fobj():\n\n file_name = 'test_fooof_all'\n\n with open(os.path.join(TEST_DATA_PATH, file_name + '.json'), 'r') as f_obj:\n data = load_json(f_obj, '')\n\n assert data", "def _load_json_schema(filename: str):\n relative_path = path.join('schemas', filename)\n absolute_path = path.join(path.dirname(__file__), relative_path)\n\n with open(absolute_path, 'r', encoding='utf-8') as schema_file:\n schema = json.loads(schema_file.read())\n\n return schema", "def validate_json(self, data, schema):\n validator = jsonschema.Draft7Validator(schema, format_checker=jsonschema.draft7_format_checker)\n errors = validator.iter_errors(data)\n error_list = [(error.message, str(error.path), error) for error in errors]\n return error_list", "def test_read_json_schema():\n json_schema = os.path.join(TEST_DATA_PATH, 'example_schema.json')\n schema_tree = schema.load_schema(json_schema, resolve_references=True)\n schema.check_schema(schema_tree)", "def test_load_json_str():\n\n file_name = 'test_fooof_all'\n\n data = load_json(file_name, TEST_DATA_PATH)\n\n assert data", "def 
load_json(jsonfile):\n with open(jsonfile) as f:\n return json.load(f)", "def load_json(jsonfile):\n with open(jsonfile) as f:\n return json.load(f)", "def is_match(cls, file_path, options=None):\n valid_json_line_count = 0\n total_line_count = 0\n\n if options is None:\n options = dict()\n\n file_encoding = None\n if not isinstance(file_path, StringIO):\n file_encoding = data_utils.detect_file_encoding(file_path=file_path)\n\n with FileOrBufferHandler(file_path, 'r', encoding=file_encoding) \\\n as data_file:\n try:\n json.load(data_file)\n return True\n except (json.JSONDecodeError, UnicodeDecodeError):\n data_file.seek(0)\n\n for k in range(1000):\n total_line_count += 1\n try:\n raw_line = data_file.readline()\n if not raw_line:\n break \n if raw_line.find(\":\") >= 0: # Ensure can be JSON\n json.loads(raw_line) # Check load\n valid_json_line_count += 1\n except UnicodeDecodeError:\n return False\n except ValueError:\n continue\n \n ratio_of_valid_json_line = float(\n valid_json_line_count) / total_line_count\n \n if ratio_of_valid_json_line >= 0.5:\n return True\n else:\n return False", "def example_json43(example_json_file43):\n return json.loads(example_json_file43)", "def validate_schema(self, schema):\n json_schema_path = os.path.join(_ROOT, 'data', 'schema.json')\n json_schema = load_json_or_yaml(json_schema_path)\n return validate(schema, json_schema)", "def validate(self, json_data):\n self._errors = None\n success = True\n for item in self._schema:\n if not item.validate(json_data):\n success = False\n\n return success", "def read_json_file(jsonfile):\n with jsonfile.open('r') as cfile:\n return json.loads(cfile.read())", "def __valid_json(string):\n try:\n obj = json.loads(string)\n except ValueError:\n return False\n else:\n return json.dumps(obj)", "def load_json_file(i):\n\n import json\n\n fn = i['json_file']\n\n try:\n if sys.version_info[0] > 2:\n f = open(fn, 'r', encoding='utf8')\n else:\n f = open(fn, 'r')\n except Exception as e:\n return {'return': 16, 'error': 'problem opening json file='+fn+' ('+format(e)+')'}\n\n try:\n s = f.read()\n except Exception as e:\n f.close()\n return {'return': 1, 'error': 'problem reading json file='+fn+' ('+format(e)+')'}\n\n f.close()\n\n try:\n if sys.version_info[0] > 2:\n d = json.loads(s)\n else:\n d = json.loads(s, encoding='utf8')\n except Exception as e:\n return {'return': 1, 'error': 'problem parsing json from file='+fn+' ('+format(e)+')'}\n\n return {'return': 0, 'dict': d}", "def load_from_json(file):\n with open(file, 'r') as f:\n return json.load(f)", "def test_schema_invalid_json(self):\n schema_0_input = schema_nested_2_invalid_JSON\n\n # if you uncomment this line:\n # schema_0_input = schema_nested_2\n # this will fail the test: Failed: DID NOT RAISE <class 'simplejson.scanner.JSONDecodeError'>\n # because this is a valid schema\n\n with pytest.raises(simplejson.scanner.JSONDecodeError):\n msg = singer.parse_message(schema_0_input)", "def parse_json(json_file: str) -> dict:\n json_data = {}\n\n if not os.path.isfile(json_file):\n error_log(os.path.basename(json_file) + ' does not exist.')\n sys.exit(1)\n\n with open(json_file) as f:\n json_data = json.load(f)\n\n return json_data", "def test_jfpv1_json_load_error(self):\n with self.assertRaises(JSONLoad):\n create('{\"foo\": bar}', hash_function=hash_functions.SHA256, version=1)", "def test_tap_config_json_valid_if_state_file_is_empty(self):\n self._assert_tap_config(\n config=self.valid_json_file,\n properties=self.valid_json_file,\n state=self.empty_file\n )" ]
[ "0.7859762", "0.77667236", "0.7729442", "0.7575843", "0.73721087", "0.7308936", "0.7206595", "0.7121494", "0.708791", "0.7065255", "0.7045056", "0.6959003", "0.6920872", "0.6845389", "0.68421036", "0.6783331", "0.6779957", "0.67763454", "0.67711896", "0.67469054", "0.67342323", "0.6730546", "0.6718417", "0.6714892", "0.66893893", "0.6680733", "0.6672231", "0.6655266", "0.66442186", "0.6641123", "0.66207534", "0.6585875", "0.65808505", "0.65263945", "0.65162015", "0.6511952", "0.6505606", "0.6504517", "0.64779407", "0.64554346", "0.641502", "0.64068645", "0.6400156", "0.6393035", "0.63461244", "0.6344479", "0.63408655", "0.6340276", "0.63341254", "0.63126993", "0.63071436", "0.63055557", "0.6303399", "0.62973946", "0.6284576", "0.626402", "0.62608045", "0.62555355", "0.6236974", "0.62315", "0.6231378", "0.62297815", "0.6222687", "0.6213813", "0.6211483", "0.6211483", "0.6205607", "0.6205607", "0.62012786", "0.61927944", "0.6175774", "0.6166272", "0.6161661", "0.61587626", "0.61449105", "0.6140117", "0.613689", "0.6131295", "0.61148095", "0.61115193", "0.60946983", "0.6092377", "0.6090794", "0.60891134", "0.607239", "0.6071886", "0.60696673", "0.60696673", "0.6067095", "0.6062507", "0.6041576", "0.6037778", "0.6037431", "0.6035301", "0.60261184", "0.6023143", "0.602242", "0.60223424", "0.6021814", "0.60213983" ]
0.70778465
9
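Aside (illustrative, not one of the dataset records): the _validate_json entry above is Python 2 (types.DictionaryType, types.StringType) and leans on its surrounding class. A rough, class-free Python 3 sketch of the same batch-file check is given below; EXPECTED_KEYS is an assumed stand-in for the json_batch_template attribute, which the record references but never lists.

import hashlib
import json

# Assumed key set for the demo only; the real template lives in self.json_batch_template.
EXPECTED_KEYS = {"hw_ver", "rom_ver", "smc_ver", "board_id",
                 "os_ver", "build_num", "sys_uuid", "mac_addr"}


def validate_batch_file(path):
    # Read and parse the batch file, mirroring the try/except in the record above.
    try:
        with open(path, "rb") as fd:
            batch = json.loads(fd.read())
    except (OSError, ValueError) as err:
        print("[-] Error reading JSON batch file %r : %s" % (path, err))
        return None

    # The top-level object must be a dict of endpoint -> sys_info.
    if not isinstance(batch, dict):
        print("[-] Batch file deserialises to unexpected type %s" % type(batch))
        return None

    for endpoint, sys_info in batch.items():
        if not isinstance(endpoint, str) or set(sys_info.keys()) != EXPECTED_KEYS:
            print("[-] Unexpected structure for endpoint %r" % (endpoint,))
            return None

        # Pseudonymise the UUID with the MAC address as salt, then drop both
        # confidential fields before anything is submitted.
        mac_repr = "0x" + sys_info["mac_addr"].lower().replace(":", "")
        sys_info["hashed_uuid"] = hashlib.sha256(
            (mac_repr + sys_info["sys_uuid"]).encode("utf-8")).hexdigest()
        del sys_info["sys_uuid"]
        del sys_info["mac_addr"]

    print("[+] Batch JSON file validated")
    return batch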
Get versions of EFI, Boot ROM, OS & Mac Device as well as the SysUUID
def gather_system_versions(self):
    # Get Mac model ID
    self.hw_version = str(IORegistryEntryCreateCFProperty(
        IOServiceGetMatchingService(0, IOServiceMatching("IOPlatformExpertDevice")),
        "model", None, 0)).replace("\x00", "")

    if "imacpro" in self.hw_version.lower():
        # iMac Pro stores it's EFI data different due it's new architecture
        # so grab the EFI & SMC ROM versions appropriately
        raw_efi_list = []
        raw_rom_info = str(IORegistryEntryCreateCFProperty(
            IORegistryEntryFromPath(0, "IODeviceTree:/rom"),
            "apple-rom-info", None, 0))
        for data in raw_rom_info.split("\n"):
            if data.strip().startswith("BIOS ID"):
                raw_efi_list = data.split(":")[1].strip().split(".")
                break
        else:
            self.message(
                "[-] Could not find raw EFI data to determine EFI versions. Exiting....")
            return False

        self.efi_version = "%s.%s.%s" % (
            raw_efi_list[0], raw_efi_list[2], raw_efi_list[3])

        # Can't currently find the SMC version like this on imac pros ....
        # self.smc_version = str(IORegistryEntryCreateCFProperty(IOServiceGetMatchingService(0, IOServiceMatching("AppleSMC")), "smc-version", None, 0))
        self.smc_version = ""

    else:
        # EFI & SMC ROM versions
        self.smc_version = str(IORegistryEntryCreateCFProperty(
            IOServiceGetMatchingService(0, IOServiceMatching("AppleSMC")),
            "smc-version", None, 0))
        raw_efi = str(IORegistryEntryCreateCFProperty(
            IORegistryEntryFromPath(0, "IODeviceTree:/rom"),
            "version", None, 0)).replace("\x00", "").split(".")
        self.efi_version = "%s.%s.%s" % (raw_efi[0], raw_efi[2], raw_efi[3])

    # Set the salt to be the MAC address of the system, using the MAC as a salt in this manner
    # helps ensure that the hashed sysuuid is pseudonymous. We don't want to know the sysuuid's
    # value, but we do want it to be unique however. The Salt value is
    # never submitted to the API
    salt = hex(getnode())
    sys_uuid = str(IORegistryEntryCreateCFProperty(
        IOServiceGetMatchingService(0, IOServiceMatching("IOPlatformExpertDevice")),
        "IOPlatformUUID", None, 0)).replace("\x00", "")
    self.h_sys_uuid = hashlib.sha256(salt + sys_uuid).hexdigest()

    # Get the Board-ID, this is how EFI files are matched to running
    # hardware - Nastee
    self.board_id = str(IORegistryEntryCreateCFProperty(
        IOServiceGetMatchingService(0, IOServiceMatching("IOPlatformExpertDevice")),
        "board-id", None, 0)).replace("\x00", "")

    # Get OS version
    self.os_version = commands.getoutput("sw_vers -productVersion")

    # Get build number
    self.build_num = commands.getoutput("sw_vers -buildVersion")

    # Carve out the major version as we use this a bunch
    # self.os_maj_ver = ".".join(self.os_version.split(".")[:2])

    # Add gathered info to the dictionary to query the API with
    self.endpoints_to_check["127.0.0.1"] = {
        "hashed_uuid": self.h_sys_uuid,
        "hw_ver": self.hw_version,
        "rom_ver": self.efi_version,
        "smc_ver": self.smc_version,
        "board_id": self.board_id,
        "os_ver": self.os_version,
        "build_num": self.build_num}

    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_firmware_version():\r\n return utils.run('crossystem fwid').stdout.strip()", "def _get_release_infos():\n \n # support RHEL or CentOS, we don't care about the rest...\n with settings(hide('warnings', 'running', 'stdout', 'stderr'), warn_only=True):\n infos = run('cat /etc/redhat-release')\n \n m = _lsb_release_version.match(infos)\n if m is not None:\n return tuple(m.groups())\n else:\n abort('OS not supported.')", "def software_versions():\n\n quiet = 1\n versions = collections.OrderedDict()\n for package in ['python', 'python3', 'robot', 'firefox', 'google-chrome']:\n # Note: \"robot --version\" returns 0x00000000000000fb.\n # Note: If package does not exist, 0x7f is returned.\n rc, version = gc.shell_cmd(package + \" --version\",\n valid_rcs=[0, 0x7f, 0xfb])\n versions[package] = \"Not installed\" if rc == 0x7f else version.rstrip('\\n')\n\n versions.update(import_versions)\n\n for package in ['robotframework-angularjs', 'robotframework-scplibrary',\n 'robotframework-extendedselenium2library']:\n rc, version = gc.shell_cmd(\"pip3 show \" + package\n + \" | grep Version | sed -re 's/.*: //g'\")\n versions[package] = \"Not installed\" if not version else version.rstrip('\\n')\n\n rc, version = gc.shell_cmd(\"lsb_release -d -s\")\n versions[\"host OS\"] = \"Failed\" if not version else version.rstrip('\\n')\n return versions", "def getbootinfo(self):\n self.mount()\n kernel = None\n inits = []\n for line in self.xlist(\"get-bootinfo\", IBASE)[1]:\n if line.startswith('+++'):\n kernel = line.split()[1]\n else:\n inits.append(line)\n self.unmount()\n if not inits:\n run_error(_(\"No initramfs found\"))\n return None\n if not kernel:\n run_error(_(\"GRUB problem:\\n\") + inits[0])\n return None\n return (kernel, inits)", "def micro_Version(self):\n return tuple(map(ord, self._serial_io(b'\\x56', 2)[0:2]))", "def test_get_hyperflex_server_firmware_version_list(self):\n pass", "def get_msi_versions(vm_address):\n content = \"\"\"\nimport _winreg, sys\nversions = {}\nfor base in [\n \"SOFTWARE\\\\Microsoft\\\\Windows\\\\CurrentVersion\\\\Uninstall\",\n \"SOFTWARE\\\\Wow6432Node\\\\Microsoft\\\\Windows\\\\CurrentVersion\\\\Uninstall\"]:\n try:\n uninstall = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, base)\n except WindowsError:\n continue \n try:\n i = 0\n while 1:\n sub = _winreg.EnumKey(uninstall, i)\n subk = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, base+'\\\\'+sub)\n j = 0\n progname = version = None\n try:\n while 1:\n name, value, _ = _winreg.EnumValue(subk, j)\n if name == 'DisplayName':\n progname = value\n if name == 'DisplayVersion':\n version = value\n #print >>sys.stderr, i,j, sub, 'entry', name, value\n j += 1\n except WindowsError:\n pass\n if progname:\n versions[progname] = version\n i += 1\n except WindowsError:\n pass\nprint versions\n\"\"\".replace('\\\\', '\\\\\\\\')\n call_exec_daemon('createFile', ['C:\\\\list_installed_programs.py', content],\n host=vm_address)\n try:\n versions = eval(run_via_exec_daemon(['C:\\\\list_installed_programs.py'],\n host=vm_address))\n print 'INSTALL_TOOLS: versions installed=', versions\n finally:\n call_exec_daemon('removeFile', ['C:\\\\list_installed_programs.py'], host=vm_address)\n pass\n return versions", "def check_fw_versions(self, sys_info, api_results):\n if not api_results.get(\"latest_efi_version\"):\n # Call the API to see what the latest version of EFI you are\n # expected to be running given OS ver and mac model\n api_results[\n self.current_endpoint][\"latest_efi_version\"] = self.__make_api_get(\n 
'/apple/latest_efi_firmware/%s/%s' %\n (sys_info.get(\"hw_ver\"), sys_info.get(\"build_num\")))\n\n self.message(\"\\n\\tEFI firmware version check:\")\n\n # Validate response from API\n if self._validate_response(api_results[\"latest_efi_version\"]):\n # Valid response from API - now interpret it\n\n # This is kind messy but it's so as we can detect newer and older firmware and message accordingly rather than just looking for 'different' versions\n # the way that EFI versions are denoted by Apple makes this more of\n # a pain thatit really needs to be quite honestly\n api_efi_str = api_results[\"latest_efi_version\"][\"msg\"].split(\".\")\n my_efi_str = sys_info.get(\"rom_ver\").split(\".\")\n\n api_efi_ver = int(api_efi_str[1], 16)\n api_efi_build = int(api_efi_str[2].replace(\"B\", \"\"), 16)\n\n if all([x.isdigit() for x in my_efi_str]):\n # Newer EFI versions do not include a build number\n # or the Mac model code. The output will be something\n # like 256.0.0, whereas with the old format it would\n # be MBP133.0256.B00.\n my_efi_ver = int(my_efi_str[0], 16)\n my_efi_build = 0\n else:\n my_efi_ver = int(my_efi_str[1], 16)\n my_efi_build = int(my_efi_str[2].replace(\"B\", \"\"), 16)\n\n if api_efi_str == my_efi_str:\n self.message(\n \"\\t\\t[+] SUCCESS - The EFI Firmware you are running (%s) is the expected version for the OS build you have installed (%s) on your %s\" %\n (sys_info.get(\"rom_ver\"), sys_info.get(\"build_num\"), sys_info.get(\"hw_ver\")))\n elif my_efi_ver == api_efi_ver and my_efi_build == api_efi_build:\n self.message(\n \"\\t\\t[+] SUCCESS - The EFI Firmware you are running (%s) is the expected version for the OS build you have installed (%s) on your %s\" %\n (sys_info.get(\"rom_ver\"), sys_info.get(\"build_num\"), sys_info.get(\"hw_ver\")))\n\n elif (my_efi_ver > api_efi_ver) or (my_efi_ver > api_efi_ver and my_efi_build > api_efi_build) or (my_efi_ver == api_efi_ver and my_efi_build > api_efi_build):\n # Looks like you're running a beta or a dev build - pretty much\n # all bets are off here as the dataset doens't cover dev builds\n # but a nicer message makes sense\n self.message(\n \"\\t\\t[!] ATTENTION - It looks like your EFI version (%s) is NEWER than the latest production release that is in the dataset (%s). This is most likely because you are now, or have in the past, installed a developer preview OS and as part of that you also had newer EFI firmware installed. The EFIgy API currently only has reliable data for production OS releases.\" %\n (sys_info.get(\"rom_ver\"), api_results[\"latest_efi_version\"][\"msg\"]))\n\n else:\n self.message(\n \"\\t\\t[-] ATTENTION - You are running an unexpected firmware version given the model of your system (%s) and OS build you have installed (%s). 
Your firmware is %s, the firmware we expected to see is %s.\\n\" %\n (sys_info.get(\"hw_ver\"), sys_info.get(\"build_num\"), sys_info.get(\"rom_ver\"), api_results[\"latest_efi_version\"][\"msg\"]))", "def get_zhinst_firmware_versions(zi_instruments=None):\n if zi_instruments is None:\n zi_instruments = get_all_connected_zi_instruments()\n\n versions, exceptions = {}, {}\n for node in ['system/fwrevision', 'system/fpgarevision']:\n versions[node] = {}\n for dev in zi_instruments:\n try:\n versions[node][f'{dev.name} - {dev.devname}'] = dev.geti(node)\n except Exception:\n try:\n # for QCodes-based devices\n versions[node][f'{dev.name} - {dev.devname}'] = \\\n dev.daq.getInt(f'{dev.devname}/system/fwrevision')\n except Exception as e:\n exceptions[f'{node} for {dev.devname}'] = e\n return versions, exceptions", "def describe_operating_systems():\n pass", "def __getSuSEVersion(self):\n linuxVendor = \"SuSE\"\n linuxRelease, resultErr = self.ksp_ssh.ssh_execute_command(\n \"grep 'VERSION' /etc/SuSE-release | cut -d= -f2 | tr -d ' \\n'\")\n return linuxVendor.strip(), linuxRelease.strip()", "def get_os_details(self, result, host):\n if \"osmatch\" in result['scan'][host] and len(result['scan'][host][\"osmatch\"]) > 0:\n name = result['scan'][host][\"osmatch\"][0][\"name\"]\n os_family = result['scan'][host][\"osmatch\"][0][\"osclass\"][0][\"osfamily\"]\n os_gen = result['scan'][host][\"osmatch\"][0][\"osclass\"][0][\"osgen\"]\n return [name, os_family, os_gen]\n elif \"osclass\" in result['scan'][host]:\n name = result['scan'][host]['osclass']['vendor']\n os_family = result['scan'][host]['osclass']['osfamily']\n os_gen = result['scan'][host]['osclass']['osgen']\n return [name, os_family, os_gen]\n else:\n return [\"\", \"\", \"\"]", "def firmware_version(self):\n return self._get_system_status()[\"firmware\"]", "def get_version_and_model_spread(devices):\n if isinstance(devices[0], jss.Computer):\n os_type_search = \"hardware/os_name\"\n os_type = \"Mac OS X\"\n os_version_search = \"hardware/os_version\"\n model_search = \"hardware/model\"\n model_identifier_search = \"hardware/model_identifier\"\n else:\n os_type_search = \"general/os_type\"\n os_type = \"iOS\"\n os_version_search = \"general/os_version\"\n model_search = \"general/model\"\n model_identifier_search = \"general/model_identifier\"\n versions, models = [], []\n\n for device in devices:\n if device.findtext(os_type_search) == os_type:\n versions.append(device.findtext(os_version_search) or\n \"No Version Inventoried\")\n models.append(\"%s / %s\" % (\n device.findtext(model_search) or \"No Model\",\n device.findtext(model_identifier_search,) or\n \"No Model Identifier\"))\n version_counts = Counter(versions)\n # Standardize version number format.\n version_counts = fix_version_counts(version_counts)\n model_counts = Counter(models)\n\n total = len(devices)\n\n # Report on OS version spread\n strings = sorted(get_histogram_strings(version_counts, padding=8))\n version_metadata = {\"%s Version Histogram (%s)\" % (os_type, total):\n strings}\n\n # Report on Model Spread\n # Compare on the model identifier since it is an easy numerical\n # sort.\n strings = sorted(get_histogram_strings(model_counts, padding=8),\n cmp=model_identifier_cmp)\n model_metadata = {\"Hardware Model Histogram (%s)\" % total: strings}\n\n return (version_metadata, model_metadata)", "def _get_ilo_firmware_version(self):\n\n manager, reset_uri = self._get_ilo_details()\n ilo_firmware_version = manager['Firmware']['Current']['VersionString']\n return 
{'ilo_firmware_version': ilo_firmware_version}", "def get_hardware_revision():\n return _pigpio_command(_control, _PI_CMD_HWVER, 0, 0)", "def get_os_version(instance):\n if instance.cloud == 'aws':\n client = boto3.client('ec2', instance.region)\n image_id = client.describe_instances(InstanceIds=[instance.id])['Reservations'][0]['Instances'][0]['ImageId']\n return '16.04' if '16.04' in client.describe_images(ImageIds=[image_id])['Images'][0]['Name'] else '14.04'\n if instance.cloud == 'gcp':\n credentials = GoogleCredentials.get_application_default()\n compute = discovery.build('compute', 'v1', credentials=credentials)\n for disk in compute.instances().get(instance=instance.name,\n zone=instance.zone,\n project=instance.project).execute()['disks']:\n if not disk.get('boot'):\n continue\n for value in disk.get('licenses', []):\n if '1604' in value:\n return '16.04'\n if '1404' in value:\n return '14.04'\n return '14.04'\n return '14.04'", "def _GetSystemVersion(self, component, info):\n # Check if we are on mario, then we need to use the legacy parser\n if self.ChromeOSBoard() == 'x86-mario':\n return self._GetSystemVersionMario(component, info)\n items = info.strip().splitlines()\n # This is going to give us a list of lines, we are looking for the\n # following ones:\n # BIOS version: board.xx.xx.xxx.xxx.xx\n # EC version: foobar\n for line in items:\n line_components = line.split(':')\n # The line we are looking for has at least 2 items\n if len(line_components) >= 2 and line_components[0] == component:\n return line_components[1].strip()\n self.fail('Could not locate the following item %s in the return value '\n 'of chromeos-firmwareupdate.' % component)", "def mac_ver(release='', versioninfo=('', '', ''), machine=''):\n\n # First try reading the information from an XML file which should\n # always be present\n info = _mac_ver_xml()\n if info is not None:\n return info\n\n # If that also doesn't work return the default values\n return release, versioninfo, machine", "def init_linuxVersion(self):\n releaseDic = collections.OrderedDict() # 排序的字典\n releaseDic['/etc/oracle-release'] = self.__getOracleVersion\n releaseDic['/etc/redhat-release'] = self.__getRedhatVersion\n releaseDic['/etc/debian_version'] = self.__getDebianVersion\n releaseDic['/etc/SuSE-release'] = self.__getSuSEVersion\n # for releaseFilePath in releaseDic.keys():\n # print(releaseFilePath)\n #\n # releaseDic = {'/etc/oracle-release': self.__getOracleVersion,\n # '/etc/redhat-release': self.__getRedhatVersion,\n # '/etc/debian_version': self.__getDebianVersion,\n # '/etc/SuSE-release': self.__getSuSEVersion}\n for releaseFilePath in releaseDic.keys():\n ret, resultErr = self.ksp_ssh.ssh_execute_command(\n '[[ -f %s ]] && echo \"exist\" || echo \"not exist\"' % releaseFilePath)\n if 'not' in ret:\n continue\n else:\n return releaseDic.get(releaseFilePath, self.__getNullVersion)()\n return \"unknownVendor\", \"unknownRelease\"", "def __getRedhatVersion(self):\n result, resultErr = self.ksp_ssh.ssh_execute_command('cat /etc/redhat-release')\n if \"Red\" in result:\n linuxVendor = \"RedHat\"\n linuxRelease, resultErr = self.ksp_ssh.ssh_execute_command(\n \"cat /etc/redhat-release | sed 's/^Red Hat Enterprise Linux.* release /EL/' | sed 's/[ .].*//'\")\n elif \"CentOS\" in result:\n linuxVendor = \"CentOS\"\n linuxRelease, resultErr = self.ksp_ssh.ssh_execute_command(\n \"cat /etc/os-release | grep -w \\\"VERSION\\\"| sed 's/VERSION=\\\"/EL/' | sed 's/[ .].*//'\")\n elif \"Cloud\" in result:\n linuxVendor = \"CloudLinux\"\n 
linuxRelease, resultErr = self.ksp_ssh.ssh_execute_command(\n \"cat /etc/redhat-release | sed 's/^CloudLinux.*release //' | sed 's/[ .].*//'\")\n else:\n linuxVendor = \"unknownVendor\"\n linuxRelease = \"unknownRelease\"\n return linuxVendor.strip(), linuxRelease.strip()", "def task_get_info(task):\n logger = logging.getLogger(__name__)\n logger.debug('Get JunOS firmware version')\n result = list()\n out = task.run(task=netmiko_send_command,\n command_string=\"show version\", use_textfsm=True)\n# print_result(out)\n if out.failed:\n for host in out.failed_hosts.keys():\n logger.warning(f'Failed task on device {host}')\n task.inventory.hosts[host]['error'] = True\n for host, res in out.items():\n if not res.failed:\n logger.debug(f'Fill JunOS properties {host}')\n task.inventory.hosts[host]['error'] = False\n# with open('output/qtech_show_version.txt','w+') as f:\n# f.write(r.result)\n result.append(parse_info(host, res.result))\n return result", "def get_os_version(self):\n\t\treturn call_sdk_function('PrlSrvInfo_GetOsVersion', self.handle)", "def version():\n cmd = \"{} -v\".format(_detect_os())\n out = __salt__[\"cmd.run\"](cmd).splitlines()\n ret = out[0].split(\": \")\n return ret[1]", "def get_firmware_version(self):\n fw_version = {\n \"BIOS\": self._api_helper.read_txt_file(BIOS_VER_PATH),\n \"BMC\": self.__get_bmc_ver(),\n \"SWITCH_CPLD1\": self.__get_cpld_ver(SW_CPLD1_VER_PATH),\n \"SWITCH_CPLD2\": self.__get_cpld_ver(SW_CPLD2_VER_PATH),\n }.get(self.name, \"Unknown\")\n\n return fw_version", "def parse_os_info(self):\n pipe = subprocess.Popen([self.core_exe, '-o'], 0, None, None, subprocess.PIPE)\n lines = pipe.stdout.readlines()\n x = 0\n json_str = ''\n while x < len(lines):\n json_str += lines[x].decode('utf-8').strip()\n x += 1\n decoder = json.decoder.JSONDecoder()\n decoder.strict = False\n self.os_info = decoder.decode(json_str)\n return self.os_info", "def installedVersion():\n\n cmd = f'{dcm2niix()} -h'\n versionPattern = re.compile(r'v'\n r'(?P<major>[0-9]+)\\.'\n r'(?P<minor>[0-9]+)\\.'\n r'(?P<year>[0-9]{4})'\n r'(?P<month>[0-9]{2})'\n r'(?P<day>[0-9]{2})')\n\n try:\n output = sp.check_output(cmd.split()).decode()\n output = [l for l in output.split('\\n') if 'version' in l.lower()]\n output = '\\n'.join(output).split()\n\n for word in output:\n\n match = re.match(versionPattern, word)\n\n if match is not None:\n return (int(match.group('major')),\n int(match.group('minor')),\n int(match.group('year')),\n int(match.group('month')),\n int(match.group('day')))\n\n except Exception as e:\n log.debug(f'Error parsing dcm2niix version string: {e}')\n return None", "def get_system_information(self):\n\t\tsys = platform.uname()\n\t\treturn {\n\t\t\t'hostname': sys.node,\n\t\t\t'operating_system': sys.system,\n\t\t\t'version': sys.version,\n\t\t\t'release': sys.release,\n\t\t\t'processor' : sys.processor,\n\t\t\t'processor_type': sys.machine,\n\t\t}", "def read_fw_version(self):\n\n # This function expects the firmware version to be in a line\n # prefixed with 'Product Extra'.\n # At the moment, it takes the form:\n # Product Extra : MCH FW V2.18.8 Final (r14042) (Mar 31 2017 - 11:29)\n # The following two parts will be extracted:\n # mch_fw_ver: V2.18.8 Final\n # mch_fw_date: Mar 31 2017 - 11:29\n # If NAT change the format, then this function will need to be updated\n\n pattern = \".*: MCH FW (.*) \\(.*\\) \\((.*)\\)\"\n\n for mch in range(1,3):\n try:\n result = self.mch_comms.call_ipmitool_command([\"fru\", \"print\", str(mch + MCH_FRU_ID_OFFSET)])\n\n for line in 
result.splitlines():\n if FW_TAG in line:\n match = re.match(pattern, line)\n if match:\n self.mch_fw_ver[mch] = match.group(1)\n self.mch_fw_date[mch] = match.group(2)\n else:\n self.mch_fw_ver[mch] = \"Unknown\"\n self.mch_fw_date[mch] = \"Unknown\"\n except CalledProcessError as e:\n self.mch_fw_ver[mch] = \"Unknown\"\n self.mch_fw_date[mch] = \"Unknown\"\n except TimeoutExpired as e:\n print(\"read_fw_version: caught TimeoutExpired exception: {}\".format(e))", "def get_osversion(self):\n\t\treturn call_sdk_function('PrlFoundVmInfo_GetOSVersion', self.handle)", "def uuid_table():\n device_table = popen(\"blkid\").read().splitlines()\n devices = {}\n for device in device_table:\n dev = device.split(\":\")[0].split(\"/\")[2]\n uuid = device.split('UUID=\"')[1].split('\"')[0]\n devices[dev] = uuid\n return devices", "def get_version_info():\n out = \"\\nmpsyt version : %s \" % __version__\n out += \"\\n notes : %s\" % __notes__\n out += \"\\npafy version : %s\" % pafy.__version__\n out += \"\\nPython version : %s\" % sys.version\n out += \"\\nProcessor : %s\" % platform.processor()\n out += \"\\nMachine type : %s\" % platform.machine()\n out += \"\\nArchitecture : %s, %s\" % platform.architecture()\n out += \"\\nPlatform : %s\" % platform.platform()\n out += \"\\nsys.stdout.enc : %s\" % sys.stdout.encoding\n out += \"\\ndefault enc : %s\" % sys.getdefaultencoding()\n out += \"\\nConfig dir : %s\" % get_config_dir()\n envs = \"TERM SHELL LANG LANGUAGE\".split()\n\n for env in envs:\n value = os.environ.get(env)\n out += \"\\nenv:%-11s: %s\" % (env, value) if value else \"\"\n\n return out", "def get_lvfs_detached_signature():\n url = \"https://cdn.fwupd.org/downloads/firmware.xml.gz.asc\"\n ua_string = \"fwupd/1.4.1\"\n r = requests.get(url, headers={\"User-Agent\": ua_string})\n return r.text", "def get_system_spec():\n import pkg_resources\n import platform\n\n if sys.platform == 'darwin':\n system_info = 'macOS {} {}'.format(\n platform.mac_ver()[0],\n platform.architecture()[0],\n )\n else:\n system_info = '{} {} {} {}'.format(\n platform.system(),\n '_'.join(platform.architecture()),\n platform.release(),\n platform.machine(),\n )\n\n system_spec = dict(\n raiden=pkg_resources.require(raiden.__name__)[0].version,\n python_implementation=platform.python_implementation(),\n python_version=platform.python_version(),\n system=system_info,\n )\n return system_spec", "def guest_os_features(self) -> Sequence['outputs.GuestOsFeatureResponse']:\n return pulumi.get(self, \"guest_os_features\")", "def load_os_version_dictionary(self):\n version_dict = {}\n rows = self.cursor.execute(\"SELECT * FROM os_version\")\n for row in rows:\n version_id = int(row[\"id\"])\n version_name = row[\"name\"]\n version_dict[version_id] = version_name\n return version_dict", "def _collect_sonic_os_and_platform_info(duthost, request):\n logger.info(\"Getting SONiC OS version and Testbed platform info.\")\n\n out = duthost.shell(\"cd {0} && show version\".format(DUT_WORKING_DIR))\n _parse_info(out['stdout'], request.config.option.sai_test_report_dir)", "def __getDebianVersion(self):\n ret, resultErr = self.ksp_ssh.ssh_execute_command(\n '[[ -f /etc/lsb-release ]] && echo \"exist\" || echo \"not exist\"')\n if 'not' in ret:\n linuxVendor = \"Debian\"\n linuxRelease, resultErr = self.ksp_ssh.ssh_execute_command(\"awk -F. 
'{print $1}' /etc/debian_version\")\n else:\n linuxVendor, resultErr = self.ksp_ssh.ssh_execute_command(\n \"grep 'DISTRIB_ID' /etc/lsb-release | cut -d= -f2 | tr -d ' \\n'\")\n linuxRelease, resultErr = self.ksp_ssh.ssh_execute_command(\n \"grep 'DISTRIB_RELEASE' /etc/lsb-release | cut -d= -f2 | tr -d ' \\n'\")\n\n return linuxVendor.strip(), linuxRelease.strip()", "def test_get_simulator_udids_by_platform_and_version(self, _, _2):\n self.assertEqual(['A4E66321-177A-450A-9BA1-488D85B7278E'],\n iossim_util.get_simulator_udids_by_platform_and_version(\n 'iPhone 11', '13.2.2'))", "def get_os_version(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetOsVersion', self.handle)", "def vendor_list():\n return ['nxos', 'eos', 'cumulus']", "def version(self):\n done, data = self._request('GV')\n if done:\n return {\n 'firmware': data[0],\n 'protocol': data[1]\n }\n\n raise EvseError", "def get_version_info(self):\n\n try:\n nt_header = self.get_nt_header()\n except ValueError, ve:\n return obj.NoneObject(\"PE file failed initial sanity checks: {0}\".format(ve))\n\n try:\n unsafe = self.obj_vm.get_config().UNSAFE\n except AttributeError:\n unsafe = False\n\n for sect in nt_header.get_sections(unsafe):\n if str(sect.Name) == '.rsrc':\n root = obj.Object(\"_IMAGE_RESOURCE_DIRECTORY\", self.obj_offset + sect.VirtualAddress, self.obj_vm)\n for rname, rentry, rdata in root.get_entries():\n # We're a VERSION resource and we have subelements\n if rname == resource_types['RT_VERSION'] and rentry:\n for sname, sentry, sdata in rdata.get_entries():\n # We're the single sub element of the VERSION\n if sname == 1 and sentry:\n # Get the string tables\n for _stname, stentry, stdata in sdata.get_entries():\n if not stentry:\n return obj.Object(\"_VS_VERSION_INFO\", offset = (stdata.DataOffset + self.obj_offset), vm = self.obj_vm)\n\n return obj.NoneObject(\"Cannot find a _VS_VERSION_INFO structure\")", "def get_version(self):\n verxml = self._ncc.nxoscli('show version')\n self.logger.debug(verxml)\n verparsed = _begin_parse(verxml)\n sysmgrclischema = parse_get_nsmap(verparsed)\n self.logger.debug(\"NSMAP: {}\".format(sysmgrclischema))\n showversion = find_element(['sys_ver_str', 'chassis_id', 'host_name', 'loader_ver_str'], sysmgrclischema,\n verparsed)\n self.logger.debug(str(showversion))\n self.hostname = showversion['host_name']\n self.chassis_id = showversion['chassis_id']\n self.system_version = showversion['sys_ver_str']", "def get_info():\n global PERF_APP\n archs = None\n best_arch = None\n cipher_algos = None\n hash_algos = None\n aead_algos = None\n\n cmd = PERF_APP + ' --print-info'\n\n try:\n res = subprocess.run(cmd, stdout=subprocess.PIPE, \\\n stderr=subprocess.STDOUT, \\\n env=ENVS, shell=True, check=True)\n output = res.stdout.decode('utf-8')\n except subprocess.CalledProcessError as e:\n print(\"Error (\" + str(e.returncode) + \")\")\n print(e.output.decode('utf-8'))\n sys.exit(1)\n\n lines = output.rstrip().split('\\n')\n try:\n for line in lines:\n info = line.split(':')\n if info[0] == 'Supported architectures':\n archs = info[1].split()\n if info[0] == 'Best architecture':\n best_arch = info[1].split()\n if info[0] == 'Supported cipher algorithms':\n cipher_algos = info[1].split()\n if info[0] == 'Supported hash algorithms':\n hash_algos = info[1].split()\n if info[0] == 'Supported aead algorithms':\n aead_algos = info[1].split()\n except:\n print(\"Error parsing --print-info output:\\n\" \\\n \"{}\".format(output), file=sys.stderr)\n\n if archs is None or best_arch is None or cipher_algos 
is None \\\n or hash_algos is None or aead_algos is None:\n print(\"Error parsing system and app information\", file=sys.stderr)\n sys.exit(1)\n\n return archs, best_arch, cipher_algos, hash_algos, aead_algos", "def systemversionstr():\n return platform.uname().system", "def get_device_file_dict():\n cmd = 'lshw -class disk'\n desc = \"description\"\n log_name = \"logical name\"\n serial = \"serial\"\n\n dev = []\n dev_list = []\n\n ret, output, err = run_gluster_command(cmd)\n output = output.decode('ASCII')\n dev_info = output.split('\\n')\n for line in dev_info:\n if re.search(desc, line):\n if dev:\n dev_list.append(dev)\n\n dev = []\n if re.search(log_name, line) or re.search(serial, line):\n temp = line.split(':')\n temp[1] = temp[1].strip(' ')\n dev.append(temp[1])\n dev_list.append(dev)\n for line in dev_list:\n print(line)", "def get_ver(self, bootdefault):\n module = 'version/oper'\n method = 'GET'\n response = self.axapi_call(module, method)\n installedver = response.json()['version']['oper'][bootdefault]\n print(self.device + ' The version currently installed on ' + bootdefault + ' is: ' + installedver)", "def get_host_os_version(self):\n\t\treturn call_sdk_function('PrlLoginResponse_GetHostOsVersion', self.handle)", "def _get_system_hardware(self):\n return self._get_system_status()[\"hardware\"]", "async def get_system_info(hass, include_components):\n\n gate_id = hass.states.get('sensor.ais_secure_android_id_dom').state\n info_object = {\n 'arch': platform.machine(),\n 'dev': 'dev' in current_version,\n 'docker': False,\n 'os_name': platform.system(),\n 'python_version': platform.python_version(),\n 'timezone': dt_util.DEFAULT_TIME_ZONE.zone,\n 'version': current_version,\n 'virtualenv': os.environ.get('VIRTUAL_ENV') is not None,\n 'hassio': hass.components.hassio.is_hassio(),\n 'gate_id': gate_id,\n }\n\n if include_components:\n info_object['components'] = list(hass.config.components)\n\n if platform.system() == 'Windows':\n info_object['os_version'] = platform.win32_ver()[0]\n elif platform.system() == 'Darwin':\n info_object['os_version'] = platform.mac_ver()[0]\n elif platform.system() == 'FreeBSD':\n info_object['os_version'] = platform.release()\n elif platform.system() == 'Linux':\n import distro\n linux_dist = await hass.async_add_job(\n distro.linux_distribution, False)\n info_object['distribution'] = linux_dist[0]\n info_object['os_version'] = linux_dist[1]\n info_object['docker'] = os.path.isfile('/.dockerenv')\n\n return info_object", "def get_idn(self):\n # not all IVVI racks support the version command, so return a dummy\n return -1\n\n idparts = ['QuTech', 'IVVI', 'None', self.version()]\n\n return dict(zip(('vendor', 'model', 'serial', 'firmware'), idparts))", "def driver_version(self):\n data = fcntl.ioctl(self._fd, _EVIOCGVERSION, '\\x00\\x00\\x00\\x00')\n return struct.unpack(\"i\", data)[0]", "def get_version_info() -> Tuple[Text, Text]:", "def find_os_version(self):\n if self.osversion_id is not None:\n ItopapiPrototype.get_itop_class('OSVersion').find(self.osfamily_id)\n return None", "def device_info(self) -> Dict[str, Any]:\n agreement = self.toon.agreement\n model = agreement.display_hardware_version.rpartition('/')[0]\n sw_version = agreement.display_software_version.rpartition('/')[-1]\n return {\n 'identifiers': {\n (DOMAIN, agreement.id),\n },\n 'name': 'Toon Display',\n 'manufacturer': 'Eneco',\n 'model': model,\n 'sw_version': sw_version,\n }", "def test_get_bios_boot_mode_list(self):\n pass", "def get_platform_und_symbols():\n ret = None\n if 
osname_is_freebsd():\n ret = sorted([\"environ\", \"__progname\"])\n if is_verbose():\n print(\"Checking for required UND symbols... \" + str(ret))\n return ret", "def get_info():\n\n #Determine if running on Linux or Mac.\n if platform.system() == 'Linux':\n linux = True\n\n elif platform.system() == \"Darwin\":\n linux = False\n\n if linux:\n from . import linux\n linux.get_info()\n diskinfo = linux.DISKINFO\n\n else:\n from . import macos\n macos.get_info()\n diskinfo = macos.DISKINFO\n\n return diskinfo", "def version(serial, udp):\n\n if udp:\n solo.fido2.force_udp_backend()\n\n try:\n major, minor, patch = solo.client.find(serial).solo_version()\n print(f\"{major}.{minor}.{patch}\")\n except solo.exceptions.NoSoloFoundError:\n print(\"No Solo found.\")\n print(\"If you are on Linux, are your udev rules up to date?\")\n except (solo.exceptions.NoSoloFoundError, ApduError):\n # Older\n print(\"Firmware is out of date (key does not know the SOLO_VERSION command).\")", "def version(serial, udp):\n\n if udp:\n solo.fido2.force_udp_backend()\n\n try:\n major, minor, patch = solo.client.find(serial).solo_version()\n print(f\"{major}.{minor}.{patch}\")\n except solo.exceptions.NoSoloFoundError:\n print(\"No Solo found.\")\n print(\"If you are on Linux, are your udev rules up to date?\")\n except (solo.exceptions.NoSoloFoundError, ApduError):\n # Older\n print(\"Firmware is out of date (key does not know the SOLO_VERSION command).\")", "def get_firmware_version(self):\n request_command = self.parser_invoker.get_firmware_version_command_bytes(self.sequence_id, self.product_id)\n response_command_content = self.connectObj.send_receive_command(request_command)\n return response_command_content", "def system_info():\n requirements = get_requirements(\"sunpy\")\n groups = get_keys_list(requirements)\n extra_groups = get_extra_groups(groups, ['all', 'dev'])\n base_reqs = get_keys_list(requirements['required'])\n extra_reqs = get_keys_list(requirements['all'])\n missing_packages, installed_packages = find_dependencies(package=\"sunpy\", extras=extra_groups)\n extra_prop = {\"System\": platform.system(),\n \"Arch\": f\"{platform.architecture()[0]}, ({platform.processor()})\",\n \"Python\": platform.python_version(),\n \"sunpy\": version(\"sunpy\")}\n sys_prop = {**installed_packages, **missing_packages, **extra_prop}\n print(\"==============================\")\n print(\"sunpy Installation Information\")\n print(\"==============================\")\n print()\n print(\"General\")\n print(\"#######\")\n if sys_prop['System'] == \"Linux\":\n print(f\"OS: {distro.name()} ({distro.version()}, Linux {platform.release()})\")\n elif sys_prop['System'] == \"Darwin\":\n print(f\"OS: Mac OS {platform.mac_ver()[0]}\")\n elif sys_prop['System'] == \"Windows\":\n print(f\"OS: Windows {platform.release()} {platform.version()}\")\n else:\n print(\"Unknown OS\")\n for sys_info in ['Arch', 'sunpy']:\n print(f'{sys_info}: {sys_prop[sys_info]}')\n print(f'Installation path: {distribution(\"sunpy\")._path}')\n print()\n print(\"Required Dependencies\")\n print(\"#####################\")\n for req in base_reqs:\n print(f'{req}: {sys_prop[req]}')\n print()\n print(\"Optional Dependencies\")\n print(\"#####################\")\n for extra_req in extra_reqs:\n print(f'{extra_req}: {sys_prop[extra_req]}')", "def _get_info_about_sensor(self):\n reg_id = 0xD0\n chip_id, chip_version = self.bus.read_i2c_block_data(self.address,\n reg_id,\n 2)\n return chip_id, chip_version", "def _get_system_info(target: Optional[str],\n 
serial_num: Optional[str]) -> Tuple[str, str]:\n\n # TODO(b/242191374): Remove when devices in swarming are no longer booted\n # into zedboot.\n if running_unattended():\n try:\n boot_device(target, BootMode.REGULAR, serial_num)\n except (subprocess.CalledProcessError, StateTransitionError):\n logging.warning('Could not boot device. Assuming in ZEDBOOT')\n return ('', '')\n wait_cmd = common.run_ffx_command(cmd=('target', 'wait', '-t', '180'),\n target_id=target,\n check=False)\n if wait_cmd.returncode != 0:\n return ('', '')\n\n return get_system_info(target)", "def remote_info():\n run('uname -a')", "def osversion():\n return platform()", "def __getOracleVersion(self):\n linuxVendor = \"Oracle\"\n linuxRelease, resultErr = self.ksp_ssh.ssh_execute_command(\n \"cat /etc/oracle-release | sed 's/^Oracle Linux Server release /OL/' | sed 's/[ .].*//' \") # El8\n return linuxVendor.strip(), linuxRelease.strip() # strip()删除开头结尾的空格", "def _syscmd_ver(system='', release='', version='',\n\n supported_platforms=('win32', 'win16', 'dos')):\n if sys.platform not in supported_platforms:\n return system, release, version\n\n # Try some common cmd strings\n import subprocess\n for cmd in ('ver', 'command /c ver', 'cmd /c ver'):\n try:\n info = subprocess.check_output(cmd,\n stdin=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL,\n text=True,\n encoding=\"locale\",\n shell=True)\n except (OSError, subprocess.CalledProcessError) as why:\n #print('Command %s failed: %s' % (cmd, why))\n continue\n else:\n break\n else:\n return system, release, version\n\n # Parse the output\n info = info.strip()\n m = _ver_output.match(info)\n if m is not None:\n system, release, version = m.groups()\n # Strip trailing dots from version and release\n if release[-1] == '.':\n release = release[:-1]\n if version[-1] == '.':\n version = version[:-1]\n # Normalize the version and build strings (eliminating additional\n # zeros)\n version = _norm_version(version)\n return system, release, version", "def get_devices_lsscsi(self):\n\n try:\n message = \"Find SCSI Devices\"\n if self._include_enclosures:\n command = \"lsscsi --generic --transport | egrep 'disk|0x14|enclo'\"\n else:\n command = \"lsscsi --generic --transport | fgrep 'disk|0x14'\"\n pdata = self._run_command(command=command, message=message, logger=self._logger, shell=True)\n #\n # Format:\n # $ lsscsi --generic --transport\n # [0] [1] [2] [3] [4]\n # [0:0:0:0] disk sas:0x5000cca25103b471 /dev/sda /dev/sg0 \n # [0:0:1:0] disk sas:0x5000cca251029301 /dev/sdb /dev/sg1 \n # ...\n # [0:0:14:0] enclosu sas:0x5001636001caa0bd - /dev/sg14\n # [7:0:0:0] cd/dvd usb: 1-1.3:1.2 /dev/sr0 /dev/sg15\n #\n # Special Case:\n # Handle lines without a transport (spaces only). (screen scrapping danger)\n # [0:0:10:0] enclosu sas:0x50030480091d71fd - /dev/sg10\n # [1:0:0:0] disk <spaces> /dev/sdk /dev/sg11 <- INTEL disk!\n #\n # Another SNAFU! (and why I hate screen scrapping!!!)\n # [15:0:53597:0]disk sas:0x5000cca23b359649 /dev/sdg /dev/sg6 \n # [15:0:53598:0]disk sas:0x5000cca23b0c0a99 /dev/sdh /dev/sg7 \n # [15:0:53599:0]disk sas:0x5000cca23b0b7531 /dev/sdi /dev/sg8 \n # ...\n # [15:0:53686:0]enclosu sas:0x5000ccab040001bc - /dev/sg165\n # [15:0:53766:0]enclosu sas:0x5000ccab040001fc - /dev/sg144\n #\n # Evidently, the author of lsscsi did not think of consistent output! 
;(\n #\n for line in pdata['stdout'].splitlines():\n dinfo = line.split()\n device = dict()\n if len(dinfo) < 5:\n m = re.search('(?P<device>disk|\\(0x14\\)|enclosu)', dinfo[0])\n if m:\n device['Device Type'] = m.group('device')\n sas_index = 1\n dev_index = 2\n sg_index = 3\n else:\n continue\n else:\n device['Device Type'] = dinfo[1]\n sas_index = 2\n dev_index = 3\n sg_index = 4\n\n # lsscsi does not understand 'Host Managed' device type.\n if '0x14' in device['Device Type']:\n device['Device Type'] = 'disk'\n\n # Parse remaining information.\n if 'sas:' in dinfo[sas_index]:\n device['SAS Address'] = dinfo[sas_index][4:]\n self._sas_addresses += 1\n else:\n device['SAS Address'] = \"\"\n\n # Note: Enclosure has no driver, so reports '-' for name.\n if '/dev/' in dinfo[dev_index]:\n if self._drives and not dinfo[dev_index] in self._drives:\n continue\n if self._exclude and dinfo[dev_index] in self._exclude:\n continue\n device['Linux Device Name'] = dinfo[dev_index]\n else:\n device['Linux Device Name'] = \"\"\n if '/dev/sg' in dinfo[sg_index]:\n device['SCSI Device Name'] = dinfo[sg_index]\n else:\n device['SCSI Device Name'] = \"\"\n\n self._devices.append(device)\n\n except RuntimeError as exc:\n self._logger.error(\"Failed to acquire SCSI devices: {0}\".format(exc))\n raise exc", "def _dmi_methods(dmi):\n product_uuid = dmi.get('product_uuid')\n chassis_asset_tag = dmi.get('chassis_asset_tag')\n chassis_serial = dmi.get('chassis_serial')\n board_asset_tag = dmi.get('board_asset_tag')\n board_serial = dmi.get('board_serial')\n\n if product_uuid:\n LOG.debug('Generating mercury ID using product_uuid: %s' % product_uuid)\n return _build_hash(product_uuid, META_TYPE_PRODUCT_UUID)\n\n if DMI_DISQUALIFIED_STRING in [chassis_asset_tag, chassis_serial,\n board_asset_tag, board_serial]:\n LOG.debug('Junk in DMI tables: \\'%s\\'' % DMI_DISQUALIFIED_STRING)\n return\n\n if chassis_asset_tag and chassis_serial:\n LOG.debug('Generating mercury ID using chassis asset information: tag=%s, asset=%s' % (\n chassis_asset_tag, chassis_serial))\n return _build_hash(chassis_asset_tag + chassis_serial, META_TYPE_CHASSIS_ASSET_SERIAL)\n\n if board_asset_tag and board_serial:\n LOG.debug('Generating mercury ID using board asset information: tag=%s, asset=%s' % (\n board_asset_tag, board_serial))\n return _build_hash(board_asset_tag + board_serial, META_TYPE_BOARD_ASSET_SERIAL)", "async def get_firmware_version(self):\n current_time = time.time()\n if self.query_reply_data.get(PrivateConstants.REPORT_FIRMWARE) == '':\n await self._send_sysex(PrivateConstants.REPORT_FIRMWARE, None)\n while self.query_reply_data.get(\n PrivateConstants.REPORT_FIRMWARE) == '':\n elapsed_time = time.time()\n if elapsed_time - current_time > 2:\n return None\n await asyncio.sleep(self.sleep_tune)\n reply = ''\n for x in self.query_reply_data.get(PrivateConstants.REPORT_FIRMWARE):\n reply_data = ord(x)\n if reply_data:\n reply += chr(reply_data)\n self.query_reply_data[PrivateConstants.REPORT_FIRMWARE] = reply\n return self.query_reply_data.get(PrivateConstants.REPORT_FIRMWARE)", "def detect_installed_versions():\n versions=[]\n global detected_versions\n if is_windows:\n versions = detect_installed_versions_windows()\n if not versions or len(versions) == 0:\n if is_win64:\n keyname = 'Software\\\\WoW6432Node\\\\Intel\\\\Compilers\\\\C++'\n else:\n keyname = 'Software\\\\Intel\\\\Compilers\\\\C++'\n try:\n k = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE,\n keyname)\n except WindowsError:\n return []\n i = 0\n versions = 
[]\n try:\n while i < 100:\n subkey = SCons.Util.RegEnumKey(k, i) # raises EnvironmentError\n # Check that this refers to an existing dir.\n # This is not 100% perfect but should catch common\n # installation issues like when the compiler was installed\n # and then the install directory deleted or moved (rather\n # than uninstalling properly), so the registry values\n # are still there.\n ok = False\n for try_abi in ('IA32', 'IA32e', 'IA64', 'EM64T'):\n try:\n d = get_intel_registry_value('ProductDir', subkey, try_abi)\n except MissingRegistryError:\n continue # not found in reg, keep going\n if os.path.exists(d): ok = True\n if ok:\n versions.append(subkey)\n else:\n try:\n # Registry points to nonexistent dir. Ignore this\n # version.\n value = get_intel_registry_value('ProductDir', subkey, 'IA32')\n except MissingRegistryError, e:\n\n # Registry key is left dangling (potentially\n # after uninstalling).\n\n print \\\n \"scons: *** Ignoring the registry key for the Intel compiler version %s.\\n\" \\\n \"scons: *** It seems that the compiler was uninstalled and that the registry\\n\" \\\n \"scons: *** was not cleaned up properly.\\n\" % subkey\n else:\n print \"scons: *** Ignoring \"+str(value)\n\n i = i + 1\n except EnvironmentError:\n # no more subkeys\n pass\n elif is_linux or is_mac:\n versions = detect_installed_versions_linux()\n if not versions or len(versions) == 0:\n for d in glob.glob('/opt/intel_cc_*'):\n # Typical dir here is /opt/intel_cc_80.\n m = re.search(r'cc_(.*)$', d)\n if m:\n versions.append(m.group(1))\n for d in glob.glob('/opt/intel/cc*/*'):\n # Typical dir here is /opt/intel/cc/9.0 for IA32,\n # /opt/intel/cce/9.0 for EMT64 (AMD64)\n m = re.search(r'([0-9][0-9.]*)$', d)\n if m:\n versions.append(m.group(1))\n for d in glob.glob('/opt/intel/Compiler/*'):\n # Typical dir here is /opt/intel/Compiler/11.1\n m = re.search(r'([0-9][0-9.]*)$', d)\n if m:\n versions.append(m.group(1))\n for d in glob.glob('/opt/intel/composerxe-*'):\n # Typical dir here is /opt/intel/composerxe-2011.4.184\n m = re.search(r'([0-9][0-9.]*)$', d)\n if m:\n versions.append(m.group(1))\n for d in glob.glob('/opt/intel/composer_xe_*'):\n # Typical dir here is /opt/intel/composer_xe_2011_sp1.11.344\n # The _sp1 is useless, the installers are named 2011.9.x, 2011.10.x, 2011.11.x\n m = re.search(r'([0-9]{0,4})(?:_sp\\d*)?\\.([0-9][0-9.]*)$', d)\n if m:\n versions.append(\"%s.%s\"%(m.group(1), m.group(2)))\n def keyfunc(str):\n \"\"\"Given a dot-separated version string, return a tuple of ints representing it.\"\"\"\n return [int(x) for x in str.split('.')]\n # split into ints, sort, then remove dups\n return sorted(SCons.Util.unique(versions), key=keyfunc, reverse=True)", "def get_device_info(platform_path: str):\n device_name = os.path.basename(platform_path)\n try:\n platform_file = next(\n glob.iglob(os.path.join(glob.escape(platform_path), 'hw', f'*.[xd]sa')))\n except StopIteration as e:\n raise ValueError('cannot find platform file for %s' % device_name) from e\n with zipfile.ZipFile(platform_file) as platform:\n # platform_file must end with .xsa or .dsa, thus [:-4]\n with platform.open(os.path.basename(platform_file)[:-4] +\n '.hpfm') as metadata:\n platform_info = ET.parse(metadata).find('./xd:component/xd:platformInfo',\n XILINX_XML_NS)\n if platform_info is None:\n raise ValueError('cannot parse platform')\n clock_period = platform_info.find(\n \"./xd:systemClocks/xd:clock/[@xd:id='0']\", XILINX_XML_NS)\n if clock_period is None:\n raise ValueError('cannot find clock period in 
platform')\n part_num = platform_info.find('xd:deviceInfo', XILINX_XML_NS)\n if part_num is None:\n raise ValueError('cannot find part number in platform')\n return {\n 'clock_period':\n clock_period.attrib['{{{xd}}}period'.format(**XILINX_XML_NS)],\n 'part_num':\n part_num.attrib['{{{xd}}}name'.format(**XILINX_XML_NS)]\n }", "def get_devices_summary():\n\n # This function was created to replace get_devices_information\n # because it wasn't detecting virtual systems in Palo Alto Virtual Systems\n global nipper_xml\n devices = {}\n headings = []\n\n # Add the table headings to a list\n for h in nipper_xml.findall(\"./summary/table/[@ref='SCOPE.AUDITDEVICELIST.TABLE']/headings/heading\"):\n if h not in headings:\n headings.append(h.text)\n\n for device in nipper_xml.findall(\"./summary/table/[@ref='SCOPE.AUDITDEVICELIST.TABLE']/tablebody/tablerow\"):\n values = []\n for i in device.findall('./tablecell/item'):\n if i not in values:\n values.append(i.text)\n if DEBUG:\n print \"\\t\" + note + \"%s: %s\" % (headings[headings.index('Name')], values[headings.index('Name')])\n print \"\\t\" + note + \"%s: %s\" % (headings[headings.index('Device')], values[headings.index('Device')])\n print \"\\t\" + note + \"%s: %s\" % (headings[headings.index('OS')], values[headings.index('OS')].split(\" \")[0])\n print \"\\t\" + note + \"%s: %s\" % (headings[headings.index('OS')], values[headings.index('OS')].split(\" \")[1])\n devices[values[headings.index('Name')]] = {'name': values[headings.index('Name')],\n 'type': values[headings.index('Device')],\n 'os': values[headings.index('OS')].split(' ')[0],\n 'osversion': values[headings.index('OS')].split(' ')[1]\n }\n\n if DEBUG:\n print info + \"Device Object:\"\n print devices\n raw_input(warn + \"Press enter to continue\")\n return devices", "def get_os_info(hass: HomeAssistant) -> dict[str, Any] | None:\n return hass.data.get(DATA_OS_INFO)", "def __getAamazonVersion(self):\n ret, resultErr = self.ksp_ssh.ssh_execute_command('cat /etc/system-release')\n linuxVendor = \"amzn\"\n # print(ret)\n if 'AMI' in ret:\n linuxRelease = '1'\n else:\n linuxRelease = '2'\n\n return linuxVendor.strip(), linuxRelease.strip()", "def get_machine_version():\n return get_file_content(\"/home/pi/.machineconfig/latest_version\")", "def java_ver(release='', vendor='', vminfo=('', '', ''), osinfo=('', '', '')):\n # Import the needed APIs\n try:\n import java.lang\n except ImportError:\n return release, vendor, vminfo, osinfo\n\n vendor = _java_getprop('java.vendor', vendor)\n release = _java_getprop('java.version', release)\n vm_name, vm_release, vm_vendor = vminfo\n vm_name = _java_getprop('java.vm.name', vm_name)\n vm_vendor = _java_getprop('java.vm.vendor', vm_vendor)\n vm_release = _java_getprop('java.vm.version', vm_release)\n vminfo = vm_name, vm_release, vm_vendor\n os_name, os_version, os_arch = osinfo\n os_arch = _java_getprop('java.os.arch', os_arch)\n os_name = _java_getprop('java.os.name', os_name)\n os_version = _java_getprop('java.os.version', os_version)\n osinfo = os_name, os_version, os_arch\n\n return release, vendor, vminfo, osinfo", "def show_versions():\n sys_info = _get_sys_info()\n deps_info = _get_deps_info()\n\n print(\"\\nSystem:\")\n for k, stat in sys_info.items():\n print(\"{k:>10}: {stat}\".format(k=k, stat=stat))\n\n print(\"\\nPython dependencies:\")\n for k, stat in deps_info.items():\n print(\"{k:>13}: {stat}\".format(k=k, stat=stat))", "async def get_firmware_version(self):\n if self.debug:\n print(\"Sending GET_FIRMWARE_VERSION\")\n\n response = 
await self.call_function(_COMMAND_GETFIRMWAREVERSION)\n if response is None:\n raise RuntimeError('Failed to detect the PN532')\n return tuple(response)", "def getHostFsInfo(hostfs):\n pattern = re.compile('^([^\\.]+)\\.([^\\.]+)\\.([^\\.]+)-(([0-9]+\\.)+([0-9]+))\\.([^\\.]+)$')\n result = pattern.match(hostfs)\n if result is None:\n return None\n else:\n version = result.group(4)\n platform = result.group(1)\n cpu = result.group(2)\n endian = result.group(3)\n ext = result.group(7)\n return {\n 'name': hostfs,\n 'file': hostfs,\n 'filepath': hostfs,\n 'version': version,\n 'platform': platform,\n 'cpu': cpu,\n 'endian': endian,\n 'type': ext\n }", "def unsafe_get_stack_versions():\n stack_selector_path = stack_tools.get_stack_tool_path(stack_tools.STACK_SELECTOR_NAME)\n code, out = call((STACK_SELECT_PREFIX, stack_selector_path, 'versions'))\n versions = []\n if 0 == code:\n for line in out.splitlines():\n versions.append(line.rstrip('\\n'))\n return (code, out, versions)", "def get_versions():\n version_py = os.path.join(os.path.split(__file__)[0], \"src/osmium/version.py\")\n v = {}\n with open(version_py) as version_file:\n # Execute the code in version.py.\n exec(compile(version_file.read(), version_py, 'exec'), v)\n\n return v['pyosmium_release'], v['libosmium_version'], v['protozero_version']", "def getFirmwareVersion(self, *id_list):\n if id_list == ():#Empty list\n return -1\n elif len(id_list) == 1:#Just one ID.\n pkt = Packet.makeReadPacket(id_list[0],xl320.XL320_FIRMWARE_VERSION)\n else:\n pkt = Packet.makeSyncReadPacket(xl320.XL320_FIRMWARE_VERSION,id_list)\n\n ans,err_num,err_str = self.serial.sendPkt(pkt)\n if ans == []:#In case of an empty packet arrives\n return -2\n else:\n data = []\n for index,val in enumerate(id_list):\n #print (index,val)\n data.append(val) #Append the ID value\n data.append(ans[index*12+9])#Append the respective ID's data\n return data", "def get_sw_version():\n done = False\n if len(sys.argv) != 2:\n print(\"Give hostname of the device please!\")\n return\n in_host = sys.argv[1]\n #device_list = ret_device_list()\n token = get_auth_token() # Get Token\n url = \"https://sandboxdnac.cisco.com/api/v1/network-device\"\n hdr = {'x-auth-token': token, 'content-type' : 'application/json'}\n resp = requests.get(url, headers=hdr) # Make the Get Request\n device_list = resp.json()\n for device in device_list['response']:\n if str(device['hostname']) != in_host:\n continue\n device_ip = device['managementIpAddress']\n url = \"https://sandboxdnac.cisco.com/api/v1/network-device/ip-address/\" + device_ip\n hdr = {'x-auth-token': token, 'content-type' : 'application/json'}\n resp = requests.get(url, headers=hdr) # Make the Get Request\n image_details = resp.json()\n sw_version = image_details['response']['softwareVersion']\n print(\"Host: \" + in_host + \" IP: \" + device_ip + \" software version: \" + sw_version + \"\\n\")\n\n # Now suggest the patches\n\n print(\"You need the following Patches: \") \n print(patches[sw_version])\n #pdb.set_trace()\n #page = requests.get('https://wwwin-ottawa.cisco.com/tfoggoa/Scrubber/showquery.html?query=tmondal-7')\n #processed_page = BeautifulSoup(page.content, 'html.parser') \n #page = requests.get('http://www.fabpedigree.com/james/mathmen.htm')\n #processed_page = BeautifulSoup(page.content, 'html.parser')\n #for td in processed_page.select('td'):\n # print(td.text)", "async def _report_firmware(self, sysex_data):\n # first byte after command is major number\n major = sysex_data[1]\n version_string = str(major)\n\n # next 
byte is minor number\n minor = sysex_data[2]\n\n # append a dot to major number\n version_string += '.'\n\n # append minor number\n version_string += str(minor)\n # add a space after the major and minor numbers\n version_string += ' '\n\n # slice the identifier - from the first byte after the minor\n # number up until, but not including the END_SYSEX byte\n\n name = sysex_data[3:-1]\n\n # convert the identifier to printable text and add each character\n # to the version string\n for e in name:\n version_string += chr(e)\n\n # store the value\n self.query_reply_data[PrivateConstants.REPORT_FIRMWARE] = version_string", "def VersionSelect(versions, flavor):\n\n if isinstance(flavor, tuple):\n ids = [versions[i] for i in flavor[1:]]\n return ','.join(ids)\n if toolchainbinaries.IsPnaclFlavor(flavor):\n return versions['PNACL_VERSION']\n if toolchainbinaries.IsX86Flavor(flavor):\n if toolchainbinaries.IsNotNaClNewlibFlavor(flavor):\n return versions['GLIBC_VERSION']\n else:\n return versions['NEWLIB_VERSION']\n if toolchainbinaries.IsArmTrustedFlavor(flavor):\n return versions['ARM_TRUSTED_VERSION']\n raise Exception('Unknown flavor \"%s\"' % flavor)", "def get_tasmota_version():\n matches = []\n with open(tasmotadir + \"/sonoff/sonoff_version.h\", \"r\") as f:\n for line in f:\n matches += findall('0x\\d+', line)\n if len(matches) == 0:\n raise Exception('No tasmota version found.')\n elif len(matches) == 1:\n return matches[0]\n else:\n raise IndexError('Too many tasmota versions found.')", "def usefulFunction():\n print(platform.uname()) #displayed this computer's specifications", "def get_os_match(host):\n for h in host:\n os_match = h.osmatch\n if os_match is not None:\n os_match = str(os_match)\n return os_match.split('\"')[1].split('\"')[0]\n else:\n return \"No OS version available.\"", "def show_versions():\n sys_info = _get_sys_info()\n versions = _get_autogluon_versions()\n sorted_keys = sorted(versions.keys(), key=lambda x: x.lower())\n\n maxlen = 0 if len(versions) == 0 else max(len(x) for x in versions)\n print(\"\\nINSTALLED VERSIONS\")\n print(\"------------------\")\n for k, v in sys_info.items():\n print(f\"{k:<{maxlen}}: {v}\")\n print(\"\")\n for k in sorted_keys:\n print(f\"{k:<{maxlen}}: {versions[k]}\")", "def ex_get_hypervisor_sysinfo(self):\n xml = self.connection.getSysinfo()\n etree = ET.XML(xml)\n\n attributes = [\"bios\", \"system\", \"processor\", \"memory_device\"]\n\n sysinfo = {}\n for attribute in attributes:\n element = etree.find(attribute)\n entries = self._get_entries(element=element)\n sysinfo[attribute] = entries\n\n return sysinfo", "def get_kernel_version():\r\n try:\r\n return utils.run('uname -r').stdout.strip()\r\n except:\r\n logging.info(\"Not Found\")\r\n return -1", "def _extract_nos_version(self, data: str) -> None:\n if self.devtype == \"linux\":\n for line in data.splitlines():\n if line.startswith(\"VERSION_ID\"):\n self.version = line.split('=')[1] \\\n .strip().replace('\"', '')\n break\n else:\n self.version = \"all\"\n self.logger.error(\n f'Cannot parse version from {self.address}:{self.port}')", "def _GetSystemVersionMario(self, component, info):\n items = info.strip().splitlines()\n # This is going to give us a list of lines, we are looking for the\n # following ones:\n # BIOS image: <hash> <path>/board.xx.xx.xxx.xxx.xx\n # EC image: <hash> <path>/board_xxx\n\n # Convert the passed component string into the format of the mario\n if component == 'BIOS version':\n component = 'BIOS'\n if component == 'EC version':\n component = 'EC'\n 
for line in items:\n line_components = line.split()\n if len(line_components) >= 4 and line_components[0].strip() == component:\n return os.path.basename(line_components[3])\n self.fail('Could not locate the following item %s in the return value of '\n 'chromeos-firmwareupdate.' % component)", "def gather_metric(self):\n result = self._shell.run(self.FASTBOOT_COMMAND)\n # If '--version' flag isn't recognized, will print to stderr\n if result.stderr:\n version = self.FASTBOOT_ERROR_MESSAGE\n else:\n # The version is the last token on the first line\n version = result.stdout.splitlines()[0].split()[-1]\n\n response = {self.FASTBOOT_VERSION: version}\n return response", "def get_version():\n major=c_int_t(0)\n minor=c_int_t(0)\n patch=c_int_t(0)\n safe_call(backend.get().af_get_version(c_pointer(major), c_pointer(minor), c_pointer(patch)))\n return major.value,minor.value,patch.value", "def get_devices(self):\n\n \"\"\"\n # Note: This code is no longer required with the latest spt updates.\n # But that said, leaving for now so I don't risk breaking folks!\n if not self._use_lsscsi:\n message = \"Find Number of IOM's\"\n command = \"lsscsi | fgrep enclo | egrep 'HGST|WDC' | wc -l\"\n pdata = self._run_command(command=command, message=message, logger=self._logger, shell=True)\n ioms = (int)(pdata['stdout'].strip())\n if ioms > 1:\n self._use_lsscsi = True\n if not self._use_lsscsi and os.path.exists('/etc/multipath.conf'):\n self._use_lsscsi = True\n \"\"\"\n # Allow above logic or options to override lsscsi vs. spt usage.\n if not self._use_lsscsi or self._force_spt:\n self.get_devices_spt()\n else:\n self.get_devices_lsscsi()\n return", "def extract_version_info():\n version = None\n if os.path.exists('.version'):\n with open('.version') as f:\n line = f.read().rstrip()\n log.info('.version contains \"%s\"', line)\n if line.startswith('openafs-'):\n # Extract version from the git tag name.\n version = re.sub('openafs-[^-]*-', '', line).replace('_', '.')\n elif line.startswith('BP-'):\n # Branch point tags do not contain the version number.\n log.info('.version file has old branch point tag name.')\n else:\n # Use the given version string.\n version = line\n if not version:\n # Unable to lookup version from the .version file, try to extract the\n # version from the source directory name.\n root = os.path.basename(os.path.abspath('.'))\n m = re.match(r'openafs-(.*)', root)\n if m:\n version = m.group(1)\n if not version:\n module.fail_json(msg='Unable to determine version.')\n\n # Determine package version and release from the OpenAFS version.\n m1 = re.match(r'(.*)(pre[0-9]+)', version) # prerelease\n m2 = re.match(r'(.*)dev', version) # development\n m3 = re.match(r'(.*)-([0-9]+)-(g[a-f0-9]+)$', version) # development\n m4 = re.match(r'(.*)-([a-z]+)([0-9]+)', version) # custom\n if m1:\n v = m1.group(1)\n r = \"0.{0}\".format(m1.group(2))\n elif m2:\n v = m2.group(1)\n r = \"0.dev\"\n elif m3:\n v = m3.group(1)\n r = \"{0}.{1}\".format(m3.group(2), m3.group(3))\n elif m4:\n v = m4.group(1).replace('-', '')\n r = \"1.2.{0}.{1}\".format(m4.group(3), m4.group(2))\n else:\n v = version # standard release\n r = \"1\" # increment when repackaging this version\n # '-' are used as delimiters by rpm.\n v = v.replace('-', '_')\n r = r.replace('-', '_')\n return dict(openafs_version=version, package_version=v, package_release=r)" ]
[ "0.6673707", "0.6660692", "0.6526378", "0.628747", "0.62810564", "0.62695277", "0.62047887", "0.619877", "0.6136058", "0.613013", "0.61185354", "0.61024153", "0.60582775", "0.6051702", "0.5979974", "0.5965632", "0.59549516", "0.59407663", "0.59400725", "0.5934368", "0.5919144", "0.591702", "0.59029275", "0.5858749", "0.58469945", "0.58390516", "0.5832411", "0.58212596", "0.58085746", "0.5805433", "0.57807493", "0.5778628", "0.57608485", "0.5760208", "0.575006", "0.5730053", "0.571837", "0.5716534", "0.56997067", "0.568724", "0.56838036", "0.5679563", "0.56680125", "0.5659949", "0.5658879", "0.5657397", "0.56544363", "0.5646125", "0.5644681", "0.56377757", "0.56342345", "0.5634084", "0.5617679", "0.5614962", "0.5612769", "0.56114805", "0.5600987", "0.5592565", "0.55911183", "0.55873185", "0.55873185", "0.55872065", "0.55858576", "0.55833805", "0.55685467", "0.5566351", "0.55565727", "0.5549598", "0.55472004", "0.5546491", "0.5542805", "0.55383617", "0.55357325", "0.55264103", "0.5524779", "0.5522643", "0.551238", "0.551055", "0.55076295", "0.5507307", "0.5497732", "0.54874694", "0.5484026", "0.5478937", "0.5470138", "0.54688233", "0.54517794", "0.54500866", "0.54462284", "0.5444899", "0.5439682", "0.54362106", "0.5435219", "0.5429354", "0.5422643", "0.54218394", "0.5412712", "0.54124653", "0.5411973", "0.54118776" ]
0.7456519
0
Send the system info to the API so that the expected EFI version and other data relevant to this system can be returned
def submit_system_data(self, data_to_submit=None): endpoint = "/apple/oneshot" # if not data_to_submit: # data_to_submit = {"hashed_uuid":self.h_sys_uuid, "hw_ver":self.hw_version, "rom_ver":self.efi_version, # "smc_ver":self.smc_version, "board_id":self.board_id, "os_ver":self.os_version, "build_num":self.build_num} # POST this data to the API to get relevant info back result_dict = self.__make_api_post(endpoint, data=data_to_submit) return result_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def system_info(self, system_id):\n\n\t\tpath = f'{self.BIKE_ENDPOINT}system/{system_id}/{self.secret_key}'\n\t\tresponse = requests.get(path).json()\n\t\tself.check_api_key(response)\n\n\t\treturn response", "async def get_system_info(self) -> Dict[str, Any]:\n assert self._client is not None\n return await self._client.invoke_method(\"system.info\")", "def get_system_info(self):\r\n method = self.public_endpoints['system_info']['method']\r\n url = self.base_url + self.public_endpoints['system_info']['url']\r\n req = requests.request(method, url)\r\n res = req.json()\r\n\r\n if res['success'] == True:\r\n return res[\"result\"]\r\n else:\r\n return res", "def gather_system_versions(self):\n # Get Mac model ID\n self.hw_version = str(\n IORegistryEntryCreateCFProperty(\n IOServiceGetMatchingService(\n 0,\n IOServiceMatching(\"IOPlatformExpertDevice\")),\n \"model\",\n None,\n 0)).replace(\n \"\\x00\",\n \"\")\n\n if \"imacpro\" in self.hw_version.lower():\n # iMac Pro stores it's EFI data different due it's new architecture\n # so grab the EFI & SMC ROM versions appropriately\n raw_efi_list = []\n raw_rom_info = str(\n IORegistryEntryCreateCFProperty(\n IORegistryEntryFromPath(\n 0,\n \"IODeviceTree:/rom\"),\n \"apple-rom-info\",\n None,\n 0))\n for data in raw_rom_info.split(\"\\n\"):\n if data.strip().startswith(\"BIOS ID\"):\n raw_efi_list = data.split(\":\")[1].strip().split(\".\")\n break\n else:\n self.message(\n \"[-] Could not find raw EFI data to determine EFI versions. Exiting....\")\n return False\n\n self.efi_version = \"%s.%s.%s\" % (\n raw_efi_list[0], raw_efi_list[2], raw_efi_list[3])\n # Can't currently find the SMC version like this on imac pros ....\n # self.smc_version = str(IORegistryEntryCreateCFProperty(IOServiceGetMatchingService(0, IOServiceMatching(\"AppleSMC\")), \"smc-version\", None, 0))\n self.smc_version = \"\"\n else:\n # EFI & SMC ROM versions\n self.smc_version = str(\n IORegistryEntryCreateCFProperty(\n IOServiceGetMatchingService(\n 0,\n IOServiceMatching(\"AppleSMC\")),\n \"smc-version\",\n None,\n 0))\n raw_efi = str(\n IORegistryEntryCreateCFProperty(\n IORegistryEntryFromPath(\n 0,\n \"IODeviceTree:/rom\"),\n \"version\",\n None,\n 0)).replace(\n \"\\x00\",\n \"\").split(\".\")\n self.efi_version = \"%s.%s.%s\" % (\n raw_efi[0], raw_efi[2], raw_efi[3])\n\n # Set the salt to be the MAC address of the system, using the MAC as a salt in this manner\n # helps ensure that the hashed sysuuid is pseudonymous. We don't want to know the sysuuid's\n # value, but we do want it to be unique however. 
The Salt value is\n # never submitted to the API\n salt = hex(getnode())\n sys_uuid = str(\n IORegistryEntryCreateCFProperty(\n IOServiceGetMatchingService(\n 0,\n IOServiceMatching(\"IOPlatformExpertDevice\")),\n \"IOPlatformUUID\",\n None,\n 0)).replace(\n \"\\x00\",\n \"\")\n self.h_sys_uuid = hashlib.sha256(salt + sys_uuid).hexdigest()\n\n # Get the Board-ID, this is how EFI files are matched to running\n # hardware - Nastee\n self.board_id = str(\n IORegistryEntryCreateCFProperty(\n IOServiceGetMatchingService(\n 0,\n IOServiceMatching(\"IOPlatformExpertDevice\")),\n \"board-id\",\n None,\n 0)).replace(\n \"\\x00\",\n \"\")\n\n # Get OS version\n self.os_version = commands.getoutput(\"sw_vers -productVersion\")\n\n # Get build number\n self.build_num = commands.getoutput(\"sw_vers -buildVersion\")\n\n # Carve out the major version as we use this a bunch\n # self.os_maj_ver = \".\".join(self.os_version.split(\".\")[:2])\n\n # Add gathered info to the dictionary to query the API with\n self.endpoints_to_check[\"127.0.0.1\"] = {\n \"hashed_uuid\": self.h_sys_uuid,\n \"hw_ver\": self.hw_version,\n \"rom_ver\": self.efi_version,\n \"smc_ver\": self.smc_version,\n \"board_id\": self.board_id,\n \"os_ver\": self.os_version,\n \"build_num\": self.build_num}\n\n return True", "def get_system_information(self):\n\t\tsys = platform.uname()\n\t\treturn {\n\t\t\t'hostname': sys.node,\n\t\t\t'operating_system': sys.system,\n\t\t\t'version': sys.version,\n\t\t\t'release': sys.release,\n\t\t\t'processor' : sys.processor,\n\t\t\t'processor_type': sys.machine,\n\t\t}", "def system_info():\n requirements = get_requirements(\"sunpy\")\n groups = get_keys_list(requirements)\n extra_groups = get_extra_groups(groups, ['all', 'dev'])\n base_reqs = get_keys_list(requirements['required'])\n extra_reqs = get_keys_list(requirements['all'])\n missing_packages, installed_packages = find_dependencies(package=\"sunpy\", extras=extra_groups)\n extra_prop = {\"System\": platform.system(),\n \"Arch\": f\"{platform.architecture()[0]}, ({platform.processor()})\",\n \"Python\": platform.python_version(),\n \"sunpy\": version(\"sunpy\")}\n sys_prop = {**installed_packages, **missing_packages, **extra_prop}\n print(\"==============================\")\n print(\"sunpy Installation Information\")\n print(\"==============================\")\n print()\n print(\"General\")\n print(\"#######\")\n if sys_prop['System'] == \"Linux\":\n print(f\"OS: {distro.name()} ({distro.version()}, Linux {platform.release()})\")\n elif sys_prop['System'] == \"Darwin\":\n print(f\"OS: Mac OS {platform.mac_ver()[0]}\")\n elif sys_prop['System'] == \"Windows\":\n print(f\"OS: Windows {platform.release()} {platform.version()}\")\n else:\n print(\"Unknown OS\")\n for sys_info in ['Arch', 'sunpy']:\n print(f'{sys_info}: {sys_prop[sys_info]}')\n print(f'Installation path: {distribution(\"sunpy\")._path}')\n print()\n print(\"Required Dependencies\")\n print(\"#####################\")\n for req in base_reqs:\n print(f'{req}: {sys_prop[req]}')\n print()\n print(\"Optional Dependencies\")\n print(\"#####################\")\n for extra_req in extra_reqs:\n print(f'{extra_req}: {sys_prop[extra_req]}')", "async def get_system_info(hass, include_components):\n\n gate_id = hass.states.get('sensor.ais_secure_android_id_dom').state\n info_object = {\n 'arch': platform.machine(),\n 'dev': 'dev' in current_version,\n 'docker': False,\n 'os_name': platform.system(),\n 'python_version': platform.python_version(),\n 'timezone': dt_util.DEFAULT_TIME_ZONE.zone,\n 
'version': current_version,\n 'virtualenv': os.environ.get('VIRTUAL_ENV') is not None,\n 'hassio': hass.components.hassio.is_hassio(),\n 'gate_id': gate_id,\n }\n\n if include_components:\n info_object['components'] = list(hass.config.components)\n\n if platform.system() == 'Windows':\n info_object['os_version'] = platform.win32_ver()[0]\n elif platform.system() == 'Darwin':\n info_object['os_version'] = platform.mac_ver()[0]\n elif platform.system() == 'FreeBSD':\n info_object['os_version'] = platform.release()\n elif platform.system() == 'Linux':\n import distro\n linux_dist = await hass.async_add_job(\n distro.linux_distribution, False)\n info_object['distribution'] = linux_dist[0]\n info_object['os_version'] = linux_dist[1]\n info_object['docker'] = os.path.isfile('/.dockerenv')\n\n return info_object", "def _get_host_details(self):\n # Assuming only one system present as part of collection,\n # as we are dealing with iLO's here.\n status, headers, system = self._rest_get('/rest/v1/Systems/1')\n if status < 300:\n stype = self._get_type(system)\n if stype not in ['ComputerSystem.0', 'ComputerSystem.1']:\n msg = \"%s is not a valid system type \" % stype\n raise exception.IloError(msg)\n else:\n msg = self._get_extended_error(system)\n raise exception.IloError(msg)\n\n return system", "def get_version_info(self):\n sys_info_service = self.robot.all_services.get(\"sys_info\")\n if sys_info_service is not None:\n log.info(\"System version info: %s\" % sys_info_service.system_version)\n else:\n log.warning(\"Service get_version_info is not enabled!\")", "def remote_getSysinfo(self, request):\r\n # TODO : replace these calls with call to rce.util.sysinfo\r\n response_table = {\r\n 'size':self._size,\r\n 'cpu':self._cpu,\r\n 'memory': self._memeory,\r\n 'bandwidth': self._bandwidth,\r\n # 'keyword': some value or function to provide the data\r\n }\r\n\r\n return response_table[request]", "def sys_info(self):\n\n for i in self._nodes.items():\n print(\"\\n==============================\")\n name = i[0]\n node = i[1]\n\n print(\"NODE: {}\\n\".format(name))\n\n # CPU\n print(\"CPU:\")\n self.cpu_info(node)\n\n # Grub\n print(\"\\nGrub Command Line:\")\n if \"grub\" in node:\n print(\" Current: {}\".format(node[\"grub\"][\"current_cmdline\"]))\n print(\" Configured: {}\".format(node[\"grub\"][\"default_cmdline\"]))\n\n # Huge Pages\n print(\"\\nHuge Pages:\")\n self.hugepage_info(node)\n\n # Devices\n print(\"\\nDevices:\")\n self.device_info(node)\n\n # Status\n print(\"\\nVPP Service Status:\")\n state, errors = VPPUtil.status(node)\n print(\" {}\".format(state))\n for e in errors:\n print(\" {}\".format(e))\n\n # Minimum system resources\n self.min_system_resources(node)\n\n print(\"\\n==============================\")", "def get_system_info():\n query = {\"type\": \"op\", \"cmd\": \"<show><system><info></info></system></show>\"}\n\n return __proxy__[\"panos.call\"](query)", "async def get_system(self) -> dict[str, Any]:\n cmd = await self.send_command(\"SYSTEM\", timeout=1)\n if not cmd.succeeded():\n raise ArchonError(f\"Command finished with status {cmd.status.name!r}\")\n\n keywords = str(cmd.replies[0].reply).split()\n system = {}\n for (key, value) in map(lambda k: k.split(\"=\"), keywords):\n system[key.lower()] = value\n if match := re.match(r\"^MOD([0-9]{1,2})_TYPE\", key, re.IGNORECASE):\n name_key = f\"mod{match.groups()[0]}_name\"\n system[name_key] = ModType(int(value)).name\n\n return system", "def system_session(self):\n self.user['kernel'] = 
self.packages['kernel'][self.user['kernel']]\n\n # Set cpu parameters\n if 'intel' in self.system['cpu'].lower():\n self.user['cpu'] = {'name': self.system['cpu'],\n 'microcode': self.packages['microcode'][0]}\n elif 'AMD' in self.system['cpu']:\n self.user['cpu'] = {'name': self.system['cpu'],\n 'microcode': self.packages['microcode'][1]}\n else:\n self.user['cpu'] = {'name': self.system['cpu'], 'microcode': None}\n\n # Crypt and append passwords\n rootpasswd = crypt(self.user['root_passwd'], mksalt(METHOD_SHA512))\n userpasswd = crypt(self.user['user_passwd'], mksalt(METHOD_SHA512))\n self.user['passwords'] = {'root': rootpasswd, 'user': userpasswd}\n\n # Set keymap\n if 'keymap' not in self.system:\n self.user['keymap'] = self.user['language'].split('_')[0]\n else:\n self.user['keymap'] = self.system['keymap']\n\n # Append NTFS packages\n self.user['ntfs'] = self.system['ntfs']\n if self.system['ntfs'] is True:\n self.user['ntfs'] = self.packages['ntfs']\n\n # Set system firmware\n self.user['firmware'] = {'type': self.system['firmware'],\n 'version': self.system['efi'],\n 'driver': self.user['firmware']}\n\n # Append firmware packages\n if self.user['firmware']['driver'] is True:\n self.user['firmware']['driver'] = self.packages['firmware']\n\n # Set mirrorlist\n self.user['mirrorlist'] = self.system['mirrorlist']", "def test_get_info(self):\n self.addCleanup(self.sdkapi.guest_delete, self.userid)\n\n self.sdkapi.guest_create(self.userid, 1, 1024, disk_list=self.disks)\n self.sdkapi.guest_deploy(self.userid, self.image_name)\n\n # get info in shutdown state\n info_off = self.sdkapi.guest_get_info(self.userid)\n self.assertEquals(info_off['power_state'], 'off')\n self.assertEquals(info_off['mem_kb'], 0)\n self.assertEquals(info_off['cpu_time_us'], 0)\n\n # get info in active state\n self.sdkapi.guest_start(self.userid)\n self.assertTrue(self.sdkutils.wait_until_guest_in_power_state(\n self.userid, 'on'))\n time.sleep(1)\n info_on = self.sdkapi.guest_get_info(self.userid)\n self.assertEquals(info_on['power_state'], 'on')\n self.assertNotEqual(info_on['cpu_time_us'], 0)\n self.assertNotEqual(info_on['mem_kb'], 0)\n\n # get info in paused state\n self.sdkapi.guest_pause(self.userid)\n info_on = self.sdkapi.guest_get_info(self.userid)\n self.assertEquals(info_on['power_state'], 'on')\n self.assertNotEqual(info_on['cpu_time_us'], 0)\n self.assertNotEqual(info_on['mem_kb'], 0)", "def system_status(system_ip):\n\n click.secho(\"\\nRetrieving the System Status\")\n\n url = base_url + \"/device/system/status?deviceId={0}\".format(system_ip)\n\n response = requests.get(url=url, headers=header,verify=False)\n if response.status_code == 200:\n items = response.json()['data']\n else:\n print(\"Failed to get system status \" + str(response.text))\n exit()\n\n print(\"\\nSystem status for Device:\",system_ip)\n\n headers = [\"Host name\", \"Up time\", \"Version\", \"Memory Used\", \"CPU system\"]\n table = list()\n\n for item in items:\n tr = [item['vdevice-host-name'], item['uptime'], item['version'], item['mem_used'], item['cpu_system']]\n table.append(tr)\n\n try:\n click.echo(tabulate.tabulate(table, headers, tablefmt=\"fancy_grid\"))\n except UnicodeEncodeError:\n click.echo(tabulate.tabulate(table, headers, tablefmt=\"grid\"))", "def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")", "def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")", "def system_data(self) -> 'outputs.SystemDataResponse':\n return 
pulumi.get(self, \"system_data\")", "def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")", "def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")", "def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")", "def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")", "def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")", "def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")", "def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")", "def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")", "def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")", "def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")", "def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")", "def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")", "def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")", "def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")", "def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")", "def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")", "def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")", "def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")", "def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")", "def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")", "def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")", "def _get_system_hardware(self):\n return self._get_system_status()[\"hardware\"]", "def test_get_system(self):\n pass", "def device_info(self) -> Dict[str, Any]:\n agreement = self.toon.agreement\n model = agreement.display_hardware_version.rpartition('/')[0]\n sw_version = agreement.display_software_version.rpartition('/')[-1]\n return {\n 'identifiers': {\n (DOMAIN, agreement.id),\n },\n 'name': 'Toon Display',\n 'manufacturer': 'Eneco',\n 'model': model,\n 'sw_version': sw_version,\n }", "def getSysinfo(self, request):\r\n return self._ref.callRemote('getSysinfo')", "def _get_system_status(self):\n sysinfo_strings = self._command(self.commands[\"SYSTEM_STATUS\"])\n sysinfo_dict = {\"name\": sysinfo_strings[0]}\n for line in sysinfo_strings:\n if \":\" in line:\n key, value = line.split(\":\", 1)\n sysinfo_dict[key.lower()] = value.strip()\n\n return sysinfo_dict", "def add_system_info(session, data, node_info, username='system_user'):\n session = validate_session(session)\n operation = operation_exists(session, data['operation_id'])\n node_id = node_info.id\n\n if node_id:\n if 'computer_name' in data:\n node_info.computer_name = data['computer_name']\n session.commit()\n system_info = session.query(SystemInfo).\\\n filter(SystemInfo.node_id == node_id).first()\n\n if system_info:\n system_info.os_code = data['os_code']\n system_info.os_string = data['os_string']\n 
system_info.version_minor = data['version_minor']\n system_info.version_build = data['version_build']\n system_info.meta = data['meta']\n system_info.bit_type = data['bit_type']\n try:\n session.commit()\n except Exception as e:\n session.rollback()\n else:\n system_info = SystemInfo(node_id, data['os_code'],\n data['os_string'], data['version_major'],\n data['version_minor'], data['version_build'],\n data['meta'], data['bit_type']\n )\n\n try:\n session.add(system_info)\n session.commit()\n\n except Exception as e:\n session.rollback()\n\n if 'hardware' in data:\n for key, values in data['hardware'].items():\n if 'nic' in key:\n for network in values:\n net_info = session.query(NetworkInterface).\\\n filter(NetworkInterface.node_id == node_id).\\\n filter(NetworkInterface.interface == \\\n network['name']).first()\n\n if net_info:\n net_info.mac_address = network['mac']\n net_info.ip_address = network['ip_address']\n session.commit()\n\n else:\n net_info = NetworkInterface(node_id=node_id,\n mac_address=network['mac'],\n ip_address=network['ip_address'],\n interface=network['name']\n )\n session.add(net_info)\n if 'storage' in key:\n for storage in values:\n storage_info = session.query(StorageInfo).\\\n filter(StorageInfo.node_id == node_id).\\\n filter(StorageInfo.name == storage['name']).\\\n first()\n\n if storage_info:\n storage_info.free_size_kb = storage['free_size_kb']\n storage_info.size_kb = storage['size_kb']\n session.commit()\n\n else:\n storage_info = StorageInfo(node_id=node_id,\n free_size_kb=storage['free_size_kb'],\n size_kb=storage['size_kb'],\n file_system=storage['file_system'],\n name=storage['name']\n )\n session.add(storage_info)\n if 'cpu' in key:\n for cpu in values:\n cpu_info = session.query(CpuInfo).\\\n filter(CpuInfo.node_id == node_id).\\\n filter(CpuInfo.name == cpu['cpu_id']).first()\n\n if cpu_info:\n cpu_info.speed_mhz = cpu['speed_mhz']\n cpu_info.cores = cpu['cores']\n cpu_info.cache_kb = cpu['cache_kb']\n session.commit()\n\n else:\n cpu_info = CpuInfo(node_id=node_id,\n cores=cpu['cores'],\n speed_mhz=cpu['speed_mhz'],\n bit_type=cpu['bit_type'],\n cache_kb=cpu['cache_kb'],\n name=cpu['name']\n )\n session.add(cpu_info)\n if 'display' in key:\n for video in values:\n video_info = session.query(DisplayInfo).\\\n filter(DisplayInfo.node_id == node_id).\\\n filter(DisplayInfo.name == video['name']).\\\n first()\n if video_info:\n video_info.speed_mhz = video['speed_mhz']\n video_info.ram_kb = video['ram_kb']\n video_info.name = video['name']\n session.commit()\n else:\n video_info = DisplayInfo(node_id=node_id,\n speed_mhz=video['speed_mhz'],\n ram_kb=video['ram_kb'],\n name=video['name']\n )\n session.add(video_info)\n if 'memory' in key:\n mem_info = session.query(MemoryInfo).\\\n filter(MemoryInfo.node_id == node_id).\\\n first()\n if mem_info:\n mem_info.total_memory = values\n session.commit()\n else:\n mem_info = MemoryInfo(\n node_id=node_id,\n total_memory=values\n )\n session.add(mem_info)\n\n try:\n session.commit()\n except Exception as e:\n print e, 'BAM'\n session.rollback()\n\n return system_info", "def remote_setSysinfo(self, request, value):\r\n raise NotImplementedError", "async def sysinfo(self, ctx: Context):\n\t\tstart = time.perf_counter()\n\t\tend = time.perf_counter()\n\t\tduration = (end - start) * 1000\n\t\tcpuavg = psutil.cpu_percent(interval=None)\n\t\tmem = psutil.virtual_memory()[2]\n\t\tdurround = round(duration, 3)\n\t\tosun = os.uname()\n\t\tawait self.send(f\"System Info | CPU: {cpuavg}% | RAM: {mem}% | Latency: 
{durround * 1000}ms | OS: {sys.platform}\", whisper=[ctx.author.id])", "def ex_get_hypervisor_sysinfo(self):\n xml = self.connection.getSysinfo()\n etree = ET.XML(xml)\n\n attributes = [\"bios\", \"system\", \"processor\", \"memory_device\"]\n\n sysinfo = {}\n for attribute in attributes:\n element = etree.find(attribute)\n entries = self._get_entries(element=element)\n sysinfo[attribute] = entries\n\n return sysinfo", "def get_software_info():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<request><system><software><info></info></software></system></request>\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def device_info(self):\n return {\n \"identifiers\": {(DOMAIN, self.unique_id)},\n \"manufacturer\": \"Somfy\",\n \"name\": self.name,\n \"model\": self.tahoma_device.widget,\n \"sw_version\": self.tahoma_device.type,\n }", "def create_system_data():\n system_data = dict()\n system_data['system'] = dict()\n system_data['system']['primary'] = dict()\n system_data['system']['primary']['controllers'] = dict()\n system_data['system']['primary']['controllers']['re0'] = dict()\n system_data['system']['primary']['controllers']['re0']['hostname'] = 'abc'\n system_data['system']['primary']['controllers']['re0']['mgt-ip'] = '1.1.1.1'\n system_data['system']['primary']['controllers']['re0']['osname'] = 'Paragon'\n system_data['system']['primary']['name'] = 'abc'\n system_data['system']['primary']['model'] = 'Paragon'\n system_data['system']['primary']['make'] = 'Calnex'\n system_data['system']['primary']['server-ip'] = '1.1.1.2'\n system_data['system']['primary']['osname'] = 'Paragon'\n return system_data", "def getSystemByName(self,systemName):\n\n logger.debug(\"Call to getSystemByName - systemName: {}\".format(systemName))\n try:\n\n response = self.httpHandler.sendHttpRequest(\n CIC_SYSTEM_ENDPOINT+\"?\"+\n urllib.urlencode({ \"name\": systemName }))\n\n except urllib2.HTTPError as e:\n\n logger.debug(traceback.format_exc())\n\n if e.code == 404:\n\n body = e.read()\n logger.debug(\"Response code: {}, response body: {}\".format(e.code, body))\n flag = _checkSystemNotFound(body)\n if flag == True:\n raise KeyError(\n \"System with name '{}' was not found in TMS because it does not exist, {}\".format(systemName, body),\n \"CIC_SYSTEM_NOT_FOUND_ERR\")\n else:\n raise IOError(\n \"System with name '{}' was not found in TMS because of network/communication error, {}\".format(systemName, body),\n \"CIC_SYSTEM_COMMUNICATION_NETWORK_ERR\")\n\n elif e.code == 403:\n\n body = e.read()\n logger.debug(\"Response code: {}, response body: {}\".format(e.code, body))\n raise RuntimeError(\n \"User {} has no permission to look up the specified system {} in {} {}\".format(self.cicUser,systemName, self.cicUrl, body),\n \"CIC_NO_ACCESS\"\n )\n\n else:\n raise\n else:\n responseString = response.read()\n return json.loads(responseString)", "def update_storage_systems_info(self):\n try:\n rc, existing_systems = self.request(\"storage-systems\")\n\n # Mark systems for adding or removing\n for system in self.systems:\n for existing_system in existing_systems:\n if system[\"ssid\"] == existing_system[\"id\"]:\n system[\"current_info\"] = existing_system\n\n if system[\"current_info\"][\"passwordStatus\"] in [\"unknown\", \"securityLockout\"]:\n system[\"failed\"] = True\n self.module.warn(\"Skipping storage system [%s] because of current password status [%s]\"\n % (system[\"ssid\"], system[\"current_info\"][\"passwordStatus\"]))\n if system[\"current_info\"][\"metaTags\"]:\n system[\"current_info\"][\"metaTags\"] 
= sorted(system[\"current_info\"][\"metaTags\"], key=lambda x: x[\"key\"])\n break\n else:\n self.systems_to_add.append(system)\n\n # Mark systems for removing\n for existing_system in existing_systems:\n for system in self.systems:\n if existing_system[\"id\"] == system[\"ssid\"]:\n\n # Leave existing but undiscovered storage systems alone and throw a warning.\n if existing_system[\"id\"] in self.undiscovered_systems:\n self.undiscovered_systems.remove(existing_system[\"id\"])\n self.module.warn(\"Expected storage system exists on the proxy but was failed to be discovered. Array [%s].\" % existing_system[\"id\"])\n break\n else:\n self.systems_to_remove.append(existing_system[\"id\"])\n except Exception as error:\n self.module.fail_json(msg=\"Failed to retrieve storage systems. Error [%s].\" % to_native(error))", "def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:\n return pulumi.get(self, \"system_data\")", "def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:\n return pulumi.get(self, \"system_data\")", "def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:\n return pulumi.get(self, \"system_data\")", "def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:\n return pulumi.get(self, \"system_data\")", "def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:\n return pulumi.get(self, \"system_data\")", "def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:\n return pulumi.get(self, \"system_data\")", "def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:\n return pulumi.get(self, \"system_data\")", "def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:\n return pulumi.get(self, \"system_data\")", "def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:\n return pulumi.get(self, \"system_data\")", "def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:\n return pulumi.get(self, \"system_data\")", "def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:\n return pulumi.get(self, \"system_data\")", "def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:\n return pulumi.get(self, \"system_data\")", "def setSysinfo(self, request, value):\r\n return self._ref.callRemote('setSysinfo', value)", "def parse_os_info(self):\n pipe = subprocess.Popen([self.core_exe, '-o'], 0, None, None, subprocess.PIPE)\n lines = pipe.stdout.readlines()\n x = 0\n json_str = ''\n while x < len(lines):\n json_str += lines[x].decode('utf-8').strip()\n x += 1\n decoder = json.decoder.JSONDecoder()\n decoder.strict = False\n self.os_info = decoder.decode(json_str)\n return self.os_info", "def subcmd_getsystem_main(args, parameter_info):\n \n from get_system_inventory import get_system_inventory\n result = get_system_inventory(parameter_info['ip'], parameter_info['user'], parameter_info['passwd'], parameter_info['sysid'])\n \n if result['ret'] is True:\n del result['ret']\n sys.stdout.write(json.dumps(result['entries'], sort_keys=True, indent=2))\n else:\n sys.stderr.write(result['msg'])", "def update_system(self, system):\n try:\n rc, storage_system = self.request(\"storage-systems/%s\" % system[\"ssid\"], method=\"POST\", data=system[\"changes\"])\n except Exception as error:\n self.module.warn(\"Failed to update storage system. Array [%s]. 
Error [%s]\" % (system[\"ssid\"], to_native(error)))", "def get(self, section=None):\n logging.info(\"GET Request for System information, section=\\\"%s\\\"\", section)\n\n system_info = get_system_info(section)\n\n return jsonify(system_info)", "def device_info(self):\n info = {\n \"identifiers\": {\n (\n DOMAIN,\n \"serial-number\",\n self._ctrl.data[\"routerboard\"][\"serial-number\"],\n \"switch\",\n \"Scripts\",\n )\n },\n \"manufacturer\": self._ctrl.data[\"resource\"][\"platform\"],\n \"model\": self._ctrl.data[\"resource\"][\"board-name\"],\n \"name\": f\"{self._inst} Scripts\",\n }\n return info", "def systemRead():\n return", "def test_update_storage_systems_info_pass(self):\n self._set_args({\"password\": \"password\", \"subnet_mask\": \"192.168.1.0/24\",\n \"systems\": [{\"ssid\": \"1\", \"serial\": \"1\"}, {\"addresses\": [\"192.168.1.36\"]}, {\"serial\": \"2\"}, {\"serial\": \"5\"}]})\n systems = NetAppESeriesProxySystems()\n systems.systems = [\n {\"ssid\": \"1\", \"serial\": \"1\", \"password\": \"password\", \"password_valid\": None, \"password_set\": None, \"stored_password_valid\": None,\n \"meta_tags\": [], \"controller_addresses\": [\"192.168.1.5\", \"192.168.1.6\"], \"embedded_available\": True, \"accept_certificate\": True,\n \"current_info\": {}, \"changes\": {}, \"updated_required\": False, \"failed\": False, \"discovered\": True},\n {\"ssid\": \"192.168.1.36\", \"serial\": \"\", \"password\": \"password\", \"password_valid\": None, \"password_set\": None, \"stored_password_valid\": None,\n \"meta_tags\": [], \"controller_addresses\": [\"192.168.1.35\", \"192.168.1.36\"], \"embedded_available\": False, \"accept_certificate\": False,\n \"current_info\": {}, \"changes\": {}, \"updated_required\": False, \"failed\": False, \"discovered\": True},\n {\"ssid\": \"2\", \"serial\": \"2\", \"password\": \"password\", \"password_valid\": None, \"password_set\": None, \"stored_password_valid\": None,\n \"meta_tags\": [], \"controller_addresses\": [\"192.168.1.15\", \"192.168.1.16\"], \"embedded_available\": False, \"accept_certificate\": False,\n \"current_info\": {}, \"changes\": {}, \"updated_required\": False, \"failed\": False, \"discovered\": True}]\n\n with mock.patch(self.REQUEST_FUNC, return_value=(200, [{\"id\": \"1\", \"passwordStatus\": \"valid\", \"metaTags\": []},\n {\"id\": \"5\", \"passwordStatus\": \"valid\", \"metaTags\": []}])):\n systems.update_storage_systems_info()\n self.assertEquals(systems.systems_to_remove, [\"5\"])\n self.assertEquals(systems.systems_to_add, [\n {\"ssid\": \"192.168.1.36\", \"serial\": \"\", \"password\": \"password\", \"password_valid\": None, \"password_set\": None,\n \"stored_password_valid\": None, \"meta_tags\": [], \"controller_addresses\": [\"192.168.1.35\", \"192.168.1.36\"], \"embedded_available\": False,\n \"accept_certificate\": False, \"current_info\": {}, \"changes\": {}, \"updated_required\": False, \"failed\": False, \"discovered\": True},\n {\"ssid\": \"2\", \"serial\": \"2\", \"password\": \"password\", \"password_valid\": None, \"password_set\": None, \"stored_password_valid\": None,\n \"meta_tags\": [], \"controller_addresses\": [\"192.168.1.15\", \"192.168.1.16\"], \"embedded_available\": False, \"accept_certificate\": False,\n \"current_info\": {}, \"changes\": {}, \"updated_required\": False, \"failed\": False, \"discovered\": True}])", "def describe_operating_systems():\n pass", "def check_fw_versions(self, sys_info, api_results):\n if not api_results.get(\"latest_efi_version\"):\n # Call the API to see what 
the latest version of EFI you are\n # expected to be running given OS ver and mac model\n api_results[\n self.current_endpoint][\"latest_efi_version\"] = self.__make_api_get(\n '/apple/latest_efi_firmware/%s/%s' %\n (sys_info.get(\"hw_ver\"), sys_info.get(\"build_num\")))\n\n self.message(\"\\n\\tEFI firmware version check:\")\n\n # Validate response from API\n if self._validate_response(api_results[\"latest_efi_version\"]):\n # Valid response from API - now interpret it\n\n # This is kind messy but it's so as we can detect newer and older firmware and message accordingly rather than just looking for 'different' versions\n # the way that EFI versions are denoted by Apple makes this more of\n # a pain thatit really needs to be quite honestly\n api_efi_str = api_results[\"latest_efi_version\"][\"msg\"].split(\".\")\n my_efi_str = sys_info.get(\"rom_ver\").split(\".\")\n\n api_efi_ver = int(api_efi_str[1], 16)\n api_efi_build = int(api_efi_str[2].replace(\"B\", \"\"), 16)\n\n if all([x.isdigit() for x in my_efi_str]):\n # Newer EFI versions do not include a build number\n # or the Mac model code. The output will be something\n # like 256.0.0, whereas with the old format it would\n # be MBP133.0256.B00.\n my_efi_ver = int(my_efi_str[0], 16)\n my_efi_build = 0\n else:\n my_efi_ver = int(my_efi_str[1], 16)\n my_efi_build = int(my_efi_str[2].replace(\"B\", \"\"), 16)\n\n if api_efi_str == my_efi_str:\n self.message(\n \"\\t\\t[+] SUCCESS - The EFI Firmware you are running (%s) is the expected version for the OS build you have installed (%s) on your %s\" %\n (sys_info.get(\"rom_ver\"), sys_info.get(\"build_num\"), sys_info.get(\"hw_ver\")))\n elif my_efi_ver == api_efi_ver and my_efi_build == api_efi_build:\n self.message(\n \"\\t\\t[+] SUCCESS - The EFI Firmware you are running (%s) is the expected version for the OS build you have installed (%s) on your %s\" %\n (sys_info.get(\"rom_ver\"), sys_info.get(\"build_num\"), sys_info.get(\"hw_ver\")))\n\n elif (my_efi_ver > api_efi_ver) or (my_efi_ver > api_efi_ver and my_efi_build > api_efi_build) or (my_efi_ver == api_efi_ver and my_efi_build > api_efi_build):\n # Looks like you're running a beta or a dev build - pretty much\n # all bets are off here as the dataset doens't cover dev builds\n # but a nicer message makes sense\n self.message(\n \"\\t\\t[!] ATTENTION - It looks like your EFI version (%s) is NEWER than the latest production release that is in the dataset (%s). This is most likely because you are now, or have in the past, installed a developer preview OS and as part of that you also had newer EFI firmware installed. The EFIgy API currently only has reliable data for production OS releases.\" %\n (sys_info.get(\"rom_ver\"), api_results[\"latest_efi_version\"][\"msg\"]))\n\n else:\n self.message(\n \"\\t\\t[-] ATTENTION - You are running an unexpected firmware version given the model of your system (%s) and OS build you have installed (%s). 
Your firmware is %s, the firmware we expected to see is %s.\\n\" %\n (sys_info.get(\"hw_ver\"), sys_info.get(\"build_num\"), sys_info.get(\"rom_ver\"), api_results[\"latest_efi_version\"][\"msg\"]))", "def test_get_systems(self):\n pass", "def system_info() -> str:\n return \"\\n\".join(\n [\n f\"Python version: {platform.python_version()}\",\n f\"Python implementation: {platform.python_implementation()}\",\n f\"Python compiler: {platform.python_compiler()}\",\n f\"PyTorch version: {torch.__version__}\",\n f\"System: {platform.system() or 'Unable to determine'}\",\n f\"System version: {platform.release() or 'Unable to determine'}\",\n f\"Processor: {platform.processor() or 'Unable to determine'}\",\n f\"Number of CPUs: {multiprocessing.cpu_count()}\",\n ]\n )", "def get_info(self):\n\t\tret = 'Flash info\\n'\n\t\tret += '\\tGPNVM bits: ' + str(self.read_gpnvm()) + '\\n'\n\t\tret += '\\tUnique identifier area: ' + self.read_unique_identifier_area().decode('ascii', 'replace') + '\\n'\n\t\tret += '\\tDescriptor: ' + str(self.read_descriptor()) + '\\n'\n\t\treturn ret", "def get_os_info(hass: HomeAssistant) -> dict[str, Any] | None:\n return hass.data.get(DATA_OS_INFO)", "def test_ucs_get_sys(self):\n api_data = request(\"get\", \"/sys\")\n self.assertEqual(api_data['status'], 200,\n 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))\n self.assertIn(\"Fabric Interconnects\", api_data[\"json\"],\n \"Results did not contain 'Fabric Interconnects'\")\n self.assertIn(\"Servers\", api_data[\"json\"], \"Results did not contain 'Servers\")\n self.assertIn(\"FEX\", api_data[\"json\"], \"Results did not contain 'FEX\")\n self.assertIn(\"Chassis\", api_data[\"json\"], \"Results did not contain 'Chassis\")", "def platform_info(self):\n return self.msg.platform_info", "def system(self):\n return self['system']", "def set_bios_bootmode_uefi(ip, login_account, login_password, system_id):\n result = {}\n login_host = \"https://\" + ip\n try:\n # Connect using the BMC address, account name, and password\n # Create a REDFISH object\n REDFISH_OBJ = redfish.redfish_client(base_url=login_host, username=login_account, timeout=utils.g_timeout,\n password=login_password, default_prefix='/redfish/v1', cafile=utils.g_CAFILE)\n # Login into the server and create a session\n REDFISH_OBJ.login(auth=utils.g_AUTH)\n except:\n traceback.print_exc()\n result = {'ret': False, 'msg': \"Please check the username, password, IP is correct\"}\n return result\n\n # GET the ComputerSystem resource\n system = utils.get_system_url(\"/redfish/v1\", system_id, REDFISH_OBJ)\n if not system:\n result = {'ret': False, 'msg': \"This system id is not exist or system member is None\"}\n REDFISH_OBJ.logout()\n return result\n for i in range(len(system)):\n system_url = system[i]\n response_system_url = REDFISH_OBJ.get(system_url, None)\n if response_system_url.status != 200:\n error_message = utils.get_extended_error(response_system_url)\n result = {'ret': False, 'msg': \"Url '%s' response Error code %s \\nerror_message: %s\" % (system_url, response_system_url.status, error_message)}\n REDFISH_OBJ.logout()\n return result\n else:\n # Get the bios resource\n bios_url = response_system_url.dict['Bios']['@odata.id']\n response_bios_url = REDFISH_OBJ.get(bios_url, None)\n if response_bios_url.status != 200:\n error_message = utils.get_extended_error(response_bios_url)\n result = {'ret': False, 'msg': \"Url '%s' response Error code %s \\nerror_message: %s\" % (bios_url, response_bios_url.status, error_message)}\n 
REDFISH_OBJ.logout()\n return result\n else: # Get bios success\n # Seek boot mode from bios attributes\n attribute_bootmode = None\n attributes = response_bios_url.dict['Attributes']\n for attribute in attributes:\n if attribute == \"BootMode\" or attribute == \"SystemBootMode\":\n attribute_bootmode = attribute\n break\n if attribute_bootmode == None:\n for attribute in attributes:\n if \"SystemBootMode\" in attribute:\n attribute_bootmode = attribute\n break\n if attribute_bootmode == None:\n for attribute in attributes:\n if \"Boot\" in attribute and \"Mode\" in attribute:\n attribute_bootmode = attribute\n break\n if attribute_bootmode == None:\n result = {'ret': False, 'msg': \"Can not found BootMode attribute in response of url %s\" %(bios_url)}\n REDFISH_OBJ.logout()\n return result\n\n # Get boot mode setting guide from bios registry\n WarningText = None\n ValueName = None\n bios_registry_url = \"/redfish/v1/Registries/\" + response_bios_url.dict['AttributeRegistry']\n response_bios_registry_url = REDFISH_OBJ.get(bios_registry_url, None)\n if response_bios_registry_url.status == 200:\n locations = response_bios_registry_url.dict['Location']\n bios_regjson_url = None\n for location in locations:\n if 'en' in location['Language']:\n bios_regjson_url = location['Uri']\n break\n if bios_regjson_url:\n response_bios_regjson_url = REDFISH_OBJ.get(bios_regjson_url, None)\n if response_bios_regjson_url.status == 200:\n regattributes = response_bios_regjson_url.dict['RegistryEntries']['Attributes']\n for regattribute in regattributes:\n if regattribute['AttributeName'] == attribute_bootmode:\n if 'WarningText' in regattribute:\n WarningText = regattribute['WarningText']\n for value in regattribute['Value']:\n if 'legacy' in value['ValueName'].lower():\n continue\n if 'uefi' in value['ValueName'].lower():\n ValueName = value['ValueName']\n break\n ValueName = value['ValueName']\n break\n \n # Perform patch to set\n if ValueName == None:\n ValueName = \"UEFIMode\"\n pending_url = response_bios_url.dict['@Redfish.Settings']['SettingsObject']['@odata.id']\n parameter = {attribute_bootmode: ValueName}\n attribute = {\"Attributes\": parameter}\n headers = {\"If-Match\": '*'}\n response_pending_url = REDFISH_OBJ.patch(pending_url, body=attribute, headers=headers)\n if response_pending_url.status in [200,204]:\n if WarningText:\n result = {'ret': True, 'msg': 'set bios bootmode uefi successful. 
WarningText: %s'% (WarningText) }\n else:\n result = {'ret': True, 'msg': 'set bios bootmode uefi successful'}\n elif response_pending_url.status == 405:\n result = {'ret': False, 'msg': \"Resource not supported\"}\n else:\n error_message = utils.get_extended_error(response_pending_url)\n result = {'ret': False, 'msg': \"Url '%s' response Error code %s \\nerror_message: %s\" % (\n pending_url, response_pending_url.status, error_message)}\n\n # Logout of the current session\n try:\n REDFISH_OBJ.logout()\n except:\n pass\n return result", "def device_info(self) -> DeviceInfo:\n return {\n \"identifiers\": {(DOMAIN, str(self.coordinator.gios.station_id))},\n \"name\": DEFAULT_NAME,\n \"manufacturer\": MANUFACTURER,\n \"entry_type\": \"service\",\n }", "def get_system_info() -> SystemInfo:\n\n assert is_windows(), 'This function is only available on Windows systems'\n\n from win32api import GetSystemInfo\n return SystemInfo(*GetSystemInfo())", "def drawSystemInfo(self):\n for sim in self.systemSims:\n # draw name\n (x,y) = anwp.sl.engine.worldToScreen(sim.mySystemDict['x'], sim.mySystemDict['y'])\n pyui.desktop.getRenderer().drawText(sim.mySystemDict['name'], \n (x-30,y-70),\n sim.color1, self.game.app.planetFont, \n flipped = 1)\n # draw city number\n pyui.desktop.getRenderer().drawText(str(sim.mySystemDict['cities']), \n (x-10,y-6),\n sim.color2, self.game.app.systemFont, \n flipped = 1)", "def system_data(self) -> pulumi.Output['outputs.ProxyResourceResponseSystemData']:\n return pulumi.get(self, \"system_data\")", "def add_system(self, system):\n self.set_password(system)\n\n body = {\"id\": system[\"ssid\"],\n \"controllerAddresses\": system[\"controller_addresses\"],\n \"password\": system[\"password\"]}\n if system[\"accept_certificate\"]: # Set only if embedded is available and accept_certificates==True\n body.update({\"acceptCertificate\": system[\"accept_certificate\"]})\n if system[\"meta_tags\"]:\n body.update({\"metaTags\": system[\"meta_tags\"]})\n\n try:\n rc, storage_system = self.request(\"storage-systems\", method=\"POST\", data=body)\n except Exception as error:\n self.module.warn(\"Failed to add storage system. Array [%s]. Error [%s]\" % (system[\"ssid\"], to_native(error)))\n return # Skip the password validation.\n\n # Ensure the password is validated\n for retries in range(5):\n sleep(1)\n try:\n rc, storage_system = self.request(\"storage-systems/%s/validatePassword\" % system[\"ssid\"], method=\"POST\")\n break\n except Exception as error:\n continue\n else:\n self.module.warn(\"Failed to validate password status. Array [%s]. Error [%s]\" % (system[\"ssid\"], to_native(error)))", "def _initialize_system(self):\n # Make sure that the system is actually valid before trying anything\n self._validate_system()\n\n # Do any necessary template resolution\n self._system.template = resolve_template(self._system.template)\n\n existing_system = self._ez_client.find_unique_system(\n name=self._system.name,\n version=self._system.version,\n namespace=self._system.namespace,\n )\n\n if not existing_system:\n try:\n # If this succeeds can just finish here\n return self._ez_client.create_system(self._system)\n except ConflictError:\n # If multiple instances are starting up at once and this is a new system\n # the create can return a conflict. 
In that case just try the get again\n existing_system = self._ez_client.find_unique_system(\n name=self._system.name,\n version=self._system.version,\n namespace=self._system.namespace,\n )\n\n # If we STILL can't find a system something is really wrong\n if not existing_system:\n raise PluginValidationError(\n \"Unable to find or create system {0}\".format(self._system)\n )\n\n # We always update with these fields\n update_kwargs = {\n \"new_commands\": self._system.commands,\n \"metadata\": self._system.metadata,\n \"description\": self._system.description,\n \"display_name\": self._system.display_name,\n \"icon_name\": self._system.icon_name,\n \"template\": self._system.template,\n }\n\n # And if this particular instance doesn't exist we want to add it\n if not existing_system.has_instance(self._config.instance_name):\n update_kwargs[\"add_instance\"] = Instance(name=self._config.instance_name)\n\n return self._ez_client.update_system(existing_system.id, **update_kwargs)", "def _get_system_info(target: Optional[str],\n serial_num: Optional[str]) -> Tuple[str, str]:\n\n # TODO(b/242191374): Remove when devices in swarming are no longer booted\n # into zedboot.\n if running_unattended():\n try:\n boot_device(target, BootMode.REGULAR, serial_num)\n except (subprocess.CalledProcessError, StateTransitionError):\n logging.warning('Could not boot device. Assuming in ZEDBOOT')\n return ('', '')\n wait_cmd = common.run_ffx_command(cmd=('target', 'wait', '-t', '180'),\n target_id=target,\n check=False)\n if wait_cmd.returncode != 0:\n return ('', '')\n\n return get_system_info(target)", "def _GetSystemVersion(self, component, info):\n # Check if we are on mario, then we need to use the legacy parser\n if self.ChromeOSBoard() == 'x86-mario':\n return self._GetSystemVersionMario(component, info)\n items = info.strip().splitlines()\n # This is going to give us a list of lines, we are looking for the\n # following ones:\n # BIOS version: board.xx.xx.xxx.xxx.xx\n # EC version: foobar\n for line in items:\n line_components = line.split(':')\n # The line we are looking for has at least 2 items\n if len(line_components) >= 2 and line_components[0] == component:\n return line_components[1].strip()\n self.fail('Could not locate the following item %s in the return value '\n 'of chromeos-firmwareupdate.' 
% component)", "def get_guest_info(self):\n url = \"http://api.applezhuan.com/api/c/get_guestinfo?&\"\n params = {\n \"android_id\": self.mobile.android_id,\n \"platform\": \"2\",\n \"av\": \"2\",\n \"token\": \"\",\n \"time\": self.get_current_time,\n \"ov\": self.mobile.os,\n \"lon\": self.mobile.lon,\n \"lat\": self.mobile.lat,\n \"device_name\": \"dpi\",\n \"device_code\": self.device_code,\n \"brand\": self.mobile.brand,\n \"mac\": self.mobile.mac,\n \"vn\": \"1.0.2\",\n \"network\": self.mobile.network\n }\n params_str = self.encrypt.get_secret_param(params)\n url = url + \"s=\" + params_str\n headers = {\n \"Accept-Language\": \"zh-CN,zh;q=0.8\",\n \"User-Agent\": \"Mozilla/5.0 (Linux; U; Android \" + self.mobile.os + \"; zh-cn; GT-N7100 Build/\" +\n self.mobile.brand + \") AppleWebKit/534.30 (KHTML, like Gecko) \"\n \"Version/4.0 Mobile Safari/534.30\",\n \"Host\": \"api.applezhuan.com\",\n \"Connection\": \"Keep-Alive\",\n \"Accept-Encoding\": \"gzip\",\n \"Cookie\": self.cookie\n }\n\n res = requests.get(url, headers=headers)\n # print(res.text)\n result = json.loads(res.text)\n print(result)\n self.guest_info = result[\"d\"]\n self.guest_info.pop(\"h5_url\")\n self.guest_info.pop(\"banner\")\n self.guest_info.pop(\"menu\")\n self.guest_info.pop(\"headimg\")", "def device_info(self) -> Dict[str, any]:\n return {\n \"identifiers\": {\n (\n DOMAIN,\n self._api.information.serial,\n SynoSurveillanceStation.INFO_API_KEY,\n )\n },\n \"name\": \"Surveillance Station\",\n \"manufacturer\": \"Synology\",\n \"model\": self._api.information.model,\n \"sw_version\": self._version,\n \"via_device\": (DOMAIN, self._api.information.serial),\n }", "def getSystemUpdate(self, listAttr, systemID):\n try:\n serverResult = self.game.server.getSystemUpdate(listAttr, systemID, self.game.authKey)\n if type(serverResult) == types.StringType:\n self.modeMsgBox(serverResult)\n else:\n mySystemDict = self.game.allSystems[systemID]\n for key, value in serverResult.iteritems():\n mySystemDict[key] = value\n except:\n self.modeMsgBox('getSystemUpdate->Connection to Server Lost')", "def device_info(self):\n return {\n \"identifiers\": {(DOMAIN, self.tesla_device.id())},\n \"name\": self.tesla_device.car_name(),\n \"manufacturer\": \"Tesla\",\n \"model\": self.tesla_device.car_type,\n \"sw_version\": self.tesla_device.car_version,\n }", "def get_system_info(baseurl, cookie_header):\n url = baseurl + 'stacking/vsf/members/system_info'\n headers = {'cookie': cookie_header}\n response = requests.get(url, verify=False, headers=headers)\n if response.status_code == 200:\n return response.json()\n else:\n return response.status_code", "def system_data(self) -> 'outputs.DataCollectionEndpointResourceResponseSystemData':\n return pulumi.get(self, \"system_data\")", "def device_info(self):\n return {\n \"identifiers\": {(DOMAIN, self.unique_id)},\n \"name\": self.name,\n \"manufacturer\": \"Brightech\",\n }", "def changeSystemMetadata(self,systemName,parameter,value):\n\n logger.debug(\"Call to changeTenantMetadata - systemName: {} parameter: {} value: {}\".format(systemName, parameter, value))\n logger.debug(\"Next line contains json payload\")\n\n sysObj = self.getSystemByName(systemName)\n\n payload = {\n \"versionUuid\": sysObj[\"versionUuid\"],\n \"uuid\": sysObj[\"uuid\"],\n \"landscape\": sysObj[\"landscape\"],\n parameter: value\n }\n logger.debug(payload)\n\n try:\n response = self.httpHandler.sendHttpRequest(CIC_SYSTEM_ENDPOINT, payload, \"PATCH\", \"metadata\")\n except urllib2.HTTPError as e:\n\n 
logger.debug(traceback.format_exc())\n\n if e.code == 403:\n\n body = e.read()\n logger.debug(\"Response code: {}, response body: {}\".format(e.code, body))\n raise RuntimeError(\n \"User {} has no permission to update 'systems' in {} {}\".format(self.cicUser, self.cicUrl, body),\n \"CIC_NO_ACCESS\"\n )\n\n else:\n\n body = e.read()\n logger.debug(\"Response code: {}, response body: {}\".format(e.code, body))\n raise RuntimeError(\n \"An http error occured during system metatdata update: \"\n \"{}, Response body: {}\".format(e, body),\n \"CIC_SYSTEM_METADATA_UPDATE_ERR\")\n\n else:\n\n responseString = response.read()\n returnDict = json.loads(responseString)\n logger.debug(\"Return dict is: {}\".format(returnDict))\n\n rc = self._validateResponse(returnDict, parameter, value)\n if rc == 0 or rc == 1:\n return returnDict\n elif rc == 2:\n raise RuntimeError(\n \"System metadata update failed. \"\n \"Parameter '{}' not written. Maybe invalid parameter.\".format(parameter),\n \"CIC_SYSTEM_METADATA_UPDATE_NOTWRITE\")\n elif rc == 3:\n returnValue = returnDict[parameter]\n raise RuntimeError(\n \"System metadata update failed. \"\n \"Parameter '{}' written but different return value: {} != {}.\".format(\n parameter, value, returnValue),\n \"CIC_SYSTEM_METADATA_UPDATE_MISMATCH\")", "def get_device_info(self): # pylint: disable=no-self-use\r\n serial = get_serial_number()\r\n model = get_model()\r\n\r\n return {\r\n \"serial\": serial,\r\n \"model\": model,\r\n }" ]
[ "0.71641546", "0.6972458", "0.6943339", "0.6877875", "0.68028116", "0.6581513", "0.6516118", "0.65054244", "0.6417727", "0.62935936", "0.62915426", "0.6286806", "0.6262276", "0.6224786", "0.6207814", "0.61550796", "0.6147702", "0.6147702", "0.6147702", "0.6147702", "0.6147702", "0.6147702", "0.6147702", "0.6147702", "0.6147702", "0.6147702", "0.6147702", "0.6147702", "0.6147702", "0.6147702", "0.6147702", "0.6147702", "0.6147702", "0.6147702", "0.6147702", "0.6147702", "0.6147702", "0.6147702", "0.6147702", "0.6147702", "0.6130741", "0.6112216", "0.61083627", "0.6072666", "0.60528505", "0.60407865", "0.6033713", "0.6026644", "0.59887904", "0.5969324", "0.5966155", "0.5950677", "0.5946592", "0.59193426", "0.5912429", "0.5912429", "0.5912429", "0.5912429", "0.5912429", "0.5912429", "0.5912429", "0.5912429", "0.5912429", "0.5912429", "0.5912429", "0.5912429", "0.5907034", "0.5877798", "0.58518565", "0.5842437", "0.583236", "0.5817573", "0.57897043", "0.5787886", "0.5776605", "0.57574874", "0.5755794", "0.5753966", "0.5751283", "0.57451814", "0.5735955", "0.5733041", "0.57227534", "0.57118523", "0.56988937", "0.5698345", "0.5683511", "0.56728524", "0.566705", "0.565638", "0.565457", "0.563447", "0.562168", "0.5617794", "0.5610203", "0.5608949", "0.56076366", "0.56027335", "0.55935", "0.5586271", "0.55782896" ]
0.0
-1
Given the OS version you are running, what is the highest available build number? Are you running it?
def check_highest_build(self, sys_info, api_results):
    if not api_results.get("latest_build_number"):
        self.results[self.current_endpoint]["latest_build_number"] = self.__make_api_get(
            '/apple/latest_build_number/%s' % (".".join(sys_info.get("os_ver").split(".")[:2])))
    self.message("\n\tHighest build number check:")
    # Validate response from API
    if self._validate_response(api_results["latest_build_number"]):
        # Valid response from API - now interpret it
        if api_results["latest_build_number"]["msg"] == sys_info.get("build_num"):
            self.message(
                "\t\t[+] SUCCESS - You are running the latest build number (%s) of the OS version you have installed (%s)" %
                (sys_info.get("build_num"), sys_info.get("os_ver")))
        elif sys_info.get("build_num")[-1].isalpha():
            self.message(
                "\t\t[!] ATTENTION - It looks like you might be running a development OS build '%s' (%s). The EFIgy API currently only has reliable data for production OS releases." %
                (sys_info.get("build_num"), sys_info.get("os_ver")))
        else:
            self.message(
                "\t\t[-] ATTENTION - You are NOT running the latest release build number of your OS version (%s). Your build number is %s, the latest release build number is %s" %
                (sys_info.get("os_ver"), sys_info.get("build_num"), api_results["latest_build_number"]["msg"]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def version_max():\n return VERSION_MAX", "def get_max_build_version(version: str) -> str:\n return Version(version).bump_minor().get_stable().dumps()", "def get_build_number():\n try:\n return int(os.getenv(*legion.config.BUILD_NUMBER))\n except ValueError:\n raise Exception('Cannot parse build number as integer')", "def python_build():\n return _sys_version()[4:6]", "def get_latest_build(self):\n # Retrieve last sanity-checked build number (could be 0)\n self.get_last_sanity()\n\n # * List all build numbers for this version. Note this may include\n # builds for other versions, since all versions for a given\n # release share a build directory.\n # * Ignore builds above 50000, which are toy builds\n\n builds = [int(x) for x in os.listdir(self.ver_dir)\n if x.isdigit() and int(x) > self.last_bld and int(x) < 50000]\n builds.sort()\n\n # Check each build after last sanity-checked build\n bld_num = self.last_bld\n for build in builds:\n print (\"Checking build \" + str(build))\n if self.check_build(build):\n bld_num = build\n print(\"bld_num is now \" + str(bld_num))\n return bld_num", "def osversion():\n return platform()", "def get_os_release():\n if platform.linux_distribution()[0]:\n return \" \".join(platform.linux_distribution())\n elif platform.mac_ver()[0]:\n return \"%s %s\" % (platform.mac_ver()[0], platform.mac_ver()[2])\n else:\n return \"Unknown\"", "def _get_build_os_name():\n system = platform.system()\n if 'Darwin' in system or 'Macintosh' in system:\n return 'darwin-x86'\n\n # TODO: Add more values if needed.\n return 'linux-x86'", "def last_available_os_version(self) -> str:\n return pulumi.get(self, \"last_available_os_version\")", "def systemversionstr():\n return platform.uname().system", "def _get_version(self):\n version = self.job_config.get(\"os_version\")\n if not version:\n version = DEFAULT_OS_VERSION.get(self.os_type)\n\n return str(version)", "def getOsVersion():\n os_version_tuple = platform.mac_ver()[0].split('.')\n return int(os_version_tuple[1])", "def get_chromeos_version():\r\n try:\r\n get_board_property('CHROMEOS_RELEASE_VERSION')\r\n except:\r\n logging.info(\"CHROMEOS_RELEASE_VERSION not found\")\r\n return -1", "def get_host_os_minor(self):\n\t\treturn call_sdk_function('PrlSrvCfg_GetHostOsMinor', self.handle)", "def var_BUILD_OS(self):\n return _get_build_os_name()", "def get_version():\n # this implementation avoids calling Foundation and will work on\n # non Apple OSes.\n vers = \"UNKNOWN\"\n build = \"\"\n # find the munkilib directory, and the version file\n munkilibdir = os.path.dirname(os.path.abspath(__file__))\n versionfile = os.path.join(munkilibdir, \"version.plist\")\n if os.path.exists(versionfile):\n try:\n vers_plist = readPlist(versionfile)\n except (IOError, OSError, ExpatError):\n pass\n else:\n try:\n vers = vers_plist['CFBundleShortVersionString']\n build = vers_plist['BuildNumber']\n except KeyError:\n pass\n if build:\n vers = vers + \".\" + build\n return vers", "def commcare_minor_release(self):\n return '%d.%d' % self.build_spec.minor_release()", "def build_version(self):\n return self.nodes[0].get('infos').get('system_info').get('system_version')", "def latestidd():\n pth, _ = run_functions.install_paths(version='8.8.0') # works with any value in version\n dirpth = os.path.dirname(pth)\n dirpth = os.path.dirname(dirpth)\n alldirs = os.listdir(dirpth)\n eplusdirs = [dir for dir in alldirs if dir.startswith('EnergyPlus')]\n maxapp = max(eplusdirs)\n ver = folder2ver(maxapp)\n return ver", "def getBuild(number):", "def 
getBuild(number):", "def latestidd():\n pth, _ = run_functions.install_paths(\n version=\"8.8.0\"\n ) # works with any value in version\n dirpth = os.path.dirname(pth)\n dirpth = os.path.dirname(dirpth)\n alldirs = os.listdir(dirpth)\n eplusdirs = [dir for dir in alldirs if dir.startswith(\"EnergyPlus\")]\n maxapp = max(eplusdirs)\n ver = folder2ver(maxapp)\n return ver", "def version():\n return uname().version", "def version():\n return uname().version", "def platform_version(self) -> Optional[str]:\n return pulumi.get(self, \"platform_version\")", "def version():\n cmd = \"{} -v\".format(_detect_os())\n out = __salt__[\"cmd.run\"](cmd).splitlines()\n ret = out[0].split(\": \")\n return ret[1]", "def fpga_minor():\n return int, None", "def find_xcode_major_version():\n cmd = ['xcodebuild', '-version']\n command_trace.log(cmd)\n\n result = str(subprocess.check_output(cmd))\n version = result.split('\\n', 1)[0]\n version = re.sub(r'Xcode ', '', version)\n version = re.sub(r'\\..*', '', version)\n return int(version)", "def getversion(): # 3\n res,resargs = _msk.Env.getversion()\n if res != 0:\n raise Error(rescode(res),\"\")\n _major_return_value,_minor_return_value,_build_return_value,_revision_return_value = resargs\n return _major_return_value,_minor_return_value,_build_return_value,_revision_return_value", "def current_platform() -> str:\n if sys.platform.startswith('linux'):\n return 'linux'\n elif sys.platform.startswith('darwin'):\n return 'mac'\n elif (sys.platform.startswith('win') or\n sys.platform.startswith('msys') or\n sys.platform.startswith('cyg')):\n if sys.maxsize > 2 ** 31 - 1:\n return 'win64'\n return 'win32'\n else:\n print('Error: DO NOT SUPPORT OS', file=sys.stderr)\n sys.exit(1)", "def __get_best_version(self):\n\t\tif self.length < 32:\n\t\t\treturn 2 # version 2\n\t\telif self.length < 53:\n\t\t\treturn 3 # version 3\n\t\telif self.length < 78:\n\t\t\treturn 4 # version 4\n\t\telif self.length < 106:\n\t\t\treturn 5 # version 5\n\t\telif self.length < 134:\n\t\t\treturn 6 # version 6\n\t\telse:\n\t\t\treturn \"Too long data\"", "def is_release():\n return VERSION[-1]", "def get_version():\n click.echo(get_current_version_number())", "def minor_version(self):\n return self.unpack_dword(0x18)", "def oskernel_isgreater(self, version):\n match = re.search(r\"([0-9.]+)\", self.oskernel())\n if match:\n os_release = match.group(1)\n else:\n return True\n\n for (idx, os_version) in enumerate(os_release.split('.')):\n if idx >= len(version):\n break\n if int(os_version) > int(version[idx]):\n return True\n if int(os_version) < int(version[idx]):\n return False\n\n return True", "def os_release() -> str:\n\tassert(platform.system() == 'Darwin')\n\n\tver, _, _ = platform.mac_ver()\n\n\treturn ver", "def platform():\n if 'OS' in gyp_defines():\n if 'android' in gyp_defines()['OS']:\n return 'android'\n else:\n return gyp_defines()['OS']\n elif IsWindows():\n return 'win'\n elif IsLinux():\n return 'linux'\n else:\n return 'mac'", "def os_version(self) -> Optional[pulumi.Input['WindowsNodeConfigOsVersion']]:\n return pulumi.get(self, \"os_version\")", "def version_number() -> int:\n return 0", "async def osversion(self):\n\n await self.bot.say(box(release(), 'Bash'))", "def test_get_build_number(self):\n pass", "def get_kernel_version():\r\n try:\r\n return utils.run('uname -r').stdout.strip()\r\n except:\r\n logging.info(\"Not Found\")\r\n return -1", "def get_platform():\r\n platforms = [\r\n \"Android\",\r\n \"Linux.RaspberryPi\",\r\n \"Linux\",\r\n \"XBOX\",\r\n 
\"Windows\",\r\n \"ATV2\",\r\n \"IOS\",\r\n \"OSX\",\r\n \"Darwin\",\r\n ]\r\n\r\n for platform in platforms:\r\n if xbmc.getCondVisibility('System.Platform.%s' % platform):\r\n return platform\r\n return \"Unknown\"", "def get_min_build_version(version: str) -> str:\n return Version(version).replace(micro=0).get_stable().dumps()", "def operatingsystem_version_minor(self):\n # type: () -> string_types\n return self._operatingsystem_version_minor", "def version_min():\n return VERSION_MIN", "def os_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"os_version\")", "def get_os_version(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetOsVersion', self.handle)", "def minor_version(self) -> str:\n return pulumi.get(self, \"minor_version\")", "def get_platform():\n system_name = platform.system()\n if system_name == \"Linux\":\n # Previously we'd use either \"-gnu\" or \"-musl\" indicate which version\n # of libc we were built against. We now default to musl since it\n # reliably works on all platforms.\n return \"unknown-linux-musl\"\n elif system_name == \"Darwin\":\n return \"apple-darwin\"\n else:\n return \"unknown\"", "def get_version(self):\n rows = self.env.db_query(\"\"\"\n SELECT value FROM system WHERE name = %s\n \"\"\", (DB_SYSTEM_KEY,))\n return int(rows[0][0]) if rows else -1", "def get_version():\n vers = [\"%(major)i.%(minor)i\" % __version_info__, ]\n\n if __version_info__['micro']:\n vers.append(\".%(micro)i\" % __version_info__)\n if __version_info__['releaselevel'] != 'final':\n vers.append('%(releaselevel)s' % __version_info__)\n return ''.join(vers)", "def last_installed_os_version(self) -> str:\n return pulumi.get(self, \"last_installed_os_version\")", "def get_arch():\n with settings(hide('running', 'stdout')):\n arch = run('uname -m')\n return arch", "def find_latest_version(versions):\n\n highest_version = 0\n for version in versions:\n version = parse_version(version)\n\n if version > highest_version:\n highest_version = version\n\n return highest_version", "def get_version():\n return 1", "def max_version(*versions):\n max_version = max([StrictVersion(x) for x in versions])\n return str(max_version)", "def verify_Version_buildNumber():\r\n msg, flag = \"\", False\r\n try:\r\n 'Getting Build number for IOS '\r\n if g.platform == 'ios':\r\n flag1, msg1 = verify_ios_versionNumber()\r\n msg += msg1\r\n flag2, msg2 = verify_ios_buildNumber()\r\n msg += msg2\r\n 'go back'\r\n flag3=ui_controls.image(get_obj_identifier('about_back_btn'))\r\n print 'cliked on back button'\r\n flag = False if not (flag1 and flag2 and flag3) else True\r\n else:\r\n text_view = ui_controls.text_view(get_obj_identifier('about_buildVersion_lbl'))\r\n \r\n if text_view.strip() == g.android_version_no.strip():\r\n \r\n print \"Version and Build number matched. Expected : %s. Actual : %s\" % (g.android_version_no, text_view.strip())\r\n flag = True \r\n else:\r\n \r\n print \"Version and Build number does not match. Expected : %s. 
Actual : %s\" % (g.android_version_no, text_view.strip())\r\n flag1=ui_controls.back_button()\r\n \r\n flag = False if not (flag1) else True\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n return flag, msg", "def version(self):\n return \"%d.%d\" % (self._vmajor, self._vminor)", "def get_arch():\n arch = platform.machine()\n if arch == \"i686\":\n return \"i686\"\n elif arch == \"x86_64\":\n return \"x86_64\"\n elif arch == \"aarch64\":\n return \"aarch64\"\n else:\n return \"unknown\"", "def get_most_used_os(records):\n systems = {}\n for r in records:\n systems[r.os] = systems.get(r.os, 0) + 1\n max_req = 0\n max_system = None\n for k, v in systems.items():\n if v > max_req:\n max_req, max_source = v, k\n return max_system", "def platform_num(self) -> str:\n return pulumi.get(self, \"platform_num\")", "def get_host_os_sub_minor(self):\n\t\treturn call_sdk_function('PrlSrvCfg_GetHostOsSubMinor', self.handle)", "def latest_version_number(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"latest_version_number\")", "def get_version_for(self,platform,version):\n def supports_platform(test_platforms):\n if test_platforms.upper() in ['ALL','ANY']:\n platforms = PLATFORMS\n else:\n platforms = test_platforms.split(':')\n return platform in platforms\n\n # Minimal required version check (for mainline releases)\n if self.min_versions:\n base_version = '.'.join(version.split('.')[:2])\n for base_min_version, min_version in (('.'.join(x.split('.')[:2]),x)\n for x in self.min_versions.split(';')):\n if compare_versions(base_version,base_min_version) == 0:\n if compare_versions(version,min_version) < 0:\n return None\n # Find the suitable test version\n candidate = '0'\n test = None\n for t in (t for t in self.versions if supports_platform(t.platform)):\n if compare_versions(version,t.firebird_version) >= 0:\n if compare_versions(candidate,t.firebird_version) < 0:\n candidate = t.firebird_version\n test = t\n return test", "def get_ostag() -> str:\n if sys.platform.startswith(\"linux\"):\n return \"linux\"\n elif sys.platform.startswith(\"win\"):\n return \"win\" + (\"64\" if sys.maxsize > 2**32 else \"32\")\n elif sys.platform.startswith(\"darwin\"):\n return \"mac\"\n raise ValueError(f\"platform {sys.platform!r} not supported\")", "def get_platform():\n platforms = [\n \"Android\",\n \"Linux.RaspberryPi\",\n \"Linux\",\n \"XBOX\",\n \"Windows\",\n \"ATV2\",\n \"IOS\",\n \"OSX\",\n \"Darwin\",\n ]\n\n for platform in platforms:\n if xbmc.getCondVisibility('System.Platform.'+platform):\n return platform\n return \"Unknown\"", "def detect_centos_release(self) -> str:\n return run('cat /etc/redhat-release').split()[-2][0]", "def get_version():\n version = \"unknown\"\n try:\n version_file = open(VERSIONFILE, \"r\")\n for line in version_file:\n if line.startswith('__version__'):\n version = line.split(\"'\")[1]\n break\n except EnvironmentError:\n pass # Okay, there is no version file.\n return version", "def known_os_type():\n return 'Linux'", "def get_version():\n return '%d.%d.%d' % version_info", "def system_info() -> str:\n return \"\\n\".join(\n [\n f\"Python version: {platform.python_version()}\",\n f\"Python implementation: {platform.python_implementation()}\",\n f\"Python compiler: {platform.python_compiler()}\",\n f\"PyTorch version: {torch.__version__}\",\n f\"System: {platform.system() or 'Unable to determine'}\",\n f\"System version: {platform.release() or 'Unable to determine'}\",\n f\"Processor: {platform.processor() or 'Unable to determine'}\",\n 
f\"Number of CPUs: {multiprocessing.cpu_count()}\",\n ]\n )", "def architecture():\n if is_darwin:\n # Darwin's platform.architecture() is buggy and always\n # returns \"64bit\" event for the 32bit version of Python's\n # universal binary. So we roll out our own (that works\n # on Darwin).\n if sys.maxsize > 2 ** 32:\n return '64bit'\n else:\n return '32bit'\n else:\n return platform.architecture()[0]", "def get_version(cls) -> str:\n if not cls.is_available():\n return 'None'\n else:\n import pkg_resources\n try:\n return pkg_resources.get_distribution('ncnn').version\n except Exception:\n return 'None'", "def get_most_complete_version(versions):\n if not versions:\n return\n\n return max(versions)", "def get_firmware_version():\r\n return utils.run('crossystem fwid').stdout.strip()", "def hardware_version(self) -> str:\n return self.camera_info[\"main_hw_version\"]", "def build_number(self):\n return self.get_data(\"build_number\")", "def getLibVersion():\n return \"Software Development Library for Linux 1.999.1\"", "def os_version(self):\n return self._os_version", "def get_machine_version():\n return get_file_content(\"/home/pi/.machineconfig/latest_version\")", "def fpga_major():\n return int, None", "def version():\n from MotionWise.log_proc import __version__ as log_ver\n from MotionWise.pm_measurement import __version__ as pm_ver\n from MotionWise.MotionWise_perf_proxy import __version__ as proxy_ver \n from MotionWise.MotionWise_perf_client import __version__ as client_ver \n \n ver = \"$Revision: 80204 $\".split()[1]\n batch = max([ pm_instrument.version().split('.')[-1], log_ver\n , ver, pm_ver, proxy_ver, client_ver, FP.__version__])\n return \"3.0.{}\".format(batch)", "def get_current_kernel_arch():\r\n try:\r\n return os.popen('uname -m').read().rstrip()\r\n except:\r\n logging.info(\"Not Found\")\r\n return -1", "def GetPlatform(self):\n arch = \"None\"\n # check architecture name\n if \"CMTCONFIG\" in os.environ:\n arch = os.environ[\"CMTCONFIG\"]\n elif \"SCRAM_ARCH\" in os.environ:\n arch = os.environ[\"SCRAM_ARCH\"]\n return arch", "def get_osversion(self):\n\t\treturn call_sdk_function('PrlFoundVmInfo_GetOSVersion', self.handle)", "def os_version(self):\n version_data = self._raw_version_data()\n if self._os_version is None:\n self._os_version = version_data[\"version\"]\n\n return self._os_version", "def get_os() -> str:\n system = platform.system().lower()\n\n if system == \"linux\":\n machine = os.uname().machine\n if machine.startswith(\"arm\") or machine.startswith(\"aarch\"):\n system = \"pi\"\n\n return system + \"_\" + platform.architecture()[0]", "def operating_system_version(self):\n return self._operating_system_version", "def get_os_version(instance):\n if instance.cloud == 'aws':\n client = boto3.client('ec2', instance.region)\n image_id = client.describe_instances(InstanceIds=[instance.id])['Reservations'][0]['Instances'][0]['ImageId']\n return '16.04' if '16.04' in client.describe_images(ImageIds=[image_id])['Images'][0]['Name'] else '14.04'\n if instance.cloud == 'gcp':\n credentials = GoogleCredentials.get_application_default()\n compute = discovery.build('compute', 'v1', credentials=credentials)\n for disk in compute.instances().get(instance=instance.name,\n zone=instance.zone,\n project=instance.project).execute()['disks']:\n if not disk.get('boot'):\n continue\n for value in disk.get('licenses', []):\n if '1604' in value:\n return '16.04'\n if '1404' in value:\n return '14.04'\n return '14.04'\n return '14.04'", "def get_version():\n return 
magpy.get_version()", "def armory_version_tbi():\n try:\n expected_armory_version = subprocess.check_output(\n \"python setup.py --version\".split(\" \")\n )\n except subprocess.CalledProcessError:\n print(\"armory .git not avaiable...trying armory\")\n expected_armory_version = subprocess.check_output(\"armory version\".split(\" \"))\n expected_armory_version = expected_armory_version.decode(\"utf-8\")\n expected_armory_version = expected_armory_version.replace(\"\\n\", \"\").strip()\n return expected_armory_version", "def test_osx_version_number_value(self):\n \n running_version_number = get_osx_version()[0]\n \n # Check to make sure the returned valued is 10.11.1\n self.assertEqual(running_version_number, '10.11.1')", "def build_api_version(self):\n return self._build_api_version", "def software_version(self) -> str:\n return self.camera_info[\"main_sw_version\"]", "def runtime_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"runtime_version\")", "def get_build_line(latest_build):\n proc = Popen([\"osg-koji\", \"buildinfo\", latest_build],\n stdout=PIPE)\n build_line = proc.stdout.readline().decode(\"latin-1\").strip()\n ret = proc.wait()\n if ret != 0 or not build_line:\n return\n return build_line", "def android_version(self):\n if not self._android_version:\n # Get the Android version from the connected device\n cmd = [\"getprop\", \"ro.build.version.release\"]\n # TODO: surround with try/except?\n tmp = subprocess.check_output(self.shell + cmd).decode()\n self._android_version = tmp.strip('\\r\\n')\n return self._android_version", "def get_python_version() -> str:\n return \"{} {} on {}\".format(\n platform.python_implementation(),\n platform.python_version(),\n platform.system(),\n )", "def software_versions():\n\n quiet = 1\n versions = collections.OrderedDict()\n for package in ['python', 'python3', 'robot', 'firefox', 'google-chrome']:\n # Note: \"robot --version\" returns 0x00000000000000fb.\n # Note: If package does not exist, 0x7f is returned.\n rc, version = gc.shell_cmd(package + \" --version\",\n valid_rcs=[0, 0x7f, 0xfb])\n versions[package] = \"Not installed\" if rc == 0x7f else version.rstrip('\\n')\n\n versions.update(import_versions)\n\n for package in ['robotframework-angularjs', 'robotframework-scplibrary',\n 'robotframework-extendedselenium2library']:\n rc, version = gc.shell_cmd(\"pip3 show \" + package\n + \" | grep Version | sed -re 's/.*: //g'\")\n versions[package] = \"Not installed\" if not version else version.rstrip('\\n')\n\n rc, version = gc.shell_cmd(\"lsb_release -d -s\")\n versions[\"host OS\"] = \"Failed\" if not version else version.rstrip('\\n')\n return versions" ]
[ "0.74187696", "0.71662873", "0.7020225", "0.70104754", "0.6995423", "0.6940619", "0.67905074", "0.6742232", "0.66234714", "0.6523554", "0.65176624", "0.6482276", "0.6474209", "0.6407805", "0.6395835", "0.63706833", "0.63454497", "0.6344706", "0.6331214", "0.6328722", "0.6328722", "0.6296596", "0.6291574", "0.6291574", "0.62871385", "0.6262032", "0.6254447", "0.62434715", "0.620778", "0.6196568", "0.618329", "0.61732703", "0.6156282", "0.61548465", "0.6152011", "0.6151375", "0.6148373", "0.6145763", "0.612742", "0.6126294", "0.6080155", "0.6066607", "0.6066499", "0.6065789", "0.6064409", "0.60623705", "0.60442936", "0.60408276", "0.60398656", "0.6035782", "0.6024395", "0.60222846", "0.6018895", "0.60146165", "0.6001575", "0.5999244", "0.5996828", "0.5994178", "0.5984417", "0.59701186", "0.59677064", "0.5962443", "0.59619147", "0.59424245", "0.594076", "0.5935566", "0.5935058", "0.593112", "0.59253865", "0.5924414", "0.5921864", "0.59215283", "0.59078044", "0.58931196", "0.5890521", "0.58877164", "0.5873153", "0.58649325", "0.5864129", "0.5855189", "0.5848561", "0.5848333", "0.58452094", "0.5831339", "0.58247507", "0.58224225", "0.5821464", "0.58195794", "0.5813126", "0.58069134", "0.57957375", "0.5790588", "0.5781717", "0.57813483", "0.57803655", "0.57794374", "0.5776672", "0.5770394", "0.576204", "0.5761818" ]
0.7350579
1
Given your major OS version, are you running the latest minor patch?
def check_os_up_to_date(self, sys_info, api_results):
    if not api_results.get("latest_os_version"):
        self.results[self.current_endpoint]["latest_os_version"] = self.__make_api_get(
            '/apple/latest_os_version/%s' % (".".join(sys_info.get("os_ver").split(".")[:2])))
    self.message("\n\tUp-to-date OS check:")
    # Validate response from API
    if self._validate_response(api_results["latest_os_version"]):
        # Valid response from API - now interpret it
        my_os_ver_str = sys_info.get("os_ver").split(".")
        my_os_ver_num = int(
            "%s%s%s" % (my_os_ver_str[0], my_os_ver_str[1], my_os_ver_str[2]))
        api_os_ver_str = api_results["latest_os_version"]["msg"].split(".")
        api_os_ver_num = int(
            "%s%s%s" % (api_os_ver_str[0], api_os_ver_str[1], api_os_ver_str[2]))
        # if sys_info.get("os_ver") !=
        # api_results["latest_os_version"]["msg"]:
        if my_os_ver_num < api_os_ver_num:
            self.message(
                "\t\t[-] ATTENTION - You are NOT running the most up to date version of the OS. Your OS version is %s, the latest versions is %s" %
                (sys_info.get("os_ver"), api_results["latest_os_version"]["msg"]))
        elif my_os_ver_num > api_os_ver_num:
            self.message(
                "\t\t[!] ATTENTION - It looks like you might be running a development OS build %s. The EFIgy API currently only has reliable data for production OS releases." %
                (sys_info.get("os_ver")))
        else:
            self.message(
                "\t\t[+] SUCCESS - You are running the latest major/minor/micro version of the OS you have installed (%s)" %
                (sys_info.get("os_ver")))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def osversion():\n return platform()", "def minor_version(self):\n return self.unpack_dword(0x18)", "def operatingsystem_version_minor(self):\n # type: () -> string_types\n return self._operatingsystem_version_minor", "def get_host_os_minor(self):\n\t\treturn call_sdk_function('PrlSrvCfg_GetHostOsMinor', self.handle)", "def version(self):\n return \"%d.%d\" % (self._vmajor, self._vminor)", "def fpga_minor():\n return int, None", "def test_minor(self):\n self.assertEqual(1, self._version1.minor())\n self.assertEqual(3, self._version2.minor())", "def commcare_minor_release(self):\n return '%d.%d' % self.build_spec.minor_release()", "def is_new_osx():\n name = distutils.util.get_platform()\n if sys.platform != \"darwin\":\n return False\n elif name.startswith(\"macosx-10\"):\n minor_version = int(name.split(\"-\")[1].split(\".\")[1])\n if minor_version >= 7:\n return True\n else:\n return False\n else:\n return False", "def get_version():\n return 1", "def test_major(self):\n self.assertEqual(\"0\", self._version1.major())\n self.assertEqual(\"1.2\", self._version2.major())", "def minor_version(self) -> str:\n return pulumi.get(self, \"minor_version\")", "def python_build():\n return _sys_version()[4:6]", "def get_version():\n major=c_int_t(0)\n minor=c_int_t(0)\n patch=c_int_t(0)\n safe_call(backend.get().af_get_version(c_pointer(major), c_pointer(minor), c_pointer(patch)))\n return major.value,minor.value,patch.value", "def systemversionstr():\n return platform.uname().system", "def version_max():\n return VERSION_MAX", "def version_minor(self):\n assert self._version_patch != NotImplemented\n return self._version_patch", "def test_get_next_version_MINOR(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, MINOR + 1, MAJOR, MINOR + 1, 0): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, PATCH)))\n self.assertEqual('%d.%d-%d' % (MAJOR, MINOR + 1, 0), ver)", "def get_host_os_sub_minor(self):\n\t\treturn call_sdk_function('PrlSrvCfg_GetHostOsSubMinor', self.handle)", "def test_patch_hyperflex_server_firmware_version(self):\n pass", "def getversion(): # 3\n res,resargs = _msk.Env.getversion()\n if res != 0:\n raise Error(rescode(res),\"\")\n _major_return_value,_minor_return_value,_build_return_value,_revision_return_value = resargs\n return _major_return_value,_minor_return_value,_build_return_value,_revision_return_value", "def operatingsystem_version_major(self):\n # type: () -> string_types\n return self._operatingsystem_version_major", "def version_min():\n return VERSION_MIN", "def version():\n from MotionWise.log_proc import __version__ as log_ver\n from MotionWise.pm_measurement import __version__ as pm_ver\n from MotionWise.MotionWise_perf_proxy import __version__ as proxy_ver \n from MotionWise.MotionWise_perf_client import __version__ as client_ver \n \n ver = \"$Revision: 80204 $\".split()[1]\n batch = max([ pm_instrument.version().split('.')[-1], log_ver\n , ver, pm_ver, proxy_ver, client_ver, FP.__version__])\n return \"3.0.{}\".format(batch)", "def version():\n\n pass", "def _major_version(self):\n version_tuple = StrictVersion(self.plugin.version).version\n major = '.'.join(map(str, version_tuple[:2]))\n\n return major", "def version():\n return uname().version", "def version():\n return uname().version", "def getOsVersion():\n os_version_tuple = platform.mac_ver()[0].split('.')\n return int(os_version_tuple[1])", "def major_version(self):\n return self.unpack_dword(0x14)", "def version_number() -> int:\n return 0", "def 
major_version(self) -> str:\n return pulumi.get(self, \"major_version\")", "def fpga_major():\n return int, None", "def get_ilo_firmware_version_as_major_minor(self):\n try:\n manager, reset_uri = self._get_ilo_details()\n ilo_fw_ver_str = (\n manager['Oem']['Hp']['Firmware']['Current']['VersionString']\n )\n return common.get_major_minor(ilo_fw_ver_str)\n except Exception:\n return None", "def get_version():\n return '%d.%d.%d' % version_info", "def get_version():\n vers = [\"%(major)i.%(minor)i\" % __version_info__, ]\n\n if __version_info__['micro']:\n vers.append(\".%(micro)i\" % __version_info__)\n if __version_info__['releaselevel'] != 'final':\n vers.append('%(releaselevel)s' % __version_info__)\n return ''.join(vers)", "def _get_version(self):", "def _getVersionMinor(self):\n return int(self.model.getroot().attrib['versionMinor'])", "def browser_version_minor(self):\n # type: () -> string_types\n return self._browser_version_minor", "def get_minor_device_class(self) -> str:\n major = self.get_major_device_class()\n match major:\n case \"computer\":\n flag = mod.MinorComputerClass(self.minorDeviceClass())\n return MINOR_COMPUTER_CLASS.inverse[flag]\n case \"phone\":\n flag = mod.MinorPhoneClass(self.minorDeviceClass())\n return MINOR_PHONE_CLASS.inverse[flag]\n case \"network\":\n flag = mod.MinorNetworkClass(self.minorDeviceClass())\n return MINOR_NETWORK_CLASS.inverse[flag]\n case \"audio_video\":\n flag = mod.MinorAudioVideoClass(self.minorDeviceClass())\n return MINOR_AUDIO_VIDEO_CLASS.inverse[flag]\n case \"peripheral\":\n flag = mod.MinorPeripheralClass(self.minorDeviceClass())\n return MINOR_PERIPHERAL_CLASS.inverse[flag]\n case \"imaging\":\n flag = mod.MinorImagingClass(self.minorDeviceClass())\n return MINOR_IMAGING_CLASS.inverse[flag]\n case \"wearable\":\n flag = mod.MinorWearableClass(self.minorDeviceClass())\n return MINOR_WEARABLE_CLASS.inverse[flag]\n case \"toy\":\n flag = mod.MinorToyClass(self.minorDeviceClass())\n return MINOR_TOY_CLASS.inverse[flag]\n case \"health\":\n flag = mod.MinorHealthClass(self.minorDeviceClass())\n return MINOR_HEALTH_CLASS.inverse[flag]\n case \"uncategorized\":\n return \"misc\"\n case _:\n raise ValueError(major)", "def test_python_version(self):\n assert 2 == sys.version_info.major\n assert 7 == sys.version_info.minor\n assert 6 <= sys.version_info.micro", "def test_get_next_version_PATCH(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, MINOR, MAJOR, MINOR, PATCH + 1): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, PATCH)))\n self.assertEqual('%d.%d-%d' % (MAJOR, MINOR, PATCH + 1), ver)", "def getwindowsversion(): # real signature unknown; restored from __doc__\n pass", "def test_minor(scraper, version_parts):\n\n new_version_parts = list(version_parts)\n new_version_parts[1] = int(new_version_parts[1]) + 1\n\n assert scraper.is_compatible_with(generate_version(new_version_parts)) is True", "def version():\n cmd = \"{} -v\".format(_detect_os())\n out = __salt__[\"cmd.run\"](cmd).splitlines()\n ret = out[0].split(\": \")\n return ret[1]", "def hardware_version(self):\n version = self._dll.JLINKARM_GetHardwareVersion()\n major = version / 10000 % 100\n minor = version / 100 % 100\n return '%d.%02d' % (major, minor)", "def _platform_compatible():\r\n raise NotImplementedError", "async def osversion(self):\n\n await self.bot.say(box(release(), 'Bash'))", "def is_release():\n return VERSION[-1]", "def python_branch():\n\n return _sys_version()[2]", "def _getMajorMinorVersion( self, sVersion 
):\n\n\t\ttry:\n\t\t\trgs = sVersion.split( '.' )\n\t\t\tif len( rgs ) == 2:\n\t\t\t\treturn sVersion\n\n\t\t\treturn rgs[ 0 ] + '.' + rgs[ 1 ]\n\n\t\texcept Exception, e:\n\t\t\terrMsg( 'error getting major.minor version' )\n\t\t\terrMsg( e )\n\t\t\treturn ''", "def get_major_version(version):\n return str(check_version(version)[0])", "def get_version(self):\n return 0", "def testStratisVersion(self):\n version = Manager.Properties.Version.Get(get_object(TOP_OBJECT))\n (major, _, _) = version.split(\".\")\n self.assertEqual(major, \"0\")", "def version():\n print(\"Code writen for Python3.6.4. You're using python version:\")\n print(platform.python_version())", "def version():\n\n print(VERSION_CODE)", "def latestidd():\n pth, _ = run_functions.install_paths(version='8.8.0') # works with any value in version\n dirpth = os.path.dirname(pth)\n dirpth = os.path.dirname(dirpth)\n alldirs = os.listdir(dirpth)\n eplusdirs = [dir for dir in alldirs if dir.startswith('EnergyPlus')]\n maxapp = max(eplusdirs)\n ver = folder2ver(maxapp)\n return ver", "def latestidd():\n pth, _ = run_functions.install_paths(\n version=\"8.8.0\"\n ) # works with any value in version\n dirpth = os.path.dirname(pth)\n dirpth = os.path.dirname(dirpth)\n alldirs = os.listdir(dirpth)\n eplusdirs = [dir for dir in alldirs if dir.startswith(\"EnergyPlus\")]\n maxapp = max(eplusdirs)\n ver = folder2ver(maxapp)\n return ver", "def test_get_next_version_MAJOR(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR + 1, 0, MAJOR + 1, 0, 0): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, PATCH)))\n self.assertEqual('%d.%d-%d' % (MAJOR + 1, 0, 0), ver)", "def usefulFunction():\n print(platform.uname()) # Yay it told me about my computer - no idea what it means but thats cool", "def major_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"major_version\")", "def detect_centos_release(self) -> str:\n return run('cat /etc/redhat-release').split()[-2][0]", "def major_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"major_version\")", "def test_version():\n versions = ((2, 7, 16), (3, 5, 7), (3, 6, 8), (3, 7, 3))\n assert sys.version_info[:3] in versions", "def get_host_os_major(self):\n\t\treturn call_sdk_function('PrlSrvCfg_GetHostOsMajor', self.handle)", "def get_os_release():\n if platform.linux_distribution()[0]:\n return \" \".join(platform.linux_distribution())\n elif platform.mac_ver()[0]:\n return \"%s %s\" % (platform.mac_ver()[0], platform.mac_ver()[2])\n else:\n return \"Unknown\"", "def get_version():\n return magpy.get_version()", "def firmware_version(self):\n return self._get_system_status()[\"firmware\"]", "def version():\n return '%d.%d' % (sys.version_info[0], sys.version_info[1])", "def get_kernel_version(level=2):\n linux_version = platform.release()\n actual_level = 0\n for idx, char in enumerate(linux_version):\n if char == \".\":\n actual_level += 1\n if actual_level > level or (not char.isdigit() and char != \".\"):\n linux_version = linux_version[0:idx]\n break\n return linux_version", "def good_py_version() -> bool:\n return sys.version_info.major >= 3 and sys.version_info.minor >= 6", "def next_version(major=False, minor=False, patch=True):\n try:\n r = Release.objects.latest()\n except Release.DoesNotExist:\n return Version('0.0.0')\n\n v = r.version\n if major:\n v = v.next_major()\n elif minor:\n v = v.next_minor()\n else:\n v = v.next_patch()\n return v", "def platform_version(self) -> Optional[str]:\n return pulumi.get(self, 
\"platform_version\")", "def _GetSystemVersion(self, component, info):\n # Check if we are on mario, then we need to use the legacy parser\n if self.ChromeOSBoard() == 'x86-mario':\n return self._GetSystemVersionMario(component, info)\n items = info.strip().splitlines()\n # This is going to give us a list of lines, we are looking for the\n # following ones:\n # BIOS version: board.xx.xx.xxx.xxx.xx\n # EC version: foobar\n for line in items:\n line_components = line.split(':')\n # The line we are looking for has at least 2 items\n if len(line_components) >= 2 and line_components[0] == component:\n return line_components[1].strip()\n self.fail('Could not locate the following item %s in the return value '\n 'of chromeos-firmwareupdate.' % component)", "def oskernel_isgreater(self, version):\n match = re.search(r\"([0-9.]+)\", self.oskernel())\n if match:\n os_release = match.group(1)\n else:\n return True\n\n for (idx, os_version) in enumerate(os_release.split('.')):\n if idx >= len(version):\n break\n if int(os_version) > int(version[idx]):\n return True\n if int(os_version) < int(version[idx]):\n return False\n\n return True", "def compatible_version(self):\n note_version = self.py_version\n py_version = sys.version_info\n if note_version[0] != py_version[0]:\n return False\n if len(note_version) > 1 and note_version[1] > py_version[1]:\n return False\n return True", "def platform():\n return ['linux']", "def known_os_type():\n return 'Linux'", "def usefulFunction():\n print(platform.uname()) #displayed this computer's specifications", "def hardware_version(self) -> str:\n return self.camera_info[\"main_hw_version\"]", "def version(self):", "def version_major(self):\n assert self._version_major != NotImplemented\n return self._version_major", "def major_version(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"major_version\")", "def _getVersionMajor(self):\n return int(self.model.getroot().attrib['versionMajor'])", "def os_release() -> str:\n\tassert(platform.system() == 'Darwin')\n\n\tver, _, _ = platform.mac_ver()\n\n\treturn ver", "def get_max_build_version(version: str) -> str:\n return Version(version).bump_minor().get_stable().dumps()", "def get_version():\n click.echo(get_current_version_number())", "def test_get_next_version_PATCH99(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, MINOR, MAJOR, MINOR, 100): '',\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, MINOR + 1, MAJOR, MINOR + 1, 0): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, 99)))\n self.assertEqual('%d.%d-%d' % (MAJOR, MINOR + 1, 0), ver)", "def getLibVersion():\n return \"Software Development Library for Linux 1.999.1\"", "def auto_minor_version_upgrade(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"auto_minor_version_upgrade\")", "def auto_minor_version_upgrade(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"auto_minor_version_upgrade\")", "def Version(self) -> _n_0_t_12:", "def Version(self) -> _n_0_t_12:", "def browser_version_major(self):\n # type: () -> string_types\n return self._browser_version_major", "def version_major_minor(version_string):\n return '.'.join(version_string.split('.')[0:2])", "def get_version():\n # this implementation avoids calling Foundation and will work on\n # non Apple OSes.\n vers = \"UNKNOWN\"\n build = \"\"\n # find the munkilib directory, and the version file\n munkilibdir = os.path.dirname(os.path.abspath(__file__))\n versionfile = os.path.join(munkilibdir, \"version.plist\")\n if 
os.path.exists(versionfile):\n try:\n vers_plist = readPlist(versionfile)\n except (IOError, OSError, ExpatError):\n pass\n else:\n try:\n vers = vers_plist['CFBundleShortVersionString']\n build = vers_plist['BuildNumber']\n except KeyError:\n pass\n if build:\n vers = vers + \".\" + build\n return vers", "def is_version_2_6() -> bool:\n v = get_version()\n if v[1] != \"singularity\" and v[1] != \"singularity-ce\":\n return False\n return v[0][0] == 2 and v[0][1] == 6", "def get_version():\n return \"0.0.1 (prerelease prototype)\"", "def getversion():\n major_ = ctypes.c_int32()\n minor_ = ctypes.c_int32()\n revision_ = ctypes.c_int32()\n res = __library__.MSK_XX_getversion(ctypes.byref(major_),ctypes.byref(minor_),ctypes.byref(revision_))\n if res != 0:\n raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])\n major_ = major_.value\n _major_return_value = major_\n minor_ = minor_.value\n _minor_return_value = minor_\n revision_ = revision_.value\n _revision_return_value = revision_\n return (_major_return_value,_minor_return_value,_revision_return_value)", "def get_kernel_version():\r\n try:\r\n return utils.run('uname -r').stdout.strip()\r\n except:\r\n logging.info(\"Not Found\")\r\n return -1", "def test_get_next_version_MINOR99(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, 100, MAJOR, 100, 0): '',\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR + 1, 0, MAJOR + 1, 0, 0): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, 99, 0)))\n self.assertEqual('%d.%d-%d' % (MAJOR + 1, 0, 0), ver)" ]
[ "0.72008204", "0.68455917", "0.6837146", "0.679873", "0.6781404", "0.67812794", "0.675668", "0.6753822", "0.6739434", "0.6729564", "0.6697316", "0.6688061", "0.65758175", "0.6563506", "0.65488297", "0.6537565", "0.64876926", "0.6484937", "0.64745766", "0.64729935", "0.64725363", "0.646655", "0.6464266", "0.6447704", "0.6432709", "0.6399195", "0.63818485", "0.63818485", "0.63811713", "0.63781136", "0.6372123", "0.63589406", "0.6354855", "0.6347272", "0.63081735", "0.6300954", "0.6293722", "0.62814415", "0.6257304", "0.62490284", "0.624426", "0.61915624", "0.61860526", "0.6179795", "0.61691654", "0.6158445", "0.61548984", "0.6146418", "0.61384916", "0.61365795", "0.613356", "0.61315477", "0.61291414", "0.6127702", "0.6114161", "0.6110736", "0.6109663", "0.6102389", "0.609688", "0.609252", "0.60891104", "0.60858685", "0.6085831", "0.6081573", "0.608099", "0.6073533", "0.60666096", "0.6065041", "0.60628206", "0.6052041", "0.60510224", "0.60413796", "0.6035168", "0.6022512", "0.60112196", "0.6008079", "0.6004199", "0.59889406", "0.598881", "0.5983728", "0.5976597", "0.59697133", "0.59691495", "0.5968303", "0.59615993", "0.59600616", "0.5958068", "0.5955718", "0.5946205", "0.59427524", "0.59427524", "0.5940655", "0.5940655", "0.594045", "0.5930546", "0.5919933", "0.5915063", "0.5906401", "0.59056723", "0.58993745", "0.5893569" ]
0.0
-1
Does it look like this mac model is still receiving EFI firmware updates?
def check_fw_being_updated(self, sys_info, api_results): if not api_results.get("efi_updates_released"): # Call the API to see what the latest version of EFI you are # expected to be running given OS ver and mac model self.results[ self.current_endpoint]["efi_updates_released"] = self.__make_api_get( '/apple/no_firmware_updates_released/%s' % (sys_info.get("hw_ver"))) # Validate response from API if self._validate_response(api_results["efi_updates_released"]): # Check to see if this is a model that has seen any EFI firmware # updates if api_results["efi_updates_released"]["msg"] is False: self.message("\n\tEFI firmware version check:") self.message( "\t\t[-]ATTENTION - Your Mac model (%s) is older than the models Apple currently provides updates for, EFIgy has no data for it." % (sys_info.get("hw_ver"))) return False else: return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gather_system_versions(self):\n # Get Mac model ID\n self.hw_version = str(\n IORegistryEntryCreateCFProperty(\n IOServiceGetMatchingService(\n 0,\n IOServiceMatching(\"IOPlatformExpertDevice\")),\n \"model\",\n None,\n 0)).replace(\n \"\\x00\",\n \"\")\n\n if \"imacpro\" in self.hw_version.lower():\n # iMac Pro stores it's EFI data different due it's new architecture\n # so grab the EFI & SMC ROM versions appropriately\n raw_efi_list = []\n raw_rom_info = str(\n IORegistryEntryCreateCFProperty(\n IORegistryEntryFromPath(\n 0,\n \"IODeviceTree:/rom\"),\n \"apple-rom-info\",\n None,\n 0))\n for data in raw_rom_info.split(\"\\n\"):\n if data.strip().startswith(\"BIOS ID\"):\n raw_efi_list = data.split(\":\")[1].strip().split(\".\")\n break\n else:\n self.message(\n \"[-] Could not find raw EFI data to determine EFI versions. Exiting....\")\n return False\n\n self.efi_version = \"%s.%s.%s\" % (\n raw_efi_list[0], raw_efi_list[2], raw_efi_list[3])\n # Can't currently find the SMC version like this on imac pros ....\n # self.smc_version = str(IORegistryEntryCreateCFProperty(IOServiceGetMatchingService(0, IOServiceMatching(\"AppleSMC\")), \"smc-version\", None, 0))\n self.smc_version = \"\"\n else:\n # EFI & SMC ROM versions\n self.smc_version = str(\n IORegistryEntryCreateCFProperty(\n IOServiceGetMatchingService(\n 0,\n IOServiceMatching(\"AppleSMC\")),\n \"smc-version\",\n None,\n 0))\n raw_efi = str(\n IORegistryEntryCreateCFProperty(\n IORegistryEntryFromPath(\n 0,\n \"IODeviceTree:/rom\"),\n \"version\",\n None,\n 0)).replace(\n \"\\x00\",\n \"\").split(\".\")\n self.efi_version = \"%s.%s.%s\" % (\n raw_efi[0], raw_efi[2], raw_efi[3])\n\n # Set the salt to be the MAC address of the system, using the MAC as a salt in this manner\n # helps ensure that the hashed sysuuid is pseudonymous. We don't want to know the sysuuid's\n # value, but we do want it to be unique however. 
The Salt value is\n # never submitted to the API\n salt = hex(getnode())\n sys_uuid = str(\n IORegistryEntryCreateCFProperty(\n IOServiceGetMatchingService(\n 0,\n IOServiceMatching(\"IOPlatformExpertDevice\")),\n \"IOPlatformUUID\",\n None,\n 0)).replace(\n \"\\x00\",\n \"\")\n self.h_sys_uuid = hashlib.sha256(salt + sys_uuid).hexdigest()\n\n # Get the Board-ID, this is how EFI files are matched to running\n # hardware - Nastee\n self.board_id = str(\n IORegistryEntryCreateCFProperty(\n IOServiceGetMatchingService(\n 0,\n IOServiceMatching(\"IOPlatformExpertDevice\")),\n \"board-id\",\n None,\n 0)).replace(\n \"\\x00\",\n \"\")\n\n # Get OS version\n self.os_version = commands.getoutput(\"sw_vers -productVersion\")\n\n # Get build number\n self.build_num = commands.getoutput(\"sw_vers -buildVersion\")\n\n # Carve out the major version as we use this a bunch\n # self.os_maj_ver = \".\".join(self.os_version.split(\".\")[:2])\n\n # Add gathered info to the dictionary to query the API with\n self.endpoints_to_check[\"127.0.0.1\"] = {\n \"hashed_uuid\": self.h_sys_uuid,\n \"hw_ver\": self.hw_version,\n \"rom_ver\": self.efi_version,\n \"smc_ver\": self.smc_version,\n \"board_id\": self.board_id,\n \"os_ver\": self.os_version,\n \"build_num\": self.build_num}\n\n return True", "def IsRetiredMac(self, serial):\n return False", "def _is_boot_mode_uefi(self):\n boot_mode = self.get_current_boot_mode()\n if boot_mode == 'UEFI':\n return True\n else:\n return False", "def test_firmware_version(self):\n self._verify_firmware_version()", "def has_efi():\n return os.path.exists(\"/sys/firmware/efi\")", "def is_old_firmware():\n # Read firmware version from runt.\n fw_version = get_runt(PROP_FW_VERSION)\n\n # Compare firmware year and month with old versions.\n year = int(fw_version.split(\".\")[0])\n month = int(fw_version.split(\".\")[1])\n if year < OLD_FW_YEAR:\n return True\n if year == OLD_FW_YEAR:\n if month < OLD_FW_MONTH:\n return True\n\n return False", "def update_firmware(self) -> str:", "def _verify_firmware_version(self):\n firmware_version = self.device.firmware_version\n self.assertTrue(firmware_version)\n self.assertIsInstance(firmware_version, str)", "def check_fw_versions(self, sys_info, api_results):\n if not api_results.get(\"latest_efi_version\"):\n # Call the API to see what the latest version of EFI you are\n # expected to be running given OS ver and mac model\n api_results[\n self.current_endpoint][\"latest_efi_version\"] = self.__make_api_get(\n '/apple/latest_efi_firmware/%s/%s' %\n (sys_info.get(\"hw_ver\"), sys_info.get(\"build_num\")))\n\n self.message(\"\\n\\tEFI firmware version check:\")\n\n # Validate response from API\n if self._validate_response(api_results[\"latest_efi_version\"]):\n # Valid response from API - now interpret it\n\n # This is kind messy but it's so as we can detect newer and older firmware and message accordingly rather than just looking for 'different' versions\n # the way that EFI versions are denoted by Apple makes this more of\n # a pain thatit really needs to be quite honestly\n api_efi_str = api_results[\"latest_efi_version\"][\"msg\"].split(\".\")\n my_efi_str = sys_info.get(\"rom_ver\").split(\".\")\n\n api_efi_ver = int(api_efi_str[1], 16)\n api_efi_build = int(api_efi_str[2].replace(\"B\", \"\"), 16)\n\n if all([x.isdigit() for x in my_efi_str]):\n # Newer EFI versions do not include a build number\n # or the Mac model code. 
The output will be something\n # like 256.0.0, whereas with the old format it would\n # be MBP133.0256.B00.\n my_efi_ver = int(my_efi_str[0], 16)\n my_efi_build = 0\n else:\n my_efi_ver = int(my_efi_str[1], 16)\n my_efi_build = int(my_efi_str[2].replace(\"B\", \"\"), 16)\n\n if api_efi_str == my_efi_str:\n self.message(\n \"\\t\\t[+] SUCCESS - The EFI Firmware you are running (%s) is the expected version for the OS build you have installed (%s) on your %s\" %\n (sys_info.get(\"rom_ver\"), sys_info.get(\"build_num\"), sys_info.get(\"hw_ver\")))\n elif my_efi_ver == api_efi_ver and my_efi_build == api_efi_build:\n self.message(\n \"\\t\\t[+] SUCCESS - The EFI Firmware you are running (%s) is the expected version for the OS build you have installed (%s) on your %s\" %\n (sys_info.get(\"rom_ver\"), sys_info.get(\"build_num\"), sys_info.get(\"hw_ver\")))\n\n elif (my_efi_ver > api_efi_ver) or (my_efi_ver > api_efi_ver and my_efi_build > api_efi_build) or (my_efi_ver == api_efi_ver and my_efi_build > api_efi_build):\n # Looks like you're running a beta or a dev build - pretty much\n # all bets are off here as the dataset doens't cover dev builds\n # but a nicer message makes sense\n self.message(\n \"\\t\\t[!] ATTENTION - It looks like your EFI version (%s) is NEWER than the latest production release that is in the dataset (%s). This is most likely because you are now, or have in the past, installed a developer preview OS and as part of that you also had newer EFI firmware installed. The EFIgy API currently only has reliable data for production OS releases.\" %\n (sys_info.get(\"rom_ver\"), api_results[\"latest_efi_version\"][\"msg\"]))\n\n else:\n self.message(\n \"\\t\\t[-] ATTENTION - You are running an unexpected firmware version given the model of your system (%s) and OS build you have installed (%s). 
Your firmware is %s, the firmware we expected to see is %s.\\n\" %\n (sys_info.get(\"hw_ver\"), sys_info.get(\"build_num\"), sys_info.get(\"rom_ver\"), api_results[\"latest_efi_version\"][\"msg\"]))", "def firmware_newer(self):\n if self.firmware_outdated():\n return False\n return self.firmware_version != self.compatible_firmware_version", "def check_device_state(self):", "def test_update_bios_boot_mode(self):\n pass", "def firmware_version(self):\n return self._get_system_status()[\"firmware\"]", "def test_patch_hyperflex_server_firmware_version(self):\n pass", "def test_update_hyperflex_server_firmware_version(self):\n pass", "def test_patch_bios_boot_mode(self):\n pass", "def is_booted(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def firmware_outdated(self):\n datefmt = ' %b %d %Y %H:%M:%S'\n\n compat_date = self.compatible_firmware_version.split('compiled')[1]\n compat_date = datetime.datetime.strptime(compat_date, datefmt)\n\n fw_date = self.firmware_version.split('compiled')[1]\n fw_date = datetime.datetime.strptime(fw_date, datefmt)\n return (compat_date > fw_date)", "def erase_sfp_info(self):\n if(uart.SerialTxEsc(self.device)):\n return 1\n uart_read = uart.SerialTx(self.device,\"sfp erase\")\n return 0", "def get_firmware_version():\r\n return utils.run('crossystem fwid').stdout.strip()", "def get_lvfs_detached_signature():\n url = \"https://cdn.fwupd.org/downloads/firmware.xml.gz.asc\"\n ua_string = \"fwupd/1.4.1\"\n r = requests.get(url, headers={\"User-Agent\": ua_string})\n return r.text", "def check_fw_mode(self, cat_cpuinfo_out):\n for line in cat_cpuinfo_out.splitlines():\n if \"firmware\" in line:\n if \"OPAL\" in line:\n return True\n else:\n return False\n return False", "def invalidate_firmware(self):\n self.exec_command('InvalidateFW')\n return None", "def get_firmware_update_status(self):\n\n response = self.execute_command(CMD_GET_FIRMWARE_UPDATE_STATUS)[0]\n inprogress = (response & 0x80) == 0x80\n return {\n \"inprogress\": inprogress,\n \"error\": response & 0x7f,\n }", "def test_firmware_update_status(self):\n status = FirmwareUpdateStatus(FirmwareUpdateStatusType.INSTALLING)\n expected_topic = (\n self.factory.common_topic + WAPMF.FIRMWARE_UPDATE_STATUS\n )\n\n expected_payload = json.dumps({\"status\": status.status.value})\n expected_message = Message(expected_topic, expected_payload)\n serialized_message = self.factory.make_from_firmware_update_status(\n status\n )\n\n self.assertEqual(expected_message, serialized_message)", "def test_verify_state_of_a_device():", "async def get_firmware_version(self):\n current_time = time.time()\n if self.query_reply_data.get(PrivateConstants.REPORT_FIRMWARE) == '':\n await self._send_sysex(PrivateConstants.REPORT_FIRMWARE, None)\n while self.query_reply_data.get(\n PrivateConstants.REPORT_FIRMWARE) == '':\n elapsed_time = time.time()\n if elapsed_time - current_time > 2:\n return None\n await asyncio.sleep(self.sleep_tune)\n reply = ''\n for x in self.query_reply_data.get(PrivateConstants.REPORT_FIRMWARE):\n reply_data = ord(x)\n if reply_data:\n reply += chr(reply_data)\n self.query_reply_data[PrivateConstants.REPORT_FIRMWARE] = reply\n return self.query_reply_data.get(PrivateConstants.REPORT_FIRMWARE)", "async def get_firmware_version(self):\n current_time = time.time()\n #logstring(\"setting current time {}\".format(current_time))\n #logstring(\"1\")\n if self.query_reply_data.get(PrivateConstants.REPORT_FIRMWARE) == '':\n #logstring(\"2\")\n #logstring(\"checking time now 1 
{}\".format(time.time()))\n await self._send_sysex(PrivateConstants.REPORT_FIRMWARE, None)\n #logstring(\"checking time now 2 {}\".format(time.time()))\n #logstring(\"3\")\n if self.serial_port.IsPortOpen == False:\n #logstring(\"Looks like that port wasn't working!!!!!!!!!!!!!????\")\n return None\n while self.query_reply_data.get(\n PrivateConstants.REPORT_FIRMWARE) == '':\n #logstring(\"4\")\n elapsed_time = time.time()\n #logstring(\"setting elapsed time {}\".format(elapsed_time))\n #logstring(\"5\")\n if elapsed_time - current_time > 3:\n #logstring(\"really took too long: {} {} {}\".format(elapsed_time, current_time, elapsed_time - current_time))\n return None\n #logstring(\"7\")\n if self.serial_port.IsPortOpen == False:\n #logstring(\"Looks like that port wasn't working!!!!!!!!!!!!!\")\n return None\n await asyncio.sleep(self.sleep_tune)\n #logstring(\"8\")\n #logstring(\"Geez, that took: {} {} {} ??????????????????\".format(elapsed_time, current_time, elapsed_time - current_time))\n\n reply = ''\n #logstring(\"9\")\n for x in self.query_reply_data.get(PrivateConstants.REPORT_FIRMWARE):\n reply_data = ord(x)\n if reply_data:\n reply += chr(reply_data)\n self.query_reply_data[PrivateConstants.REPORT_FIRMWARE] = reply\n #logstring(\"10\")\n return self.query_reply_data.get(PrivateConstants.REPORT_FIRMWARE)", "def update_firmware(self):\n return self._dll.JLINKARM_UpdateFirmwareIfNewer()", "def test_device_status(self):\n #071031031E3067\n self.ms.add_response({'\\x14071031031E3067\\x0D': 'PA\\x0D'})\n # Network / Device ID\n response = self.upb.status((49, 3))\n self.assertTrue(response)", "def is_bootable(self):\n return self.bootable_flag == 0x80", "def is_boot_code_present(self):\n\n\t\treturn struct.unpack('<H', self.boot_sector_data[0 : 2])[0] != 0 and struct.unpack('<H', self.boot_sector_data[510 : 512])[0] == 0xAA55", "def _get_new_data_status(self):\n readValue = self._read_register_1ubyte(self.BME680_EAS_STATUS_0)\n\n return True if ((readValue & 0b10000000) == 0b10000000) else False", "def hw_version(self) -> str | None:\n return self.status.get(\"FIRMWARE\")", "def test_create_hyperflex_server_firmware_version(self):\n pass", "def get_boot_mode():\n boot_mode = 'Legacy'\n try:\n reg_key = winreg.OpenKey(\n winreg.HKEY_LOCAL_MACHINE, r'System\\CurrentControlSet\\Control')\n reg_value = winreg.QueryValueEx(reg_key, 'PEFirmwareType')[0]\n if reg_value == 2:\n boot_mode = 'UEFI'\n except:\n boot_mode = 'Unknown'\n\n return boot_mode", "def get_manufacturer_bytes(self):\n manufacturer = self._manufacturer.upper()\n id = ((ord(manufacturer[0]) - 64) * 32 * 32 +\n (ord(manufacturer[1]) - 64) * 32 +\n (ord(manufacturer[2]) - 64))\n if 0x0421 <= id <= 0x6b5a:\n return self.manufacturer_encode(id, 2)\n return False", "def print_firmata_status(self):\n print(f\"pyFirmata version: {pyfirmata.__version__}\")\n print(f\"Hardware: {self.board.__str__()}\")\n print( \"Firmata firmware: {major}.{minor}\".format(\n major = self.board.get_firmata_version()[0],\n minor = self.board.get_firmata_version()[1]\n ))", "def pilotIsBootValid (self):\n return self.isBootValid()", "def test_get_bios_boot_mode_by_moid(self):\n pass", "def system_valid(self):\n return self.udev.devices_exist", "def test_verify_state_of_a_device_when_disconnected_from_the_device():", "def verify_sas_interconnect_firmware_from_li(cls):\n #\n s2l = ui_lib.get_s2l()\n ic_firm = {}\n ic_installedversion = {}\n\n error_flag = 0\n\n logger.debug(\"Getting the list of interconnects and firmware versions\")\n 
CommonOperationLogicalInterconnect.click_logical_interconnect_firmware()\n\n length = len(s2l._element_find(GeneralLogicalInterconnectsElements.ID_INTERCONNECT_NATASHA_LIST, False, True))\n logger.debug(\"The number of ics is %s\" % length)\n for index in range(1, length + 1):\n installed_fw = ui_lib.get_text(\n UpdateLogicalInterconnectFirmwareElements.ID_SWITCH_FW_DETAILS % index + '/td[3]')\n baseline_fw = ui_lib.get_text(\n UpdateLogicalInterconnectFirmwareElements.ID_SWITCH_FW_DETAILS % index + '/td[4]')\n ic = ui_lib.get_text(UpdateLogicalInterconnectFirmwareElements.ID_SWITCH_FW_DETAILS % index + '/td[1]')\n ic_model = ui_lib.get_text(UpdateLogicalInterconnectFirmwareElements.ID_SWITCH_FW_DETAILS % index + '/td[2]')\n m = re.match(\".*/.*/\", installed_fw)\n if m:\n installed_fw = installed_fw.split()[0]\n if baseline_fw != installed_fw:\n logger.warn(\n \"Installed firmware '{0}' on IC '{1}', model: '{3}' is not same as baseline firmware '{2}'\".format(\n installed_fw, ic, baseline_fw, ic_model))\n error_flag = error_flag + 1\n ic_firm[ic] = baseline_fw\n ic_installedversion[ic] = installed_fw\n else:\n logger.debug(\n \"Installed firmware '{0}' on IC '{1}', model: '{3}' is same as baseline firmware '{2}'\".format(\n installed_fw, ic, baseline_fw, ic_model))\n ic_firm[ic] = baseline_fw\n ic_installedversion[ic] = installed_fw\n\n if error_flag != 0:\n logger.debug(\"Some mismatch in the versions comparision, please check\")\n return False, ic_firm, ic_installedversion\n else:\n logger.debug(\" Versions comparisions went well, Firmware activation is successful\")\n return True, ic_firm, ic_installedversion", "def hpb_supported(self):", "def eeprom_enabled():\n\n return (_read_device_state() & _get_addr_for_bit(_eeprom_bit)) != 0", "def no_afni():\n if Info.version() is None:\n return True\n return False", "def firmware_version(self):\n return self.data.get('fw_ver')", "def event_m20_11_x66(z54=211000002):\n \"\"\"State 0,2: Host?\"\"\"\n if IsGuest() != 0:\n pass\n else:\n \"\"\"State 1: Already destroyed?\"\"\"\n if GetEventFlag(z54) != 0:\n pass\n else:\n \"\"\"State 3: Not defeated\"\"\"\n return 0\n \"\"\"State 4: Defeated\"\"\"\n return 1", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def firmware(self) -> str:\n return self._device_info[\"Firmware\"]", "def has_been_paired():\n # This forces a load from the identity file in case the pairing state\n # has recently changed\n id = IdentityManager.load()\n return id.uuid is not None and id.uuid != \"\"", "def check(interface, mac):\n\tifconfig = sp.check_output(['sudo','ifconfig',interface]).decode()\n\tregexMax = re.compile(r'(\\w\\w:){5}\\w\\w')\n\tresult = regexMax.search(ifconfig)\n\tif not result == None and result.group() == mac:\n\t\tprint('Mac 
changed')\n\t\tprint('[+] '+interface+' --> '+mac)\n\telse:\n\t\tprint('[[[[!]]]] Faliour',result.group())", "def checkMac(self, mac):\n\t\tif mac in self.seenMacs:\n\t\t\treturn True\n\t\treturn False", "def test_has_mac_accelerate(self):\n\t\t\n\t\texpected_has_accelerate = False\n\t\t\n\t\timport platform\n\t\timport numpy.distutils.system_info as sysinfo\n\n\t\tmac_arch = platform.machine()\n\t\tif mac_arch == 'arm64':\n\t\t\tinfo = sysinfo.get_info('accelerate')\n\t\t\tif info is not None and len(info)>0:\n\t\t\t for x in info['extra_link_args']:\n\t\t\t if 'Accelerate' in x:\n\t\t\t expected_has_accelerate = True\n\t\t\t \n\t\tactual_has_accelerate = RMT_Util.has_mac_accelerate()\n\t\tself.assertEqual(expected_has_accelerate, actual_has_accelerate)\n\t\treturn", "def assumed_state(self):\n if self.tahoma_device.type.startswith(\"rts\"):\n return True\n\n return False", "def is_wasabi_running():\n wasabi_process_id = run('pidof wassabee')\n if wasabi_process_id:\n return True\n else:\n return False", "def fusion_api_get_appliance_firmware_upgrade_status(self, api=None, headers=None):\n param = '/notification'\n return self.appfirmware.get(api=api, headers=headers, param=param)", "def test_get_hyperflex_server_firmware_version_by_moid(self):\n pass", "def CheckBootFailure(self, serial_out, instance):\n pass", "def read_fw_version(self):\n\n # This function expects the firmware version to be in a line\n # prefixed with 'Product Extra'.\n # At the moment, it takes the form:\n # Product Extra : MCH FW V2.18.8 Final (r14042) (Mar 31 2017 - 11:29)\n # The following two parts will be extracted:\n # mch_fw_ver: V2.18.8 Final\n # mch_fw_date: Mar 31 2017 - 11:29\n # If NAT change the format, then this function will need to be updated\n\n pattern = \".*: MCH FW (.*) \\(.*\\) \\((.*)\\)\"\n\n for mch in range(1,3):\n try:\n result = self.mch_comms.call_ipmitool_command([\"fru\", \"print\", str(mch + MCH_FRU_ID_OFFSET)])\n\n for line in result.splitlines():\n if FW_TAG in line:\n match = re.match(pattern, line)\n if match:\n self.mch_fw_ver[mch] = match.group(1)\n self.mch_fw_date[mch] = match.group(2)\n else:\n self.mch_fw_ver[mch] = \"Unknown\"\n self.mch_fw_date[mch] = \"Unknown\"\n except CalledProcessError as e:\n self.mch_fw_ver[mch] = \"Unknown\"\n self.mch_fw_date[mch] = \"Unknown\"\n except TimeoutExpired as e:\n print(\"read_fw_version: caught TimeoutExpired exception: {}\".format(e))", "def _is_v0x04(self):\n return self.switch.is_connected() and \\\n self.switch.connection.protocol.version == 0x04", "def _mac_test(mac):\n\n\t\tif re.search(r'([0-9A-F]{2}[:]){5}([0-9A-F]){2}', mac.upper()) is not None:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "def check_ovmf(self):\n\n for index, ovmf in enumerate(self.ovmf_bios):\n if os.path.exists(ovmf):\n continue\n for suffix in ('qcow2', 'bin'):\n path = '%s/%s.%s' % (self.get('DEPLOY_DIR_IMAGE'), ovmf, suffix)\n if os.path.exists(path):\n self.ovmf_bios[index] = path\n break\n else:\n raise Exception(\"Can't find OVMF firmware: %s\" % ovmf)", "def test_delete_hyperflex_server_firmware_version(self):\n pass", "def detect_yubikey(self):\n try:\n self.yk = yubico.find_yubikey()\n self.version.set(\"Version:%s\" % self.yk.version())\n self.serial.set(\"Serial:%s\" % self.yk.serial())\n except yubico.yubikey.YubiKeyError:\n self.version.set(\"No YubiKey detected\")\n self.serial.set(\"\")\n self.yk = None\n except yubico.yubikey_usb_hid.usb.USBError as e:\n self.version.set(\"No YubiKey detected\")\n self.serial.set(\"\")\n 
self.user_message.set(\n \"A USB error occurred:%s - do you have permission to access USB devices?\",\n e.message\n )", "def check_maccs(maccs):\n\n\t# print number of features\n\tprint (\"Number of features =\", len(maccs[0]))\n\n\t# Check if size of all fingerprints is 167\n\tcount = 0\n\tfor fp in maccs:\n\t\tif len(fp) != 167:\n\t\t\tcount += 1\n\n\tif count == 0:\n\t\tprint (\"All instances have length 167.\")\n\telse:\n\t\tprint (\"Data not uniform. Check lengths for instances.\")\n\t\treturn False\n\n\treturn True", "def is_available():", "def check_restart(self, data):\n for entry in data:\n if entry.find('MODEM:STARTUP')!=-1: \n #print 'restart detected'\n return True\n if entry.find('+PBREADY')!=-1:\n #print 'ready'\n return True\n return False", "def isECGLeadV8(obxDict):\n readingCode = getReadingCode(obxDict)\n return readingCode == 'X110-7'", "def preCommitFixup(self):\n log_method_call(self, self.name)\n # UEFI firmware/bootloader cannot read 1.1 or 1.2 metadata arrays\n if getattr(self.format, \"mountpoint\", None) == \"/boot/efi\":\n self.metadataVersion = \"1.0\"", "def firmware_update_image(self) -> Optional[str]:\n return pulumi.get(self, \"firmware_update_image\")", "def test_boot_rom_version_value(self):\n \n boot_rom_version = get_boot_rom_version()\n \n # Check to make sure the boot_rom_version is 'IM142.0118.B12'\n self.assertEqual(boot_rom_version, 'IM142.0118.B12')", "def _detect(self):\n return True", "def probe(self):\n return False", "def test_get_bios_boot_mode_list(self):\n pass", "def test_scu_is_running_rw(self):\n output = run_ectool('version')\n self.assertIn('Firmware copy: RW', output)", "def pilotValidateBoot (self):\n return self.validateBoot()", "def test_update_pci_switch(self):\n pass", "def test_firmware_update_status_error(self):\n status = FirmwareUpdateStatus(\n FirmwareUpdateStatusType.ERROR,\n FirmwareUpdateErrorType.INSTALLATION_FAILED,\n )\n expected_topic = (\n self.factory.common_topic + WAPMF.FIRMWARE_UPDATE_STATUS\n )\n\n expected_payload = json.dumps(\n {\"status\": status.status.value, \"error\": status.error.value}\n )\n expected_message = Message(expected_topic, expected_payload)\n serialized_message = self.factory.make_from_firmware_update_status(\n status\n )\n\n self.assertEqual(expected_message, serialized_message)", "def test_update_bios_unit(self):\n pass", "def _verify_device_state(expected_state):\n\n current_state = _read_device_state()\n\n if expected_state == current_state:\n return True\n\n else:\n print(\"Error: Device write verification failed. 
Expected: \" + _get_bit_string(expected_state) + \" Received: \" + _get_bit_string(current_state))\n return False", "def test_get_hyperflex_server_firmware_version_list(self):\n pass", "def get_firmware_version(self):\n fw_version = {\n \"BIOS\": self._api_helper.read_txt_file(BIOS_VER_PATH),\n \"BMC\": self.__get_bmc_ver(),\n \"SWITCH_CPLD1\": self.__get_cpld_ver(SW_CPLD1_VER_PATH),\n \"SWITCH_CPLD2\": self.__get_cpld_ver(SW_CPLD2_VER_PATH),\n }.get(self.name, \"Unknown\")\n\n return fw_version", "def test_handle_abort_when_not_idle_and_version_file(self):\n mock_status_callback = MagicMock()\n mock_firmware_handler = self.MockFirmwareHandler()\n firmware_update = OSFirmwareUpdate(\n mock_firmware_handler, mock_status_callback\n )\n firmware_update.logger.setLevel(logging.CRITICAL)\n firmware_update.current_status = FirmwareUpdateStatus(\n FirmwareUpdateStatusType.INSTALLING\n )\n file_handle = open(\"last_firmware_version.txt\", \"w\")\n file_handle.close()\n firmware_update.handle_abort()\n\n self.assertFalse(os.path.exists(\"last_firmware_version.txt\"))", "def test_list_drives_drive_firmware_update(self):\n pass" ]
[ "0.66273475", "0.64601934", "0.63994926", "0.63584757", "0.6354337", "0.6339835", "0.62948906", "0.62154347", "0.62096334", "0.61546713", "0.6140502", "0.61369526", "0.61060196", "0.60729986", "0.6002015", "0.59328735", "0.59070283", "0.58983636", "0.5769682", "0.5757605", "0.56484723", "0.5638263", "0.5630572", "0.56252694", "0.5618777", "0.5612192", "0.559085", "0.55813015", "0.55660933", "0.55520844", "0.55399144", "0.55067796", "0.5493657", "0.5490059", "0.54534674", "0.54334676", "0.5417033", "0.5411239", "0.54108685", "0.5382416", "0.5369453", "0.53491807", "0.53466815", "0.5340193", "0.5336762", "0.5323062", "0.5291334", "0.5288114", "0.52810067", "0.52810067", "0.52810067", "0.52810067", "0.52810067", "0.52810067", "0.52810067", "0.52810067", "0.52810067", "0.52810067", "0.52810067", "0.52810067", "0.52810067", "0.52810067", "0.52810067", "0.52810067", "0.5259539", "0.5253383", "0.52408135", "0.5238316", "0.52295357", "0.5224486", "0.52147335", "0.5206957", "0.5203767", "0.5203567", "0.5200951", "0.51927733", "0.5190061", "0.51886463", "0.5179078", "0.5177432", "0.5165194", "0.5161097", "0.5154501", "0.5153819", "0.51536316", "0.51468706", "0.5142719", "0.51393104", "0.5136683", "0.51201373", "0.51136696", "0.511146", "0.51050043", "0.5105002", "0.5103231", "0.510314", "0.5100283", "0.50995034", "0.5096677", "0.5090605" ]
0.64189225
2
Compare this systems versions to the firmware table to see if FW is at latest versions
def check_fw_versions(self, sys_info, api_results): if not api_results.get("latest_efi_version"): # Call the API to see what the latest version of EFI you are # expected to be running given OS ver and mac model api_results[ self.current_endpoint]["latest_efi_version"] = self.__make_api_get( '/apple/latest_efi_firmware/%s/%s' % (sys_info.get("hw_ver"), sys_info.get("build_num"))) self.message("\n\tEFI firmware version check:") # Validate response from API if self._validate_response(api_results["latest_efi_version"]): # Valid response from API - now interpret it # This is kind messy but it's so as we can detect newer and older firmware and message accordingly rather than just looking for 'different' versions # the way that EFI versions are denoted by Apple makes this more of # a pain thatit really needs to be quite honestly api_efi_str = api_results["latest_efi_version"]["msg"].split(".") my_efi_str = sys_info.get("rom_ver").split(".") api_efi_ver = int(api_efi_str[1], 16) api_efi_build = int(api_efi_str[2].replace("B", ""), 16) if all([x.isdigit() for x in my_efi_str]): # Newer EFI versions do not include a build number # or the Mac model code. The output will be something # like 256.0.0, whereas with the old format it would # be MBP133.0256.B00. my_efi_ver = int(my_efi_str[0], 16) my_efi_build = 0 else: my_efi_ver = int(my_efi_str[1], 16) my_efi_build = int(my_efi_str[2].replace("B", ""), 16) if api_efi_str == my_efi_str: self.message( "\t\t[+] SUCCESS - The EFI Firmware you are running (%s) is the expected version for the OS build you have installed (%s) on your %s" % (sys_info.get("rom_ver"), sys_info.get("build_num"), sys_info.get("hw_ver"))) elif my_efi_ver == api_efi_ver and my_efi_build == api_efi_build: self.message( "\t\t[+] SUCCESS - The EFI Firmware you are running (%s) is the expected version for the OS build you have installed (%s) on your %s" % (sys_info.get("rom_ver"), sys_info.get("build_num"), sys_info.get("hw_ver"))) elif (my_efi_ver > api_efi_ver) or (my_efi_ver > api_efi_ver and my_efi_build > api_efi_build) or (my_efi_ver == api_efi_ver and my_efi_build > api_efi_build): # Looks like you're running a beta or a dev build - pretty much # all bets are off here as the dataset doens't cover dev builds # but a nicer message makes sense self.message( "\t\t[!] ATTENTION - It looks like your EFI version (%s) is NEWER than the latest production release that is in the dataset (%s). This is most likely because you are now, or have in the past, installed a developer preview OS and as part of that you also had newer EFI firmware installed. The EFIgy API currently only has reliable data for production OS releases." % (sys_info.get("rom_ver"), api_results["latest_efi_version"]["msg"])) else: self.message( "\t\t[-] ATTENTION - You are running an unexpected firmware version given the model of your system (%s) and OS build you have installed (%s). Your firmware is %s, the firmware we expected to see is %s.\n" % (sys_info.get("hw_ver"), sys_info.get("build_num"), sys_info.get("rom_ver"), api_results["latest_efi_version"]["msg"]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def firmware_newer(self):\n if self.firmware_outdated():\n return False\n return self.firmware_version != self.compatible_firmware_version", "def firmware_outdated(self):\n datefmt = ' %b %d %Y %H:%M:%S'\n\n compat_date = self.compatible_firmware_version.split('compiled')[1]\n compat_date = datetime.datetime.strptime(compat_date, datefmt)\n\n fw_date = self.firmware_version.split('compiled')[1]\n fw_date = datetime.datetime.strptime(fw_date, datefmt)\n return (compat_date > fw_date)", "def check_fw_being_updated(self, sys_info, api_results):\n if not api_results.get(\"efi_updates_released\"):\n # Call the API to see what the latest version of EFI you are\n # expected to be running given OS ver and mac model\n self.results[\n self.current_endpoint][\"efi_updates_released\"] = self.__make_api_get(\n '/apple/no_firmware_updates_released/%s' %\n (sys_info.get(\"hw_ver\")))\n\n # Validate response from API\n if self._validate_response(api_results[\"efi_updates_released\"]):\n\n # Check to see if this is a model that has seen any EFI firmware\n # updates\n if api_results[\"efi_updates_released\"][\"msg\"] is False:\n self.message(\"\\n\\tEFI firmware version check:\")\n self.message(\n \"\\t\\t[-]ATTENTION - Your Mac model (%s) is older than the models Apple currently provides updates for, EFIgy has no data for it.\" %\n (sys_info.get(\"hw_ver\")))\n return False\n else:\n return True", "def update_system_versions(self):\n #system_versions = [SystemVersion(id=-1 ,type=u'QX100',desc=u'Unknown Hardware version'),\n # SystemVersion(id=0 ,type=u'QX100',desc=u'QX100 - HW Rev A/B'),\n system_versions = [SystemVersion(id=1 ,type=u'QX100', desc=u'QX100 - HW Rev A/B bigger detector cap differences'),\n SystemVersion(id=2 ,type=u'QX100', desc=u'QX100 - HW Rev C'),\n SystemVersion(id=3 ,type=u'QX150', desc=u'QX150 - HW Rev Z Upgrade'),\n SystemVersion(id=4 ,type=u'QX200', desc=u'QX200 - HW Rev Z'),\n SystemVersion(id=5 ,type=u'QX201', desc=u'QX200 - HW with BR built Detector'),\n\t\t\t SystemVersion(id=6 ,type=u'QX150L', desc=u'QX150 - HW Rev Z Upgrade with LED'),\n SystemVersion(id=7 ,type=u'QX201L', desc=u'QX201 - HW with BR built LED Detector'),\n SystemVersion(id=200,type=u'QX200', desc=u'QX200 - Pre-Beta HW')]\n for sv in system_versions:\n dbsv = Session.query(SystemVersion).filter_by(id=sv.id).first()\n if not dbsv:\n Session.add(sv)\n else:\n if (dbsv.type != sv.type):\n dbsv.type = sv.type\n if( dbsv.desc != sv.desc):\n dbsv.desc = sv.desc\n\n Session.commit()", "def gather_system_versions(self):\n # Get Mac model ID\n self.hw_version = str(\n IORegistryEntryCreateCFProperty(\n IOServiceGetMatchingService(\n 0,\n IOServiceMatching(\"IOPlatformExpertDevice\")),\n \"model\",\n None,\n 0)).replace(\n \"\\x00\",\n \"\")\n\n if \"imacpro\" in self.hw_version.lower():\n # iMac Pro stores it's EFI data different due it's new architecture\n # so grab the EFI & SMC ROM versions appropriately\n raw_efi_list = []\n raw_rom_info = str(\n IORegistryEntryCreateCFProperty(\n IORegistryEntryFromPath(\n 0,\n \"IODeviceTree:/rom\"),\n \"apple-rom-info\",\n None,\n 0))\n for data in raw_rom_info.split(\"\\n\"):\n if data.strip().startswith(\"BIOS ID\"):\n raw_efi_list = data.split(\":\")[1].strip().split(\".\")\n break\n else:\n self.message(\n \"[-] Could not find raw EFI data to determine EFI versions. 
Exiting....\")\n return False\n\n self.efi_version = \"%s.%s.%s\" % (\n raw_efi_list[0], raw_efi_list[2], raw_efi_list[3])\n # Can't currently find the SMC version like this on imac pros ....\n # self.smc_version = str(IORegistryEntryCreateCFProperty(IOServiceGetMatchingService(0, IOServiceMatching(\"AppleSMC\")), \"smc-version\", None, 0))\n self.smc_version = \"\"\n else:\n # EFI & SMC ROM versions\n self.smc_version = str(\n IORegistryEntryCreateCFProperty(\n IOServiceGetMatchingService(\n 0,\n IOServiceMatching(\"AppleSMC\")),\n \"smc-version\",\n None,\n 0))\n raw_efi = str(\n IORegistryEntryCreateCFProperty(\n IORegistryEntryFromPath(\n 0,\n \"IODeviceTree:/rom\"),\n \"version\",\n None,\n 0)).replace(\n \"\\x00\",\n \"\").split(\".\")\n self.efi_version = \"%s.%s.%s\" % (\n raw_efi[0], raw_efi[2], raw_efi[3])\n\n # Set the salt to be the MAC address of the system, using the MAC as a salt in this manner\n # helps ensure that the hashed sysuuid is pseudonymous. We don't want to know the sysuuid's\n # value, but we do want it to be unique however. The Salt value is\n # never submitted to the API\n salt = hex(getnode())\n sys_uuid = str(\n IORegistryEntryCreateCFProperty(\n IOServiceGetMatchingService(\n 0,\n IOServiceMatching(\"IOPlatformExpertDevice\")),\n \"IOPlatformUUID\",\n None,\n 0)).replace(\n \"\\x00\",\n \"\")\n self.h_sys_uuid = hashlib.sha256(salt + sys_uuid).hexdigest()\n\n # Get the Board-ID, this is how EFI files are matched to running\n # hardware - Nastee\n self.board_id = str(\n IORegistryEntryCreateCFProperty(\n IOServiceGetMatchingService(\n 0,\n IOServiceMatching(\"IOPlatformExpertDevice\")),\n \"board-id\",\n None,\n 0)).replace(\n \"\\x00\",\n \"\")\n\n # Get OS version\n self.os_version = commands.getoutput(\"sw_vers -productVersion\")\n\n # Get build number\n self.build_num = commands.getoutput(\"sw_vers -buildVersion\")\n\n # Carve out the major version as we use this a bunch\n # self.os_maj_ver = \".\".join(self.os_version.split(\".\")[:2])\n\n # Add gathered info to the dictionary to query the API with\n self.endpoints_to_check[\"127.0.0.1\"] = {\n \"hashed_uuid\": self.h_sys_uuid,\n \"hw_ver\": self.hw_version,\n \"rom_ver\": self.efi_version,\n \"smc_ver\": self.smc_version,\n \"board_id\": self.board_id,\n \"os_ver\": self.os_version,\n \"build_num\": self.build_num}\n\n return True", "def is_old_firmware():\n # Read firmware version from runt.\n fw_version = get_runt(PROP_FW_VERSION)\n\n # Compare firmware year and month with old versions.\n year = int(fw_version.split(\".\")[0])\n month = int(fw_version.split(\".\")[1])\n if year < OLD_FW_YEAR:\n return True\n if year == OLD_FW_YEAR:\n if month < OLD_FW_MONTH:\n return True\n\n return False", "def compatible_version(self):\n\n cursor = self.disk_connection.cursor()\n try:\n row = cursor.execute(\"\"\"\n SELECT COUNT(schema_version_hash) FROM version WHERE schema_version_hash=(?);\n \"\"\", (self.schema_version_hash,)).fetchone()\n return row[0] > 0\n except sqlite3.Error: # pylint: disable=broad-except\n return False", "def check_os_up_to_date(self, sys_info, api_results):\n if not api_results.get(\"latest_os_version\"):\n self.results[self.current_endpoint][\"latest_os_version\"] = self.__make_api_get(\n '/apple/latest_os_version/%s' % (\".\".join(sys_info.get(\"os_ver\").split(\".\")[:2])))\n\n self.message(\"\\n\\tUp-to-date OS check:\")\n\n # Validate response from API\n if self._validate_response(api_results[\"latest_os_version\"]):\n\n # Valid response from API - now interpret it\n my_os_ver_str = 
sys_info.get(\"os_ver\").split(\".\")\n my_os_ver_num = int(\n \"%s%s%s\" %\n (my_os_ver_str[0],\n my_os_ver_str[1],\n my_os_ver_str[2]))\n\n api_os_ver_str = api_results[\"latest_os_version\"][\"msg\"].split(\".\")\n api_os_ver_num = int(\n \"%s%s%s\" %\n (api_os_ver_str[0],\n api_os_ver_str[1],\n api_os_ver_str[2]))\n\n # if sys_info.get(\"os_ver\") !=\n # api_results[\"latest_os_version\"][\"msg\"]:\n if my_os_ver_num < api_os_ver_num:\n self.message(\n \"\\t\\t[-] ATTENTION - You are NOT running the most up to date version of the OS. Your OS version is %s, the latest versions is %s\" %\n (sys_info.get(\"os_ver\"), api_results[\"latest_os_version\"][\"msg\"]))\n\n elif my_os_ver_num > api_os_ver_num:\n self.message(\n \"\\t\\t[!] ATTENTION - It looks like you might be running a development OS build %s. The EFIgy API currently only has reliable data for production OS releases.\" %\n (sys_info.get(\"os_ver\")))\n\n else:\n self.message(\n \"\\t\\t[+] SUCCESS - You are running the latest major/minor/micro version of the OS you have installed (%s)\" %\n (sys_info.get(\"os_ver\")))", "def _is_version_uptodate(self):\n logging.info(\"Checking tesseract version\")\n cmd = '%s -v' % (self.binary)\n logging.info(cmd) \n try:\n ret_output = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)\n except CalledProcessError:\n # Could not run tesseract\n error(self.msgs['TS_MISSING'])\n\n ver_str = '0.0.0'\n for line in ret_output.splitlines():\n if 'tesseract' in line:\n ver_str = line.split(' ')[1]\n if ver_str.endswith('dev'): # Fix for version strings that end in 'dev'\n ver_str = ver_str[:-3]\n\n # Iterate through the version dots\n ver = [int(x) for x in ver_str.split('.')]\n req = [int(x) for x in self.required.split('.')]\n\n # Aargh, in windows 3.02.02 is reported as version 3.02 \n # SFKM\n if str(os.name) == 'nt':\n req = req[:2]\n\n version_good = False\n for i,num in enumerate(req):\n if len(ver) < i+1:\n # This minor version number is not present in tesseract, so it must be\n # lower than required. 
(3.02 < 3.02.01)\n break\n if ver[i]==num and len(ver) == i+1 and len(ver)==len(req):\n # 3.02.02 == 3.02.02\n version_good = True\n continue\n if ver[i]>num:\n # 4.0 > 3.02.02\n # 3.03.02 > 3.02.02\n version_good = True\n break\n if ver[i]<num:\n # 3.01.02 < 3.02.02\n break\n \n return version_good, ver_str", "def check(self):\n current = self._get_current()\n # There is no version, so don't attempt to upgrade\n if current[-1]:\n return False\n\n highest = self._get_highest_version()\n return highest > current", "def _version_support_check(self, v_maps, **kwargs):\n if self.session._invalid_server_version():\n # server version is not valid, force a refresh right now\n self.session.get_server_version(**kwargs)\n\n if self.session._invalid_server_version():\n # server version is STILL invalid, return False\n return False\n\n for v_map in v_maps:\n if not self.session.server_version >= v_map:\n return False\n return True", "def test_above_24_latest_version(self):\n self.data['version'] = ''\n self.data['appVersion'] = '28.0'\n\n up = self.get(self.data)\n rdf = up.get_rdf()\n assert rdf.find('20202020.01') > -1", "async def check_new_version(now):\n result = await get_newest_version(hass, huuid, include_components)\n\n if result is None:\n return\n\n newest, releasenotes, android, apt = result\n\n # Load data from supervisor on hass.io\n if hass.components.hassio.is_hassio():\n newest = hass.components.hassio.get_homeassistant_version()\n\n # Validate version\n if StrictVersion(newest) > StrictVersion(current_version):\n _LOGGER.info(\"The latest available version is %s\", newest)\n info = 'Dostępna jest nowa wersja ' + newest + '. ' + releasenotes\n hass.states.async_set(\n ENTITY_ID, info, {\n ATTR_FRIENDLY_NAME: 'Aktualizacja',\n \"icon\": \"mdi:update\",\n \"reinstall_dom_app\": True,\n \"reinstall_android_app\": android,\n \"apt\": apt\n }\n )\n # add all entities to keep the order\n # hass.async_add_job(\n # hass.services.async_call(\n # 'group',\n # 'set', {\n # \"object_id\": \"dom_system_version\",\n # \"entities\": [\n # \"sensor.version_info\",\n # \"script.ais_update_system\",\n # \"camera.remote_access\",\n # \"input_boolean.ais_remote_access\",\n # \"sensor.ais_secure_android_id_dom\",\n # \"script.ais_scan_network_devices\",\n # \"script.ais_restart_system\",\n # \"script.ais_stop_system\"]}))\n\n hass.states.async_set(\n 'script.ais_update_system', 'off', {\n ATTR_FRIENDLY_NAME: ' Zainstaluj aktualizację',\n \"icon\": \"mdi:download\"\n }\n )\n\n else:\n info = 'Twój system jest aktualny, wersja ' + newest + '. 
'\n info += releasenotes\n hass.states.async_set(\n ENTITY_ID, info, {\n ATTR_FRIENDLY_NAME: 'Wersja',\n \"icon\": \"mdi:update\",\n \"reinstall_dom_app\": False,\n \"reinstall_android_app\": False,\n \"apt\": apt\n }\n )\n hass.states.async_set(\n 'script.ais_update_system', 'off', {\n ATTR_FRIENDLY_NAME: ' Sprawdź dostępność aktualizacji',\n \"icon\": \"mdi:refresh\"\n }\n )\n _LOGGER.info(\n \"You are on the latest version (%s) of Assystent domowy\", newest)", "def _PerformCompare(self, component):\n\n updater_commands = ['/usr/sbin/chromeos-firmwareupdate', '-V']\n content = subprocess.Popen(updater_commands,\n stdout=subprocess.PIPE).stdout.read()\n system_version = self._GetSystemVersion(component, content)\n whitelist_version = self._GetWhitelistVersion(component)\n self.assertEqual(system_version, whitelist_version, msg='%s does not match'\n ' what is in the whitelist.\\n\\tSystem: %s\\n\\tWhitelist: '\n '%s' % (component, system_version, whitelist_version))", "def oskernel_isgreater(self, version):\n match = re.search(r\"([0-9.]+)\", self.oskernel())\n if match:\n os_release = match.group(1)\n else:\n return True\n\n for (idx, os_version) in enumerate(os_release.split('.')):\n if idx >= len(version):\n break\n if int(os_version) > int(version[idx]):\n return True\n if int(os_version) < int(version[idx]):\n return False\n\n return True", "def is_up_to_date(self, server_version):\r\n client_split = self.__version__.split('.')\r\n client_len = len(client_split)\r\n server_split = server_version.split('.')\r\n server_len = len(server_split)\r\n\r\n # Make both lists the same length\r\n for i in range(client_len, server_len):\r\n client_split.append('0')\r\n for i in range(server_len, client_len):\r\n server_split.append('0')\r\n\r\n for i in range(0, client_len):\r\n if 'b' in client_split[i]:\r\n # Using a beta version, don't check\r\n return True\r\n client = int(client_split[i])\r\n server = int(server_split[i])\r\n if client < server:\r\n return False\r\n elif server < client:\r\n return True\r\n\r\n return True", "def compare_versions(fixed_version, target_version):\n for i, j in zip(map(int, fixed_version.split(\".\")), map(int, target_version.split(\".\"))):\n if i == j:\n continue\n return i > j\n return len(fixed_version.split(\".\")) > len(target_version.split(\".\"))", "def compare_versions(deployed_version, current_version):\n assert isinstance(deployed_version, str)\n assert isinstance(current_version, str)\n\n deployed_version = deployed_version.replace('_', '0')\n current_version = current_version.replace('_', '0')\n deployed = [int(x) for x in deployed_version.split('.')]\n current = [int(x) for x in current_version.split('.')]\n\n if deployed[0] != current[0]:\n return False\n if deployed[1] != current[1]:\n return False\n return True", "def test_get_hyperflex_server_firmware_version_list(self):\n pass", "def compareVersions(self):\n logger.debug(\"Func: compareVersions\")\n\n cMajorV = nuke.NUKE_VERSION_MAJOR\n cMinorV = nuke.NUKE_VERSION_MINOR\n currentVersion = float(\"{0}.{1}\".format(cMajorV, cMinorV))\n\n dbMajorV = self._currentSceneInfo[\"NukeVersion\"][0]\n dbMinorV = self._currentSceneInfo[\"NukeVersion\"][1]\n databaseVersion = float(\"{0}.{1}\".format(dbMajorV, dbMinorV))\n\n messageList = []\n\n\n if currentVersion == databaseVersion:\n pass\n\n if currentVersion < databaseVersion: # version compare\n message = \"Base Scene is created with a HIGHER Nuke version ({0}). 
Are you sure you want to continue?\".format(databaseVersion)\n messageList.append(message)\n\n if currentVersion > databaseVersion:\n message = \"Base Scene is created with a LOWER Nuke version ({0}). Are you sure you want to continue?\".format(databaseVersion)\n messageList.append(message)\n\n message=\"\"\n for x in messageList:\n message = message + \"\\n\" + str(x)\n\n if messageList == []:\n return 0, message\n else:\n return -1, message", "def check_for_updates():\n last_version = str(request.urlopen(__source__).read().decode(\"utf8\"))\n if str(open(__file__).read()) != last_version:\n log.warning(\"Theres new Version available!, Update from \" + __source__)\n else:\n log.info(\"No new updates!,You have the lastest version of this app.\")", "def firmware_version(self):\n return self._get_system_status()[\"firmware\"]", "def get_firmware_version(self):\n cmd = protocol.GET_FIRMWARE_VERSION\n response = self.__send_and_receive(cmd)\n\n value = self.__gen_response_value(response)\n if value:\n self.firmware_version = value[0][1:]\n else:\n return False", "def _does_require_force_update(self):\n\n if self.current_version[0][0] > self.version_yaml[0]:\n # The current version first index is greater than the one we have in the\n # current version.yaml.\n\n # We return True.\n return True\n\n # We return False, we do not need to force the update for\n # the current version number.\n return False", "def checkVersion(self):\n try:\n respInfo = self._reqSession.get(self._host + \"/static/pythonSDKVersion.txt\")\n if respInfo.status_code != 200 or len(respInfo.text) > 20:\n return\n latestVersion = respInfo.text.strip()\n import eventregistry._version as _version\n currentVersion = _version.__version__\n for (latest, current) in zip(latestVersion.split(\".\"), currentVersion.split(\".\")):\n if int(latest) > int(current):\n logger.info(\"==============\\nYour version of the module is outdated, please update to the latest version\")\n logger.info(\"Your version is %s while the latest is %s\", currentVersion, latestVersion)\n logger.info(\"Update by calling: pip install --upgrade eventregistry\\n==============\")\n return\n # in case the server mistakenly has a lower version that the user has, don't report an error\n elif int(latest) < int(current):\n return\n except:\n pass", "def _checkUpdateNeeded(self):\n try:\n currentVersionLine = str(subprocess.run(['pacman', '-Q', '-i', self._name],\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=True).stdout)\n currentVersion = re.sub(r'.*Version\\s*: ([\\d|\\.]*)-.*', r'\\1', currentVersionLine).split('.')\n newVersion = self._version.split('.')\n for i in range(0, min(len(currentVersion), len(newVersion))):\n if currentVersion[i].isdigit():\n # TODO: test if new version is only digits too, two of them should be the same anyway\n if int(newVersion[i]) > int(currentVersion[i]):\n return True\n if int(newVersion[i]) < int(currentVersion[i]):\n return False\n return len(newVersion) > len(currentVersion)\n except subprocess.CalledProcessError:\n # Package not found: to be installed then\n return True", "def test_16_24_newest_hotfix(self):\n self.data['version'] = '20130826.01'\n self.data['appVersion'] = '16.0.2'\n\n up = self.get(self.data)\n rdf = up.get_rdf()\n assert rdf.find('20202020.01') > -1", "def check_updates(self):\n try:\n if not common.latest_version(version):\n self.update_notify()\n except:\n self.neterror()", "def compareVersions(self, v1, v2, savedBy1, savedBy2):\n if savedBy1 == savedBy2:\n return 3\n\n v2GreaterThanv1 = 
savedBy1[0] in v2 and v2[savedBy1[0]] >= savedBy1[1]\n v1GreaterThanv2 = savedBy2[0] in v1 and v1[savedBy2[0]] >= savedBy2[1]\n\n if v2GreaterThanv1 and not (v1GreaterThanv2):\n return 0\n elif not (v2GreaterThanv1) and v1GreaterThanv2:\n return 1\n else:\n return 2", "def read_fw_version(self):\n\n # This function expects the firmware version to be in a line\n # prefixed with 'Product Extra'.\n # At the moment, it takes the form:\n # Product Extra : MCH FW V2.18.8 Final (r14042) (Mar 31 2017 - 11:29)\n # The following two parts will be extracted:\n # mch_fw_ver: V2.18.8 Final\n # mch_fw_date: Mar 31 2017 - 11:29\n # If NAT change the format, then this function will need to be updated\n\n pattern = \".*: MCH FW (.*) \\(.*\\) \\((.*)\\)\"\n\n for mch in range(1,3):\n try:\n result = self.mch_comms.call_ipmitool_command([\"fru\", \"print\", str(mch + MCH_FRU_ID_OFFSET)])\n\n for line in result.splitlines():\n if FW_TAG in line:\n match = re.match(pattern, line)\n if match:\n self.mch_fw_ver[mch] = match.group(1)\n self.mch_fw_date[mch] = match.group(2)\n else:\n self.mch_fw_ver[mch] = \"Unknown\"\n self.mch_fw_date[mch] = \"Unknown\"\n except CalledProcessError as e:\n self.mch_fw_ver[mch] = \"Unknown\"\n self.mch_fw_date[mch] = \"Unknown\"\n except TimeoutExpired as e:\n print(\"read_fw_version: caught TimeoutExpired exception: {}\".format(e))", "def check_for_updates(package_name, latest_version_str, our_version_str=VERSION):\n our = dict()\n latest = dict()\n for version, suffix in ((our, our_version_str), (latest, latest_version_str)):\n for part in ['major', 'minor', 'patch']:\n version[part], _, suffix = suffix.partition('.')\n version[part] = int(version[part])\n version['suffix'] = suffix\n\n for part in ['major', 'minor', 'patch', 'suffix']:\n if latest[part] > our[part]:\n if part == 'major':\n sys.exit(messages['UpdateRequired'].format(package_name))\n else:\n print >> sys.stderr, messages['UpdateAvailable'].format(package_name)\n return", "def find_updates(self, versions, last_versions):\n updates = []\n\n for package, current_version in versions.items():\n last_version = last_versions[package]\n if last_version != current_version:\n logger.debug(\n '=> %s current version (%s) and last '\n 'version (%s) are different.',\n package, current_version, last_version\n )\n updates.append(\n (package, last_version)\n )\n\n logger.info('- %d package updates found.', len(updates))\n\n return updates", "def test_10_16_newest_hotfix(self):\n self.data['version'] = '20130826.01'\n self.data['appVersion'] = '16.0.1'\n\n up = self.get(self.data)\n rdf = up.get_rdf()\n assert rdf.find('20202020.01') > -1", "def get_next_best_versions(self, versions):\n fake_matches = set()\n\n if not self.major.is_yes:\n major_versions = [self.major.val()]\n else:\n major_versions = sorted(set([v.major for v in versions]))\n\n for major in range(min(major_versions), max(major_versions) + 1):\n if not self.minor.is_yes:\n minor_versions = [self.minor.val()]\n else:\n minor_versions = sorted(set([v.minor for v in versions if v.major == major]))\n\n for minor in range(min(minor_versions), max(minor_versions) + 1):\n if not self.patch.is_yes:\n patch_versions = [self.patch.val()]\n else:\n patch_versions = sorted(set([v.patch for v in versions if v.major == major and v.minor == minor]))\n\n for patch in range(min(patch_versions), max(patch_versions) + 1):\n fake = _parse_semver(\"{}.{}.{}\".format(major, minor, patch), makefake=True)\n if fake not in versions:\n fake_matches.add(fake)\n\n return fake_matches", "def 
get_hardware_version(self):\n cmd = protocol.GET_HARDWARE_VERSION\n response = self.__send_and_receive(cmd)\n\n value = self.__gen_response_value(response)\n if value:\n self.hardware_version = value[0][1:]\n else:\n return False", "def software_versions():\n\n quiet = 1\n versions = collections.OrderedDict()\n for package in ['python', 'python3', 'robot', 'firefox', 'google-chrome']:\n # Note: \"robot --version\" returns 0x00000000000000fb.\n # Note: If package does not exist, 0x7f is returned.\n rc, version = gc.shell_cmd(package + \" --version\",\n valid_rcs=[0, 0x7f, 0xfb])\n versions[package] = \"Not installed\" if rc == 0x7f else version.rstrip('\\n')\n\n versions.update(import_versions)\n\n for package in ['robotframework-angularjs', 'robotframework-scplibrary',\n 'robotframework-extendedselenium2library']:\n rc, version = gc.shell_cmd(\"pip3 show \" + package\n + \" | grep Version | sed -re 's/.*: //g'\")\n versions[package] = \"Not installed\" if not version else version.rstrip('\\n')\n\n rc, version = gc.shell_cmd(\"lsb_release -d -s\")\n versions[\"host OS\"] = \"Failed\" if not version else version.rstrip('\\n')\n return versions", "def update_firmware(self):\n return self._dll.JLINKARM_UpdateFirmwareIfNewer()", "def os_is_compatible(required_os_version: str) -> bool:\n\tcurrent_version = [int(c) for c in os_release().split('.')]\n\trequired_version = [int(c) for c in required_os_version.split('.')]\n\n\t# 10.13.6.2 is not (necessarily) compatible with 10.13.6\n\tif len(required_version) > len(current_version) and\\\n\t required_version[0:len(current_version)] == current_version:\n\t return False\n\n\t# Compare versions component-wise\n\tfor (c, r) in zip(current_version, required_version):\n\t\tif c < r:\n\t\t\treturn False\n\n\treturn True", "def test_update_hyperflex_server_firmware_version(self):\n pass", "def test_installed_beta_no_newer_stable(self):\n self.change_version(self.version_1_2_2, '1.2beta')\n self.change_status(self.version_1_2_2, amo.STATUS_BETA)\n\n version, file = self.get('1.2beta', self.version_int,\n self.app, self.platform)\n assert version == self.version_1_2_1", "def latest_version(self):\n state = self.coordinator.data\n\n try:\n # fake a new update\n # return \"foobar\"\n return dict_get(state, \"firmware_update_info.base.version\")\n except KeyError:\n return None", "def firmware_version(self):\n return self.data.get('fw_ver')", "def compare_versions(a, b):\n if a:\n a = distutils.version.LooseVersion(a)\n b = distutils.version.LooseVersion(b)\n return a >= b\n else:\n return False", "def compare_versions(current_version, supported_version):\n try:\n current = current_version.split(\".\")\n supported = supported_version.split(\".\")\n\n if int(current[0]) < int(supported[0]):\n return False\n if int(current[0]) > int(supported[0]):\n return True\n return int(current[1]) >= int(supported[1])\n # pylint: disable=W0703\n except Exception:\n logger.info(\"issues parsing version\")\n return False", "def check_versions(context, num=0, versions='', ecosystem='', package=''):\n versions = split_comma_separated_list(versions)\n vrsns = context.response.json()['items']\n assert len(vrsns) == num\n for v in vrsns:\n assert v['ecosystem'] == ecosystem\n assert v['package'] == package\n assert v['version'] in versions", "def _is_version_greater(self):\n\n # we compare the 2 versions.\n checked = PyFunceble.abstracts.Version.compare(\n self.data_version_yaml[\"current_version\"]\n )\n\n if checked is not None and not checked:\n # The current version is greater 
as the older one.\n\n # We return True.\n return True\n\n # We return False\n return False", "def test_version_check_update_available(self):\n output = self.run_command(\"selfupdate --check bennr01:selfupdate_test_future\", exitcode=0)\n self.assertIn(\"Target: bennr01:selfupdate_test_future\", output)\n self.assertNotIn(\"Target: ywangd:master\", output)\n self.assertNotIn(\"Already at latest version\", output)\n self.assertIn(\"New version available\", output)\n self.assertNotIn(\"Error: \", output)", "def version_check(self):\n param_name = \"rethink/software_version\"\n sdk_version = settings.SDK_VERSION\n\n # get local lock for rosparam threading bug\n with self.__class__.param_lock:\n robot_version = rospy.get_param(param_name, None)\n if not robot_version:\n rospy.logwarn(\"RobotEnable: Failed to retrieve robot version \"\n \"from rosparam: %s\\n\"\n \"Verify robot state and connectivity \"\n \"(i.e. ROS_MASTER_URI)\", param_name)\n return False\n else:\n # parse out first 3 digits of robot version tag\n pattern = (\"^([0-9]+)\\.([0-9]+)\\.([0-9]+)\")\n match = re.search(pattern, robot_version)\n if not match:\n rospy.logwarn(\"RobotEnable: Invalid robot version: %s\",\n robot_version)\n return False\n robot_version = match.string[match.start(1):match.end(3)]\n if robot_version not in settings.VERSIONS_SDK2ROBOT[sdk_version]:\n errstr_version = \"\"\"RobotEnable: Software Version Mismatch.\nRobot Software version (%s) does not match local SDK version (%s). Please\nUpdate your Robot Software. \\\nSee: http://sdk.rethinkrobotics.com/wiki/Software_Update\"\"\"\n rospy.logerr(errstr_version, robot_version, sdk_version)\n return False\n return True", "def checkVersions():\n item = Item(fromScene=True)\n\n for ns, componentMData in item.components.iteritems():\n if ns == 'cam':\n # todo tratar versoes da camera\n continue\n\n if componentMData['assembleMode'] == 'reference':\n refComponent = ReferenceComponent(ns, componentMData, parent=item)\n refComponent.checkDBForNewVersion()\n\n elif componentMData['assembleMode'] == 'xlo':\n xloComponent = XloComponent(ns, componentMData, parent=item)\n xloComponent.checkDBForNewVersion()\n xloComponent.checkDBForNewCacheVersion()\n\n elif componentMData['assembleMode'] == 'cache':\n cacheComponent = CacheComponent(ns, componentMData, parent=item)\n cacheComponent.checkDBForNewVersion()\n\n item.putDataToDB()", "def get_version_for(self,platform,version):\n def supports_platform(test_platforms):\n if test_platforms.upper() in ['ALL','ANY']:\n platforms = PLATFORMS\n else:\n platforms = test_platforms.split(':')\n return platform in platforms\n\n # Minimal required version check (for mainline releases)\n if self.min_versions:\n base_version = '.'.join(version.split('.')[:2])\n for base_min_version, min_version in (('.'.join(x.split('.')[:2]),x)\n for x in self.min_versions.split(';')):\n if compare_versions(base_version,base_min_version) == 0:\n if compare_versions(version,min_version) < 0:\n return None\n # Find the suitable test version\n candidate = '0'\n test = None\n for t in (t for t in self.versions if supports_platform(t.platform)):\n if compare_versions(version,t.firebird_version) >= 0:\n if compare_versions(candidate,t.firebird_version) < 0:\n candidate = t.firebird_version\n test = t\n return test", "def is_latest_os_image_version(self) -> bool:\n return self._is_latest_os_image_version", "def test_patch_hyperflex_server_firmware_version(self):\n pass", "def get_latest_firmware_version(self, appid, slot):\n all_firmwares = 
os.listdir(self.fw_path)\n if all_firmwares == []:\n logger.warning('No firmware found')\n return ''\n\n all_versions = [int(get_info_from_filename(fw)[2], 16)\n for fw in all_firmwares\n if (get_info_from_filename(fw)[1] == appid and\n get_info_from_filename(fw)[0] == slot)]\n\n if all_versions == []:\n logger.warning('No latest version found')\n return ''\n\n return str(hex(max(all_versions)))", "def query_thinupdate():\n flag = [False, '']\n wmi = GetObject(r'winmgmts:\\\\.\\root\\cimv2')\n wql = \"Select * from Win32_Product\"\n rs = wmi.ExecQuery(wql)\n for r in rs:\n if r.name:\n if r.name.upper() == 'HP THINUPDATE':\n flag[0] = True\n flag[1] = r.version\n break\n else:\n continue\n return flag", "def get_firmware_version(self):\n fw_version = {\n \"BIOS\": self._api_helper.read_txt_file(BIOS_VER_PATH),\n \"BMC\": self.__get_bmc_ver(),\n \"SWITCH_CPLD1\": self.__get_cpld_ver(SW_CPLD1_VER_PATH),\n \"SWITCH_CPLD2\": self.__get_cpld_ver(SW_CPLD2_VER_PATH),\n }.get(self.name, \"Unknown\")\n\n return fw_version", "def test_compare_schemas_major(self):\n status = schema_utils.compare_schemas(\n self.base_schema,\n self.major_removed_value\n )\n\n assert status == schema_utils.Update.major", "def verify_sas_interconnect_firmware_from_li(cls):\n #\n s2l = ui_lib.get_s2l()\n ic_firm = {}\n ic_installedversion = {}\n\n error_flag = 0\n\n logger.debug(\"Getting the list of interconnects and firmware versions\")\n CommonOperationLogicalInterconnect.click_logical_interconnect_firmware()\n\n length = len(s2l._element_find(GeneralLogicalInterconnectsElements.ID_INTERCONNECT_NATASHA_LIST, False, True))\n logger.debug(\"The number of ics is %s\" % length)\n for index in range(1, length + 1):\n installed_fw = ui_lib.get_text(\n UpdateLogicalInterconnectFirmwareElements.ID_SWITCH_FW_DETAILS % index + '/td[3]')\n baseline_fw = ui_lib.get_text(\n UpdateLogicalInterconnectFirmwareElements.ID_SWITCH_FW_DETAILS % index + '/td[4]')\n ic = ui_lib.get_text(UpdateLogicalInterconnectFirmwareElements.ID_SWITCH_FW_DETAILS % index + '/td[1]')\n ic_model = ui_lib.get_text(UpdateLogicalInterconnectFirmwareElements.ID_SWITCH_FW_DETAILS % index + '/td[2]')\n m = re.match(\".*/.*/\", installed_fw)\n if m:\n installed_fw = installed_fw.split()[0]\n if baseline_fw != installed_fw:\n logger.warn(\n \"Installed firmware '{0}' on IC '{1}', model: '{3}' is not same as baseline firmware '{2}'\".format(\n installed_fw, ic, baseline_fw, ic_model))\n error_flag = error_flag + 1\n ic_firm[ic] = baseline_fw\n ic_installedversion[ic] = installed_fw\n else:\n logger.debug(\n \"Installed firmware '{0}' on IC '{1}', model: '{3}' is same as baseline firmware '{2}'\".format(\n installed_fw, ic, baseline_fw, ic_model))\n ic_firm[ic] = baseline_fw\n ic_installedversion[ic] = installed_fw\n\n if error_flag != 0:\n logger.debug(\"Some mismatch in the versions comparision, please check\")\n return False, ic_firm, ic_installedversion\n else:\n logger.debug(\" Versions comparisions went well, Firmware activation is successful\")\n return True, ic_firm, ic_installedversion", "def available_versions(self):\n return list(sorted(self.onxs))", "def select_latest_micro_versions(versions):\n seen_minors = set()\n res = []\n\n for ver, _ in sorted(\n versions.items(),\n # Sort by (minor_version, upload_time) in descending order\n key=lambda x: (Version(x[0]).release[:2], x[1]),\n reverse=True,\n ):\n minor_ver = Version(ver).release[:2]\n\n if minor_ver not in seen_minors:\n seen_minors.add(minor_ver)\n res.insert(0, ver)\n\n return res", "def 
_verify_firmware_version(self):\n firmware_version = self.device.firmware_version\n self.assertTrue(firmware_version)\n self.assertIsInstance(firmware_version, str)", "def sync_firmware(self):\n serial_no = self.serial_number\n\n if self.firmware_newer():\n # The J-Link's firmware is newer than the one compatible with the\n # DLL (though there are promises of backwards compatibility), so\n # perform a downgrade.\n try:\n # This may throw an exception on older versions of the J-Link\n # software due to the software timing out after a firmware\n # upgrade.\n self.invalidate_firmware()\n self.update_firmware()\n except errors.JLinkException as e:\n pass\n\n res = self.open(serial_no=serial_no)\n\n if self.firmware_newer():\n raise errors.JLinkException('Failed to sync firmware version.')\n\n return res\n\n elif self.firmware_outdated():\n # The J-Link's firmware is older than the one compatible with the\n # DLL, so perform a firmware upgrade.\n try:\n # This may throw an exception on older versions of the J-Link\n # software due to the software timing out after a firmware\n # upgrade.\n self.update_firmware()\n except errors.JLinkException as e:\n pass\n\n if self.firmware_outdated():\n raise errors.JLinkException('Failed to sync firmware version.')\n\n return self.open(serial_no=serial_no)\n\n return None", "def compare_versions(self, version1, version2):\n max_segments = max(len(version1.split(\".\")), len(version2.split(\".\")))\n return cmp(self.__normalize_version(version1, desired_segments=max_segments), self.__normalize_version(version2, desired_segments=max_segments))", "def test_compare_schemas_minor(self):\n status = schema_utils.compare_schemas(\n self.base_schema,\n self.minor_change\n )\n\n assert status == schema_utils.Update.minor", "def compare_version(self, ref):\n if not self.version:\n print 'No version for package %s' % self.package\n if not ref.version:\n print 'No version for package %s' % ref.package\n if not self.parsed_version:\n self.parsed_version = parse_version(self.version)\n if not ref.parsed_version:\n ref.parsed_version = parse_version(ref.version)\n return self.parsed_version.compare(ref.parsed_version)", "def get_version_and_model_spread(devices):\n if isinstance(devices[0], jss.Computer):\n os_type_search = \"hardware/os_name\"\n os_type = \"Mac OS X\"\n os_version_search = \"hardware/os_version\"\n model_search = \"hardware/model\"\n model_identifier_search = \"hardware/model_identifier\"\n else:\n os_type_search = \"general/os_type\"\n os_type = \"iOS\"\n os_version_search = \"general/os_version\"\n model_search = \"general/model\"\n model_identifier_search = \"general/model_identifier\"\n versions, models = [], []\n\n for device in devices:\n if device.findtext(os_type_search) == os_type:\n versions.append(device.findtext(os_version_search) or\n \"No Version Inventoried\")\n models.append(\"%s / %s\" % (\n device.findtext(model_search) or \"No Model\",\n device.findtext(model_identifier_search,) or\n \"No Model Identifier\"))\n version_counts = Counter(versions)\n # Standardize version number format.\n version_counts = fix_version_counts(version_counts)\n model_counts = Counter(models)\n\n total = len(devices)\n\n # Report on OS version spread\n strings = sorted(get_histogram_strings(version_counts, padding=8))\n version_metadata = {\"%s Version Histogram (%s)\" % (os_type, total):\n strings}\n\n # Report on Model Spread\n # Compare on the model identifier since it is an easy numerical\n # sort.\n strings = sorted(get_histogram_strings(model_counts, 
padding=8),\n cmp=model_identifier_cmp)\n model_metadata = {\"Hardware Model Histogram (%s)\" % total: strings}\n\n return (version_metadata, model_metadata)", "def all_latest_versions(self):\n for p in self.list_platforms():\n for ch in p.channels:\n vsn = ch.get_latest_vsn()\n yield vsn", "def check_os_version():\n if not version.is_supported_version():\n supported_releases = []\n for rel in version.SUPPORTED_VERSIONS:\n for ver in version.SUPPORTED_VERSIONS[rel]:\n supported_releases.append(rel.upper() + ' ' + ver)\n reporting.create_report([\n reporting.Title(\n 'The installed OS version is not supported for the in-place upgrade to the target RHEL version'\n ),\n reporting.Summary(\n 'The supported OS releases for the upgrade process:\\n'\n ' {}'.format('\\n'.join(supported_releases))\n ),\n reporting.Severity(reporting.Severity.HIGH),\n reporting.Groups(COMMON_REPORT_TAGS),\n reporting.Groups([reporting.Groups.INHIBITOR]),\n # we want to set a static Key here because of different Title per path\n reporting.Key('1c7a98849a747ec9890f04bf4321de7280970715')\n ] + related)", "def test_case03(self):\n version1 = versions.get_version_power(\"1.1.1\")\n version2 = versions.get_version_power(\"0.2.1\")\n self.assertGreater(version1, version2)", "def test_firmware_version(self):\n self._verify_firmware_version()", "def compatible_version(self):\n note_version = self.py_version\n py_version = sys.version_info\n if note_version[0] != py_version[0]:\n return False\n if len(note_version) > 1 and note_version[1] > py_version[1]:\n return False\n return True", "def test_get_hyperflex_server_firmware_version_by_moid(self):\n pass", "def latestidd():\n pth, _ = run_functions.install_paths(version='8.8.0') # works with any value in version\n dirpth = os.path.dirname(pth)\n dirpth = os.path.dirname(dirpth)\n alldirs = os.listdir(dirpth)\n eplusdirs = [dir for dir in alldirs if dir.startswith('EnergyPlus')]\n maxapp = max(eplusdirs)\n ver = folder2ver(maxapp)\n return ver", "async def get_firmware_version(self):\n current_time = time.time()\n if self.query_reply_data.get(PrivateConstants.REPORT_FIRMWARE) == '':\n await self._send_sysex(PrivateConstants.REPORT_FIRMWARE, None)\n while self.query_reply_data.get(\n PrivateConstants.REPORT_FIRMWARE) == '':\n elapsed_time = time.time()\n if elapsed_time - current_time > 2:\n return None\n await asyncio.sleep(self.sleep_tune)\n reply = ''\n for x in self.query_reply_data.get(PrivateConstants.REPORT_FIRMWARE):\n reply_data = ord(x)\n if reply_data:\n reply += chr(reply_data)\n self.query_reply_data[PrivateConstants.REPORT_FIRMWARE] = reply\n return self.query_reply_data.get(PrivateConstants.REPORT_FIRMWARE)", "def checkForUpdates(cversion):\r\n \r\n # set a list of constant versions\r\n \r\n if MpGlobal.SAVED_VERSION == \"0.0.0.0\" :\r\n return;\r\n \r\n v1 = \"0.4.2.0\" # update songs in library to contain index values.\r\n \r\n # if any version compares are less than 0 then updates are required.\r\n update = versionCompare(cversion,v1) < 0;\r\n \r\n \r\n \r\n if update:\r\n print \"updates are required\"\r\n runUpdater(cversion);", "def hasMultipleVersions(self):\n return _libsbml.FbcExtension_hasMultipleVersions(self)", "def need_update(self):\n self.logging.debug( \"need_update()\" )\n\n for name in self.tables:\n\n md5 = self.dbs_tables[name]['md5']\n test = get_md5(self.dbs_tables[name]['path'])\n\n self.logging.debug('(%s) table:%s md5:[old: %s new: %s]' % \\\n (self.db,name,md5,test) )\n\n if test != md5: return True\n\n return False", "def 
find_latest_version(versions):\n\n highest_version = 0\n for version in versions:\n version = parse_version(version)\n\n if version > highest_version:\n highest_version = version\n\n return highest_version", "def getFirmwareVersion(self, board=0):\n return self.callModule('admin', board, 0, 'getVersion')", "def get_fw_version(self):\n summary = self.get_version_summary()\n pattern = '\\$.*? .*? .*? .*? .*? .*? .*? .*? (.*?) \\r\\n' \n fw_version = re.findall(pattern,summary).pop()\n return fw_version", "def firmware_version(self):\n buf = (ctypes.c_char * self.MAX_BUF_SIZE)()\n self._dll.JLINKARM_GetFirmwareString(buf, self.MAX_BUF_SIZE)\n return ctypes.string_at(buf).decode()", "def hw_version(self) -> str | None:\n return self.status.get(\"FIRMWARE\")", "def compareVersion(self, version1, version2):\n \"\"\"\n :type version1: str\n :type version2: str\n :rtype: int\n \"\"\"\n main1 = 0\n main2 = 0\n branch1 = 0\n branch2 = 0\n list1 = version1.split('.')\n list2 = version2.split('.')\n for i in range(max(len(list1),len(list2))):\n item1 = 0 if (i > len(list1)-1) else int(list1[i])\n item2 = 0 if (i > len(list2)-1) else int(list2[i])\n if item1 > item2:\n return 1\n if item2 > item1:\n return -1\n return 0", "def check_recommended_versions_result(context, version):\n json_data = context.response.json()\n result = json_data[\"recommended_versions\"]\n assert result == version, \"different version found {} != {}\".format(version, result)", "def latestidd():\n pth, _ = run_functions.install_paths(\n version=\"8.8.0\"\n ) # works with any value in version\n dirpth = os.path.dirname(pth)\n dirpth = os.path.dirname(dirpth)\n alldirs = os.listdir(dirpth)\n eplusdirs = [dir for dir in alldirs if dir.startswith(\"EnergyPlus\")]\n maxapp = max(eplusdirs)\n ver = folder2ver(maxapp)\n return ver", "def test_request_estable_version(self):\n current_stable_version = get_stable_version()\n self.assertIsNotNone(current_stable_version)", "def test_compare_local_version_is_older(self):\n\n given = \"2.34.0.dev (Hello, World)\"\n expected = True\n actual = Version.compare(given)\n\n self.assertEqual(expected, actual)", "def _check_for_updated_ballet() -> Optional[str]:\n latest = _get_latest_ballet_version_string()\n current = ballet.__version__\n parse = packaging.version.parse\n if latest and parse(latest) > parse(current):\n return latest\n else:\n return None", "def test_16_24_second_hotfix(self):\n self.data['version'] = ''\n self.data['appVersion'] = '16.0.2'\n\n up = self.get(self.data)\n rdf = up.get_rdf()\n assert rdf.find('20130826.01') > -1", "def test_compare_local_version_is_newer(self):\n\n given = \"1.15.0.dev (Hello, World)\"\n expected = False\n actual = Version.compare(given)\n\n self.assertEqual(expected, actual)", "def check(self, expected):\n versions = ['3.0', '4.0', '5.0', '6.0', '7.0', '8.0']\n modes = ['strict', 'normal', 'ignore']\n\n for version in versions:\n for mode in modes:\n assert self.get(app_version=version, compat_mode=mode) == (\n expected['-'.join([version, mode])])", "def check_exactly_one_current_version(self):\n expected_state = \"CURRENT\"\n\n query = \"SELECT COUNT(*) FROM cluster_version;\"\n self.cursor.execute(query)\n result = self.cursor.fetchone()\n if result is None or len(result) != 1:\n Logger.error(\"Unable to run query: {0}\".format(query))\n return\n\n count = result[0]\n if count == 0:\n msg = \"There are no cluster_versions. 
Start ambari-server, and then perform a Restart on one of the services.\\n\" + \\\n \"Then navigate to the \\\"Stacks and Versions > Versions\\\" page and ensure you can see the stack version.\\n\" + \\\n \"Next, restart all services, one-by-one, so that Ambari knows what version each component is running.\"\n Logger.warning(msg)\n elif count == 1:\n query = \"SELECT rv.repo_version_id, rv.version, cv.state FROM cluster_version cv JOIN repo_version rv ON cv.repo_version_id = rv.repo_version_id;\"\n self.cursor.execute(query)\n result = self.cursor.fetchone()\n\n repo_version_id = None\n repo_version = None\n cluster_version_state = None\n\n if result and len(result) == 3:\n repo_version_id = result[0]\n repo_version = result[1]\n cluster_version_state = result[2]\n\n if repo_version_id and repo_version and cluster_version_state:\n if cluster_version_state.upper() == expected_state:\n self.check_all_hosts(repo_version_id, repo_version)\n Logger.info(\"******************************************************************************************************************************************************\")\n Logger.info(\"\\t\\t\\t\\t\\t\\t\\tHDP STACK OVERVIEW\")\n\t Logger.info(\"******************************************************************************************************************************************************\")\n print (\"\\n\")\n Logger.info(\"Cluster HDP Version\\t{0}\".format(repo_version))\n Logger.info(\"Cluster State\\t{0}\".format(cluster_version_state))\n Logger.info(\"Ambari version\\t:{0}\".format(self.ambari_version))\n\n if self.ambari_server_user != \"root\" :\n Logger.info(\"Ambari Server as non-root?\\tYes\")\n else :\n Logger.info(\"Ambari Server as non-root?\\tNo\")\n\n # Read ambari-agent.ini file\n if os.path.exists(AMBARI_AGENT_INI):\n self.ambari_agent_props = self.read_conf_file(AMBARI_AGENT_INI)\n Logger.debug(\"Reading file {0}.\".format(self.ambari_agent_props))\n if \"run_as_user\" in self.ambari_agent_props:\n self.run_as_user = self.ambari_agent_props[\"run_as_user\"]\n if self.run_as_user != \"root\":\n Logger.info(\"Ambari Agent as non-root?\\tYes\")\n else:\n Logger.info(\"Ambari Agent as non-root?\\tNo\")\n else:\n Logger.error(\"Unable to read ambari-agent.ini file\")\n\n else:\n Logger.error(\"Cluster Version {0} should have a state of {1} but is {2}. Make sure to restart all of the Services.\".format(repo_version, expected_state, cluster_version_state))\n else:\n Logger.error(\"Unable to run query: {0}\".format(query))\n elif count > 1:\n # Ensure at least one Cluster Version is CURRENT\n Logger.info(\"Found multiple Cluster versions, checking that exactly one is {0}.\".format(expected_state))\n query = \"SELECT rv.repo_version_id, rv.version, cv.state FROM cluster_version cv JOIN repo_version rv ON cv.repo_version_id = rv.repo_version_id WHERE cv.state = '{0}';\".format(expected_state)\n self.cursor.execute(query)\n rows = self.cursor.fetchall()\n if rows:\n if len(rows) == 1:\n Logger.info(\"Good news; Cluster Version {0} has a state of {1}.\".format(rows[0][1], expected_state))\n self.check_all_hosts_current(rows[0][0], rows[0][1])\n elif len(rows) > 1:\n # Take the repo_version's version column\n repo_versions = [row[1] for row in rows if len(row) == 3]\n Logger.error(\"Found multiple cluster versions with a state of {0}, but only one should be {0}.\\n\" \\\n \"Will need to fix this manually, please contact Support. 
Cluster Versions found: {1}\".format(expected_state, \", \".join(repo_versions)))\n else:\n Logger.error(\"Unable to run query: {0}\\n\".format(query))\n pass", "def is_requested_microversion_compatible(cls, max_version):\n try:\n req_version_obj = api_version_request.APIVersionRequest(\n cls.request_microversion)\n # NOTE(gmann): This is case where this method is used before calling\n # resource_setup(), where cls.request_microversion is set. There may\n # not be any such case but still we can handle this case.\n except AttributeError:\n request_microversion = (\n api_version_utils.select_request_microversion(\n cls.min_microversion,\n CONF.compute.min_microversion))\n req_version_obj = api_version_request.APIVersionRequest(\n request_microversion)\n max_version_obj = api_version_request.APIVersionRequest(max_version)\n return req_version_obj <= max_version_obj", "def new_version_available(self):\n return self._new_version_available", "async def get_firmware_version(self):\n current_time = time.time()\n #logstring(\"setting current time {}\".format(current_time))\n #logstring(\"1\")\n if self.query_reply_data.get(PrivateConstants.REPORT_FIRMWARE) == '':\n #logstring(\"2\")\n #logstring(\"checking time now 1 {}\".format(time.time()))\n await self._send_sysex(PrivateConstants.REPORT_FIRMWARE, None)\n #logstring(\"checking time now 2 {}\".format(time.time()))\n #logstring(\"3\")\n if self.serial_port.IsPortOpen == False:\n #logstring(\"Looks like that port wasn't working!!!!!!!!!!!!!????\")\n return None\n while self.query_reply_data.get(\n PrivateConstants.REPORT_FIRMWARE) == '':\n #logstring(\"4\")\n elapsed_time = time.time()\n #logstring(\"setting elapsed time {}\".format(elapsed_time))\n #logstring(\"5\")\n if elapsed_time - current_time > 3:\n #logstring(\"really took too long: {} {} {}\".format(elapsed_time, current_time, elapsed_time - current_time))\n return None\n #logstring(\"7\")\n if self.serial_port.IsPortOpen == False:\n #logstring(\"Looks like that port wasn't working!!!!!!!!!!!!!\")\n return None\n await asyncio.sleep(self.sleep_tune)\n #logstring(\"8\")\n #logstring(\"Geez, that took: {} {} {} ??????????????????\".format(elapsed_time, current_time, elapsed_time - current_time))\n\n reply = ''\n #logstring(\"9\")\n for x in self.query_reply_data.get(PrivateConstants.REPORT_FIRMWARE):\n reply_data = ord(x)\n if reply_data:\n reply += chr(reply_data)\n self.query_reply_data[PrivateConstants.REPORT_FIRMWARE] = reply\n #logstring(\"10\")\n return self.query_reply_data.get(PrivateConstants.REPORT_FIRMWARE)", "def test_min_client(self):\n for version in Version.objects.filter(pk__gte=self.version_1_2_0):\n appversion = version.apps.all()[0]\n appversion.min = AppVersion.objects.get(pk=325) # 3.7a5\n appversion.save()\n\n version, file = self.get('', '3070000005000', # 3.7a5pre\n self.app, self.platform)\n assert version == self.version_1_1_3", "def fw_version(self):\n return self.capabilities.get(\"fw_ver\")", "def _check_version(version):\n # Update cache if needed.\n if _check_version._versions_cache is None:\n log.debug(\"Loading versions cache ...\")\n _check_version._versions_cache = __salt__[\"netbeans.list_versions\"]()\n\n # Convert latest.\n if version is None or version == \"latest\":\n return __salt__[\"netbeans.pick_latest_version\"](\n _check_version._versions_cache\n )\n\n # Check if version is available.\n if version not in _check_version._versions_cache:\n return None\n return version", "def test_beta_updates_to_stable(self):\n 
self.change_version(self.version_1_2_0, '1.2beta')\n self.change_status(self.version_1_2_0, amo.STATUS_BETA)\n self.change_status(self.version_1_2_2, amo.STATUS_BETA)\n\n version, file = self.get('1.2beta', self.version_int,\n self.app, self.platform)\n assert version == self.version_1_2_1", "def is_version_2_6() -> bool:\n v = get_version()\n if v[1] != \"singularity\" and v[1] != \"singularity-ce\":\n return False\n return v[0][0] == 2 and v[0][1] == 6", "def is_valid_version(self):\n pass" ]
[ "0.70892644", "0.708643", "0.70024616", "0.64528227", "0.64317346", "0.64306974", "0.6340362", "0.6231141", "0.61943406", "0.6168598", "0.61505914", "0.612571", "0.6075346", "0.60498714", "0.6030455", "0.60061157", "0.59848315", "0.59787464", "0.5975126", "0.59665155", "0.59654367", "0.596271", "0.59094965", "0.5875248", "0.5868414", "0.58661413", "0.5856628", "0.58280647", "0.5813281", "0.57699996", "0.57555854", "0.5747705", "0.57383984", "0.57279825", "0.57260877", "0.571574", "0.5706904", "0.56925505", "0.5684565", "0.56807476", "0.5675459", "0.56544167", "0.5640635", "0.5626905", "0.5623169", "0.561551", "0.56101906", "0.5600031", "0.5588904", "0.55858785", "0.5584469", "0.5577686", "0.5574718", "0.5573054", "0.556403", "0.5550241", "0.55443454", "0.5526283", "0.5525878", "0.5512879", "0.5509468", "0.55001414", "0.54982066", "0.54974955", "0.5495342", "0.5470963", "0.54628485", "0.54621863", "0.54587036", "0.5439528", "0.5437846", "0.5428699", "0.54163575", "0.54055786", "0.5403708", "0.5401982", "0.5390251", "0.5383479", "0.53766793", "0.53658855", "0.53569275", "0.53536266", "0.5350327", "0.53479296", "0.534437", "0.5333285", "0.53304964", "0.5317845", "0.5314217", "0.5310657", "0.53076744", "0.5303692", "0.5301868", "0.52965945", "0.52875197", "0.5282641", "0.5276947", "0.5274953", "0.52747273", "0.52716464" ]
0.7005981
2
Output results in JSON format, which can be useful to ingest into other tools
def dump_json(self): # JSON output not requested if not self.json_results: return # Are we writing to a file or stdout? if self.json_results == "-": json_results_fd = sys.stdout else: try: json_results_fd = open( os.path.expanduser( os.path.expandvars( self.json_results)), "wb") except Exception as err: self.message( "[-] Problem opening file '%s' to write JSON results to: %s" % (self.json_results, err)) self.message( "[!] Defaulting to writing JSON results to stdout instead") json_results_fd = sys.stdout try: json.dump(self.results, json_results_fd) except Exception as err: self.message( "[-] Problem writing JSON output to %s : %s" % (self.json_results, err)) if self.json_results != "-": self.message("[+] Written JSON results to %s" % (os.path.abspath(self.json_results)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_json(results):\r\n import json\r\n stats = calc_stats(results)\r\n print(json.dumps(stats._asdict()))", "def format_json(self,query_results):\n results=query_results.data\n factory=factory_json()\n dump=factory.dumps(results)\n print(dump)\n # TODO return output for this\n return \"\"", "def display_json(self, results, verbose):\n print(json.dumps(results))", "def print_json(results, number, concurrency):\n import json\n stats = calc_stats(results, number, concurrency)\n print(json.dumps(stats))", "def json_out(db, options):\n stats = {\"stats\": basic_stats(db)}\n stats['logins_per_rp'] = db['rp']\n if options.quiet:\n print(dumps(stats, separators=(',', ':')))\n else:\n print(dumps(stats, indent=2, separators=(',', ': ')))", "def format_parsed_json(results):\n json_result = json.dumps(results, indent=4)\n return json_result", "def format_results(results):\n parsed = json.loads(results.to_json())\n json_result = json.dumps(parsed, indent=4)\n return json_result", "def create_json_report(output):\n # Initial work, just dump mia_metrics and dummy_metrics into a json structure\n return json.dumps(output, cls=NumpyArrayEncoder)", "def json_out(self):\n temp_json = json.dumps(self.ecat_info, indent=4)\n print(temp_json)", "def result_writer(result_poly):\n val = {}\n val[\"type\"] = \"FeatureCollection\"\n val[\"features\"] = result_poly\n with open(output_file_path, 'w') as outfile:\n json.dump(val, outfile, indent=3)\n outfile.close()", "def get_json_accessibility_result(self):\n axe_result = json.dumps(self.results, indent = 3)\n logger.info(axe_result)\n return axe_result", "def dump_result_as_json(self, filename):\n import json\n with open(filename, \"w\") as out:\n data = {\"raport\": [x.to_dict() for x in self.events]}\n json.dump(data, out, indent=4, sort_keys=True)", "def output_results(results, summary, format=\"json\", outstream=sys.stdout):\n\n try:\n\n if format.upper() == \"JSON\":\n\n res_json = {\n \"summary\": summary,\n \"test_results\": results\n }\n\n json.dump(res_json, outstream, indent=4)\n\n elif format.upper() == \"TEXT\":\n\n outstream.write(\"***\\n\")\n outstream.write(\"TEST SUMMARY\\n\")\n outstream.write(\"------------\\n\")\n outstream.write(\"Tests passed: %i\\n\" % summary[\"passed\"])\n outstream.write(\"Tests failed: %i\\n\" % summary[\"failed\"])\n\n outstream.write(\"Success percentage : %.2f%%\\n\"\n % summary[\"success_percentage\"])\n outstream.write(\"Total elapsed time: %.3f seconds\\n\"\n % summary[\"total_elapsed_time\"])\n outstream.write(\"***\\n\")\n\n for res in results:\n\n outstream.write(\"%s\\n\" % res[\"name\"])\n outstream.write(\"\\tStatus:%s\\n\" % res[\"status\"])\n outstream.write(\"\\tElapsed time: %f\\n\" % res[\"elapsed_time\"])\n\n if(res[\"status\"] == \"FAILED\"):\n outstream.write(\"\\tError message: %s\\n\"\n % res[\"error_msg\"])\n\n except KeyError as e:\n print(str(e))", "def make_json(result):\n new_result = result.to_dict()\n json_result = json.dumps(new_result, indent=4)\n return json_result", "def summarize_as_json(self):\n return json.dumps({\n 'total_time': self.total_time,\n 'std_dev_total_time': self.std_dev_total_time,\n 'max_memory': self.max_memory,\n 'std_dev_max_memory': self.std_dev_max_memory,\n 'average_memory': self.average_memory,\n 'std_dev_average_memory': self.std_dev_average_memory,\n 'average_cpu': self.average_cpu,\n 'std_dev_average_cpu': self.std_dev_average_cpu,\n }, indent=2)", "def output_result(self):\n output = {}\n output['draw'] = str(int(self.params['draw']))\n 
output['recordsTotal'] = str(self.cardinality)\n output['recordsFiltered'] = str(self.cardinality_filtered)\n if self.error:\n output['error'] = self.error\n return output\n\n output['data'] = self.results\n for k, v in self.yadcf_params:\n output[k] = v\n return output", "def to_json(self, exclude_vectors=True):\n json_repr = vars(self)\n json_repr[\"results\"] = [\n r.to_json(exclude_vectors=exclude_vectors) for r in json_repr[\"results\"]]\n return json_repr", "def _to_json(self, output):\n out_dict = {\"predictions\": output}\n return json.dumps(out_dict)", "def print_json(res, ctx):\n\n return json.dumps(res)", "def save_results(results):\n json.dump(results, open(\"results.json\", \"w\"))", "def print_json_stdout(results):\n for json in results:\n print(\"\\n########## Result for IP {} ##########\".format(json['ip']))\n pprint.pprint(json)\n print('######################################')\n print()", "def display_results(self, prelude='\\n', json_output=False):\n if json_output:\n # JSON output\n import json\n results_object = [r.jsonizable_object() for r in self.results]\n print(json.dumps(results_object, indent=2, sort_keys=True, ensure_ascii=False))\n else:\n # Regular output\n if not self.results:\n print('No results.', file=sys.stderr)\n else:\n sys.stderr.write(prelude)\n for r in self.results:\n r.print()", "def export_verbose_json(self):\n self.export_json(verbosejson=True)", "def to_multiple_jsons(self):\n self.error_throw('output')\n\n if self.rank_method == methods_of_ranking[3]: #'diversified_ranking'\n self.output_div('multiple_jsons')\n else:\n self.output('multiple_jsons')", "def output_from_json(self, output: Dict[str, Any]) -> OutputInfo:", "def write(fmt: Format, out: str):\n\n results = {result[\"name\"]: {\"average\": result[\"average\"], \"range\": result[\"range\"]} for result in Paragon.results}\n\n if fmt == Format.JSON:\n json.dump(results, out, indent=4)\n\n if fmt == Format.MARKDOWN:\n table = \"| Program | Average [ms] | Min [ms] | Max [ms] |\\n|---|---|---|---|\\n\"\n for key, val in results.items():\n table += f\"| `{key}` | {val['average']} | {val['range'][0]} | {val['range'][1]} |\\n\"\n out.write(table)\n\n if fmt == Format.CSV:\n writer = csv.DictWriter(out, fieldnames=[\"Program\", \"Average [ms]\", \"Min [ms]\", \"Max [ms]\"])\n\n writer.writeheader()\n\n for key, val in results.items():\n writer.writerow({\n \"Program\": key,\n \"Average [ms]\": val[\"average\"],\n \"Min [ms]\": val[\"range\"][0],\n \"Max [ms]\": val[\"range\"][1],\n })", "def json(self, update=False):\n return json.dumps(self.export(update=update), indent=4)", "def _convert_result_to_json(self, lines):\n if not lines:\n return {'status': 'Failed',\n 'msg': 'Final result is not available.'}\n\n lines = lines.split('\\n')\n n = len(lines)\n\n if n == 1:\n return self._result_to_dict(lines[0])\n\n return {'count': n,\n 'nodes': [self._result_to_dict(line) for line in lines]}", "def to_single_json(self):\n self.error_throw('output')\n \n if self.rank_method == methods_of_ranking[3]: #'diversified_ranking'\n self.output_div('single_json')\n else:\n self.output('single_json')", "def json_out(self, data):\n\t\treturn json.dumps(data)", "def send_results_output(**kwargs):\n logging.info(\"Printing query results to output\")\n print(kwargs[\"results_dataset_json\"])\n return True", "def metrics_to_json(pipeline_result, fname):\n metric_results = pipeline_result.metrics().query()\n results = {}\n for counter in metric_results['counters']:\n counter_name = counter.key.step + ':' + 
counter.key.metric.name\n results[counter_name] = counter.result\n with tf.io.gfile.GFile(fname, 'w') as f:\n f.write(json.dumps(results, indent=4, sort_keys=True))", "def write_output(version, aliases, zones, filename):\n data = OrderedDict()\n data[\"version\"] = version\n data[\"aliases\"] = OrderedDict(sorted(aliases.items()))\n data[\"zones\"] = OrderedDict(sorted(zones.items()))\n\n with open(filename, \"w\") as jsonfile:\n json.dump(data, jsonfile, indent=2, separators=(\",\", \": \"))\n jsonfile.write(\"\\n\")", "def generateJsonString(self) -> str:\n try:\n if self.lastResult is not None and len(self.lastResult) != 0:\n for result in self.lastResult:\n result['SAPMON_VERSION'] = PAYLOAD_VERSION\n result['PROVIDER_INSTANCE'] = self.providerInstance.name\n result['METADATA'] = self.providerInstance.metadata\n resultJsonString = json.dumps(\n self.lastResult, sort_keys=True, indent=4, cls=JsonEncoder)\n self.tracer.debug(\"[%s] resultJson=%s\" % (self.fullName,\n str(resultJsonString)))\n except Exception as e:\n self.tracer.error(\"[%s] Could not format lastResult=%s into JSON (%s)\", self.fullName,\n self.lastResult,\n e, exc_info=True)\n raise\n return resultJsonString", "def json_view(self, recursive=False):\n\n context = self.context.aq_inner\n data = self.export(context, recursive=recursive)\n pretty = json.dumps(data, sort_keys=True, indent=4)\n self.request.response.setHeader(\"Content-type\", \"application/json\")\n return pretty", "def json(self):\n robot_dict = self.robot_dict()\n target_dict = self.target_dict()\n json_str = '{'\n json_str = json_str + '\"robot_obj\" : ' + json.dumps(robot_dict) + \",\\n\"\n json_str = json_str + '\"target_obj\" : ' + json.dumps(target_dict) + \"\\n\"\n json_str = json_str + '}'\n return(json_str)", "def write(self):\n out = json.dumps({\"items\": self.items})\n sys.stdout.write(out)", "def write_to_json(results, filename):\r\n dicts = []\r\n for row in results:\r\n print(row.neo)\r\n r = {'datetime_utc': datetime_to_str(row.time),\r\n 'distance_au': row.distance, 'velocity_km_s': row.velocity,\r\n 'designation': row._designation,\r\n 'neo': {'designation': row.neo.designation,\r\n 'name': row.neo.name, 'diameter_km': row.neo.diameter,\r\n 'potentially_hazardous': row.neo.hazardous}}\r\n dicts.append(r)\r\n\r\n with open(filename, 'w') as json_file:\r\n json.dump(dicts, json_file, indent=4, sort_keys=False)", "def save_result(result, file_name='result.txt', formatted=False):\n print \"Save result into %s\" % file_name\n with open(file_name, 'w') as file_:\n for product_name in result:\n if formatted:\n file_.write(json.dumps({\n 'product_name' : product_name,\n 'listings' : result[product_name]\n }, sort_keys=True, indent=2))\n else:\n file_.write(json.dumps({\n 'product_name' : product_name,\n 'listings' : result[product_name]\n }))", "def test_to_json(self):\n actual_result = ResultBuilder(IPERF_PARSER_EXPECTED_RESULT,\n OK_MESSAGE,\n OK_RETURN_CODE).build_json()\n self.assertMultiLineEqual(actual_result,\n EXPECTED_OUTPUT_BUILDER_RESULT)", "def results2json(self, results, outfile_prefix):\r\n result_files = dict()\r\n if isinstance(results[0], list):\r\n json_results = self._det2json(results)\r\n result_files['bbox'] = '{}.{}.json'.format(outfile_prefix, 'bbox')\r\n result_files['proposal'] = '{}.{}.json'.format(\r\n outfile_prefix, 'bbox')\r\n mmcv.dump(json_results, result_files['bbox'])\r\n elif isinstance(results[0], tuple):\r\n json_results = self._segm2json(results)\r\n result_files['bbox'] = '{}.{}.json'.format(outfile_prefix, 
'bbox')\r\n result_files['proposal'] = '{}.{}.json'.format(\r\n outfile_prefix, 'bbox')\r\n result_files['segm'] = '{}.{}.json'.format(outfile_prefix, 'segm')\r\n mmcv.dump(json_results[0], result_files['bbox'])\r\n mmcv.dump(json_results[1], result_files['segm'])\r\n elif isinstance(results[0], np.ndarray):\r\n json_results = self._proposal2json(results)\r\n result_files['proposal'] = '{}.{}.json'.format(\r\n outfile_prefix, 'proposal')\r\n mmcv.dump(json_results, result_files['proposal'])\r\n else:\r\n raise TypeError('invalid type of results')\r\n return result_files", "def dump_analysis(self, ecosystem, component, version, json_response):\n timestamp_str = datetime.datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:%S.%f\")\n filename = API_RESULTS_DIRECTORY + \"/\"\n filename += \"component_analysis_{e}_{c}_{v}_{t}.json\".format(e=ecosystem,\n c=component,\n v=version,\n t=timestamp_str)\n with open(filename, 'w') as fout:\n json.dump(json_response, fout)", "def cache_to_json(self):\n # {\"team_number\": 0000000, \"student1\": \"\", \"student2\": \"\", \"student3\": \"\",\n # \"advisor_type\": \"\", \"advisor\": \"\", \"school\": \"\", \"prize\": \"\"}\n json_details = {\"fields\": [\"teams counts\", \"teams numbers\",\n \"student1\", \"student2\", \"student3\",\n \"advisor_type\", \"advisor\", \"school\", \"prize\"],\n \"teams counts\": 0,\n \"teams numbers\": [],\n \"info\": []}\n self.cache_result_file.seek(0, 0)\n lines = self.cache_result_file.readlines()\n json_details[\"teams counts\"] = len(lines)\n for line in lines:\n info = eval(line)\n json_details[\"teams numbers\"].append(info[\"team_number\"])\n json_details[\"info\"].append(info)\n\n with open(self.result_filename, \"w\") as f:\n json.dump(obj=json_details, fp=f, indent=4)\n\n self.logger.debug(\"Info Result Updated to JSON\")", "def output_run(run_data, name):\n\n print(json.dumps(run_data, indent=4))\n ret = run_data.get('return', {})\n display_output(\n {name: ret}, \n\tout=run_data.get('out', 'nested'),\n\topts = salt.config.minion_config('/dev/null'))", "def write_to_json(results, filename):\n resultsDict = []\n with open(filename, 'w') as results_file:\n for i, result in enumerate(results):\n resultsDict.append(\n {'datetime_utc': result.time_str,\n 'distance_au': result.distance,\n 'velocity_km_s': result.velocity,\n 'neo': {'designation': result._designation,\n 'name': result.neo.name,\n 'diameter_km': result.neo.diameter,\n 'potentially_hazardous': result.neo.hazardous}})\n if resultsDict[i]['neo']['name'] is None:\n resultsDict[i]['neo']['name'] = ''\n json.dump(resultsDict, results_file, indent=2)", "def summaryJSON(self, filename=None):\n d = self.robotGridSummaryDict()\n if filename is not None:\n with open(filename, \"w\") as f:\n json.dump(d, f, separators=(',', ':'))\n else:\n return json.dumps(d)", "def data():\n result = {}\n for thread in DATA.threads:\n result[thread] = [formatNode(node) for node in DATA.threads[thread].tree]\n return json.dumps({\n 'checkpoints': DATA.checkpoints,\n 'threads': result\n })", "def print_json_file(results, file):\n print(\"Writing results to {}...\".format(file.name))\n for json in results:\n file.write(\"\\n########## Result for IP {} ##########\\n\".format(json['ip']))\n pprint.pprint(json, stream=file)\n file.write('######################################\\n')", "def _generate_step_results(json_filename=\"jsonoutput.json\"):\n if debug:\n print('[DEBUG] Func: _generate_step_results...')\n\n testrail_format = {\"custom_step_results\": []} # Json format that TestRail 
requires\n step_counter = 1\n\n with open(json_filename, 'r') as json_file_obj:\n json_file_contents = json.loads(json_file_obj.read())\n json_file_obj.close()\n\n for feature in json_file_contents:\n for step in feature['elements'][0]['steps']:\n\n # Create \"content\": \"Step j\"\n test_step = {}\n\n test_step[\"content\"] = \"Step \" + str(step_counter)\n\n # Create \"expected\": \"foo\"\n test_step[\"name\"] = str(step['name'])\n test_step[\"expected\"] = \"passed\"\n\n # Create \"actual\": \"foo\"\n test_step[\"actual\"] = str(step['result']['status'])\n\n # Create \"status_id\": 1 pass, 4 retest\n test_step[\"status_id\"] = 1 if test_step[\"expected\"] == test_step[\"actual\"] else 4\n\n testrail_format['custom_step_results'].append(test_step)\n\n step_counter += 1\n\n return json.dumps(testrail_format[\"custom_step_results\"])", "def write_result_to_file(self):\n self.__test_result[Result.__RUN] = self.__run\n with open(self.__json_file_path, \"w+\") as outfile:\n json.dump(self.__test_result, outfile,\n ensure_ascii=False, indent=2)", "def write_result(dict, out_path):\n with open(out_path, 'w') as f:\n json.dump(dict, f)", "def json_all_builder(self, customer_count, invoice_count, invl_count ):\n json_result = '{\\n'\n json_result += '\\t \"_results\":[\\n'\n json_result += '\\t\\t{ \"customer_count\": \"' + str(customer_count)\n json_result += ', \"invoice_count\": \"' + str(invoice_count)\n json_result += ', \"invl_count\": \"' + str(invl_count)\n json_result += '}\\n'\n json_result += '\\n\\t]\\n}'\n return json_result", "def json_output(self):\n enabled = self.lib.iperf_get_test_json_output(self._test)\n\n if enabled:\n self._json_output = True\n else:\n self._json_output = False\n\n return self._json_output", "def as_json(self):\n\n return {\n \"name\": self.name,\n \"summary\": self.summary.as_json(),\n \"cases\": [case.as_json() for case in self.cases]\n }", "def format_results(requests):\n\n keys = get_sorted_keys(requests)\n index = 1\n \n print \"\\nResults:\"\n for key in keys:\n\tfor request in requests[key]:\n\t print \"%s %s\" % (index, request.print_result())\n\t index += 1", "def output_json(data, code, headers=None):\n #data[\"timestamp\"] = datetime.now()\n return jsonify(data)", "def print_json(result_json):\n\n if (result_json):\n for element in result_json:\n json_str = json.dumps(element, indent=4)\n click.echo(json_str)\n else:\n click.echo(\"No starships found with given criteria. Try again!\")", "def save_result(res, name):\n with open('dist/'+name+'.json','w') as fp:\n json.dump(res, fp)", "def pretty_print_result_map(results: dict) -> None:\n print(json.dumps({k: map_res(v) for k, v in results.items()}, indent=2))", "def prepare_results(self) -> dict:\n if not hasattr(self, \"results\"):\n raise AttributeError(\n \"Results have not been finalized. 
Please call \"\n \"finalize_results() before saving output.\"\n )\n\n output = {\n \"armory_version\": armory.__version__,\n \"config\": self.config,\n \"results\": self.results,\n \"timestamp\": int(self.time_stamp),\n }\n return output", "def export_json(meta, data, output):\n\n formatted = { k: list(v.values()) for k, v in data.items() }\n output_file = open(output, 'w')\n output_file.write(json.dumps(formatted, cls=JSONEncoder))\n output_file.close()", "def xcresulttool_json(*args):\n args = list(args) + ['--format', 'json']\n contents = xcresulttool(*args)\n return json.loads(contents)", "def xcresulttool_json(*args):\n args = list(args) + ['--format', 'json']\n contents = xcresulttool(*args)\n return json.loads(contents)", "def json_data(self):\n self.check_proof()\n return {\n \"vars\": [{'name': v.name, 'T': str(v.T)} for v in self.vars],\n \"proof\": sum([printer.export_proof_item(self.thy, item, unicode=True, highlight=True)\n for item in self.prf.items], []),\n \"report\": self.rpt.json_data(),\n \"method_sig\": self.get_method_sig()\n }", "def generate_qps_json(input_folder, output_file):\n dic = get_qps_as_dict(input_folder)\n with open(output_file, \"w\") as f:\n f.write(json.dumps(dic))", "def stat_ret():\n count_am = storage.count(\"Amenity\")\n count_ct = storage.count(\"City\")\n count_pl = storage.count(\"Place\")\n count_re = storage.count(\"Review\")\n count_st = storage.count(\"State\")\n count_us = storage.count(\"User\")\n return jsonify({\"amenities\": count_am,\n \"cities\": count_ct,\n \"places\": count_pl,\n \"reviews\": count_re,\n \"states\": count_st,\n \"users\": count_us})", "def __saveGithubResults(self):\n\t\tself.__debugInfo(\"Saving JSON results into file {}\".format(self.output_file))\n\t\ttry:\n\t\t\twith open(self.output_file, 'w') as wfile:\n\t\t\t\tjson.dump(self.final_results, wfile)\n\t\texcept Exception as exception:\n\t\t\traise MsgException('Output file could not be written', exception)", "def write_benchmark_json(file_out, benchmark_result, benchmark, test_config = TestConfig()):\n json.dump(benchmark_result, file_out)", "def main(index, output_file, **kwargs):\n\n output_jsonl = None\n output_text = None\n if 'json' in kwargs['output_format']:\n fname = output_file if len(kwargs['output_format']) == 1 else kwargs['output_format'] + '.jsonl'\n output_jsonl = open(fname, 'w')\n if 'text' in kwargs['output_format']:\n fname = output_file if len(kwargs['output_format']) == 1 else kwargs['output_format'] + '.txt'\n output_text = open(fname, 'w')\n\n if kwargs.get('query') is not None:\n query = json.load(kwargs.get('query'))\n else:\n query = {\n \"sort\": [\"warc_id\"],\n \"size\": 200,\n \"query\": {\n \"bool\": {\n \"filter\": {\n \"bool\": {\n \"must_not\": [\n {\n \"query_string\": {\n \"analyze_wildcard\": True,\n \"default_field\": \"*\",\n \"query\": \"\"\"group:(*.patches OR *.commits* OR\n *.dist-commits* OR *.version-control* OR *.git* OR *.cvs* OR *.svn*\n OR *.trunk* OR *.scm* OR *.pkg*) OR (group:(*.bugs* OR *.issues*\n OR *.bugzilla* OR *.codereview*) OR \n headers.subject.keyword:(*jira* OR *bugzilla*) OR\n headers.from_email.keyword:(*bugs* OR *bugzilla* OR *jira* OR *jboss*))\"\"\"\n }\n }\n ],\n \"must\": {\"term\": {\"lang\": \"en\"}},\n \"minimum_should_match\": 1,\n \"should\": [\n {\"wildcard\": {\"group\": \"gmane.culture.*\"}},\n {\"wildcard\": {\"group\": \"gmane.politics.*\"}},\n {\"wildcard\": {\"group\": \"gmane.science.*\"}},\n {\"wildcard\": {\"group\": \"gmane.education.*\"}},\n {\"wildcard\": {\"group\": 
\"gmane.music.*\"}},\n {\"wildcard\": {\"group\": \"gmane.games.*\"}},\n {\"wildcard\": {\"group\": \"gmane.recreation.*\"}}\n ]\n }\n }\n }\n }\n }\n\n logger.info('Retrieving initial batch')\n es = util.get_es_client()\n results = util.es_retry(es.search, index=index, scroll='10m', size=kwargs['scroll_size'], body=query)\n\n skip = kwargs['skip']\n if skip > 0:\n logger.info('Skipping ahead {} messages'.format(skip))\n\n sampled_groups = {}\n num_samples = 0\n num_skipped = 0\n\n try:\n with tqdm(desc='Calculating progress', unit=' messages') as progress_bar:\n while num_samples < kwargs['total_mails'] and len(results['hits']['hits']) > 0:\n for hit in results['hits']['hits']:\n if skip > 0 and num_skipped < skip:\n progress_bar.set_description('Skipping messages')\n progress_bar.total = skip\n num_skipped += 1\n progress_bar.update()\n continue\n elif (skip == 0 or num_skipped >= skip) and num_samples == 0:\n progress_bar.set_description('Sampling messages')\n progress_bar.total = kwargs['total_mails']\n progress_bar.n = 0\n progress_bar.last_print_n = 0\n progress_bar.update(0)\n\n src = hit['_source']\n text_plain = src['text_plain']\n\n prev_samples = sampled_groups.get(src['group'], 0)\n if kwargs['group_limit'] and prev_samples > kwargs['group_limit']:\n continue\n sampled_groups[src['group']] = prev_samples + 1\n\n num_samples += 1\n progress_bar.update()\n\n if output_jsonl:\n json.dump({'text': text_plain,\n 'meta': {k: src[k] for k in src.keys() if k not in ['text_plain', 'text_html']},\n 'labels': []}, output_jsonl)\n output_jsonl.write('\\n')\n\n if output_text:\n output_text.write(util.normalize_message_text(text_plain))\n output_text.write('\\n')\n\n if num_samples >= kwargs['total_mails']:\n break\n\n results = util.es_retry(es.scroll, scroll_id=results['_scroll_id'], scroll='10m')\n finally:\n es.clear_scroll(scroll_id=results['_scroll_id'])\n\n if output_jsonl:\n output_jsonl.close()\n if output_text:\n output_text.close()", "def result2json(ifilename, poiname, ofilename):\n nameMap = {\n \"SysWeight1\" : \"mc\",\n \"SysWeight2\" : \"FSR\",\n \"SysWeight3\" : \"bkg\",\n \"SysWeight4\" : \"tagpt\",\n \"SysWeight6\" : \"Prefire\",\n \"SysRecoil2\" : \"recoil_eta\",\n \"SysRecoil3\" : \"recoil_keys\",\n \"SysRecoil6\" : \"recoil_stat0\",\n \"SysRecoil7\" : \"recoil_stat1\",\n \"SysRecoil8\" : \"recoil_stat2\",\n \"SysRecoil9\" : \"recoil_stat3\",\n \"SysRecoil10\": \"recoil_stat4\",\n \"SysRecoil11\": \"recoil_stat5\",\n \"SysRecoil12\": \"recoil_stat6\",\n \"SysRecoil13\": \"recoil_stat7\",\n \"SysRecoil14\": \"recoil_stat8\",\n \"SysRecoil15\": \"recoil_stat9\",\n }\n\n def getNuisName(nuis):\n if nuis in nameMap.keys():\n return nameMap[nuis]\n elif bool(re.match(r\"\\w*bin\\d+shape\", nuis)):\n return \"QCD_\" + nuis\n else:\n return nuis\n\n ifile = ROOT.TFile(ifilename)\n himpact = ifile.Get(\"nuisance_impact_mu\")\n himpact_grouped = ifile.Get(\"nuisance_group_impact_mu\")\n tree = ifile.Get(\"fitresults\")\n tree.GetEntry(0)\n\n # find the POI bin for poiname\n ibinX = -1\n for binX in range(1, himpact.GetNbinsX()+1):\n poi = himpact.GetXaxis().GetBinLabel(binX)\n if poi == poiname:\n ibinX = binX\n continue\n assert ibinX >=0, \"Can not find the POI {} in the postfit file {}. 
Please check.\".format(poiname, ifilename)\n\n results = OrderedDict()\n results['POIs'] = []\n val = getattr(tree, poiname)\n err = abs(getattr(tree, poiname+\"_err\"))\n poi = OrderedDict()\n poi['fit'] = [val-err, val, val+err]\n poi['name'] = poiname\n results['POIs'].append(poi)\n\n results['method'] = 'default'\n results['params'] = []\n\n # dump impacts\n impacts = OrderedDict()\n for ibinY in range(1, himpact.GetNbinsY()+1):\n nuis = himpact.GetYaxis().GetBinLabel(ibinY)\n impacts[nuis] = himpact.GetBinContent(ibinX, ibinY)\n\n # add the grouped QCD and Recoil systematic\n groupnames = []\n for ibinY in range(1, himpact_grouped.GetNbinsY()+1):\n tmpY = himpact_grouped.GetYaxis().GetBinLabel(ibinY)\n if tmpY == 'stat':\n continue\n impacts[tmpY] = himpact_grouped.GetBinContent(ibinX, ibinY)\n groupnames.append(tmpY)\n\n # sort impacts, descending\n impacts = OrderedDict(sorted(impacts.items(), key=lambda x: abs(x[1]), reverse=True))\n\n pulls = OrderedDict()\n for nuis in impacts.keys():\n if nuis not in groupnames:\n val = getattr(tree, nuis)\n err = getattr(tree, nuis+\"_err\")\n err = abs(err)\n else:\n # manually set the postfit of the grouped sys to [-1,1], and pulled at 0,\n # since only the impacts are useful to us\n val = 0.\n err = 1.\n pulls[nuis] = [val - err, val, val + err]\n\n # save to results\n for nuis in impacts.keys():\n systematic = OrderedDict()\n systematic['fit'] = pulls[nuis]\n systematic['groups'] = []\n systematic['impact_' + poiname] = impacts[nuis]\n systematic['name'] = getNuisName(nuis)\n systematic['prefit'] = [-1.0, 0., 1.0]\n systematic[poiname] = [poi['fit'][1] - impacts[nuis], poi['fit'][1], poi['fit'][1] + impacts[nuis]]\n systematic['type'] = \"Gaussian\"\n print(getNuisName(nuis), pulls[nuis][1], pulls[nuis][1]-pulls[nuis][0], impacts[nuis])\n\n results['params'].append(systematic)\n\n with open(ofilename, 'w') as fp:\n json.dump(results, fp, indent=2)", "def report_json(self):\n # type: () -> Optional[AnyStr]\n return json.dumps(self.gen_report(as_dict=True), indent=4)", "def to_json_string(self):\n\t\treturn json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def print_rendered_results(results_dict):\n class _HubComponentEncoder(json.JSONEncoder):\n def default(self, o):\n if isinstance(o, base.HubComponent):\n return repr(o)\n return json.JSONEncoder.default(self, o)\n formatted = json.dumps(results_dict, indent=4, cls=_HubComponentEncoder)\n # the returned string contains lines with trailing spaces, which causes\n # doctests to fail. 
So fix that here.\n for s in formatted.splitlines():\n print(s.rstrip())", "def getResults():", "def add_results_to_json(js, nbest_hyps, char_list):\n # copy old json info\n new_js = dict()\n new_js[\"utt2spk\"] = js[\"utt2spk\"]\n new_js[\"output\"] = []\n\n for n, hyp in enumerate(nbest_hyps, 1):\n # parse hypothesis\n rec_text, rec_token, rec_tokenid, score = parse_hypothesis(hyp,\n char_list)\n\n # copy ground-truth\n if len(js[\"output\"]) > 0:\n out_dic = dict(js[\"output\"][0].items())\n else:\n # for no reference case (e.g., speech translation)\n out_dic = {\"name\": \"\"}\n\n # update name\n out_dic[\"name\"] += \"[%d]\" % n\n\n # add recognition results\n out_dic[\"rec_text\"] = rec_text\n out_dic[\"rec_token\"] = rec_token\n out_dic[\"rec_tokenid\"] = rec_tokenid\n out_dic[\"score\"] = score\n\n # add to list of N-best result dicts\n new_js[\"output\"].append(out_dic)\n\n # show 1-best result\n if n == 1:\n if \"text\" in out_dic.keys():\n logger.info(\"groundtruth: %s\" % out_dic[\"text\"])\n logger.info(\"prediction : %s\" % out_dic[\"rec_text\"])\n\n return new_js", "def output_fn(predictions, content_type):\n assert content_type == 'application/json'\n res = predictions.cpu().numpy().tolist()\n return json.dumps(res)", "def to_json_string(self):\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json_string(self):\r\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def to_json(self):\n pass", "def to_json_string(self):\n return json.dumps(self.to_dict(), indent = 2, sort_keys = True) + \"\\n\"", "def get_json(self):\n return {\n \"power\": self.get_power(), \n \"timestamp\": self.get_timestamp(), \n \"shortage\": self.get_shortage()\n }", "def main():\n print(dumps(get_data()))\n return 0", "def output_json(regions, outfile):\n with open(outfile, 'w') as outF:\n json.dump(regions, outF)", "def ToJson(self):\n output = json.dumps(self.ToDictionary(), sort_keys=True, indent=4)\n return output", "def writeJobJSON(self):\n\n with self.c.prefix(f\"source {self.hostDefn[self.host]['condaPath']} {self.hostDefn[self.host]['condaEnv']}\"):\n result = self.c.run(f\"python {Path(self.hostDefn[self.host]['repoScpPath'], self.scpDefnRepo['jobJSON']).as_posix()} {self.hostDefn[self.host]['nbProcDir']/self.jsonProcFile.name}\")\n\n return result", "def write_krun_results_file(results, filename):\n\n with bz2.BZ2File(filename, 'wb') as file_:\n file_.write(json.dumps(results, indent=4))", "def to_json_string(self):\n\t\treturn json.dumps(dataclasses.asdict(self), indent=2, sort_keys=True) + \"\\n\"", "def main() -> int:\n 
print(json.dumps(get_data())) # noqa: WPS421 (side-effect)\n return 0", "def get_json_to_stdout(file_name):\n json_object = get_json(file_name)\n if json_object is None:\n print(\"Failed to evaluate %s\" % file_name, file=sys.stderr)\n sys.exit(1)\n json.dump(json_object, sys.stdout)\n print()", "def get_json(self):\n data = {}\n data['ip'] = self.ip\n\n try:\n data['country'] = self.processedvtdata[\"country\"]\n except KeyError:\n data['country'] = 'None'\n try:\n data['as'] = self.processedvtdata[\"as_owner\"]\n except KeyError:\n data['as'] = 'None'\n try:\n data['rdns'] = self.processedvtdata[\"self.reversedns\"]\n except KeyError:\n data['rdns'] = 'None'\n try:\n data['label'] = self.expertlabel\n except AttributeError:\n data['label'] = ''\n\n # geodata\n #{\"status\":\"success\",\"country\":\"Yemen\",\"countryCode\":\"YE\",\"region\":\"SA\",\"regionName\":\"Amanat Alasimah\",\"city\":\"Sanaa\",\"zip\":\"\",\"lat\":15.3522,\"lon\":44.2095,\"timezone\":\"Asia/Aden\",\"isp\":\"Public Telecommunication Corporation\",\"org\":\"YemenNet\",\"as\":\"AS30873 Public Telecommunication Corporation\",\"query\":\"134.35.218.63\"}\n if self.geodata:\n data['geodata'] = self.geodata\n \n # vt resolutions. Is a list\n data['vt'] = {}\n try:\n if self.processedvtdata['resolutions'] != 'None':\n data['vt']['resolutions'] = []\n for count, resolution_tuple in enumerate(self.processedvtdata['resolutions']):\n if count >= self.amount_to_print:\n break\n temp = {}\n temp['date'] = resolution_tuple[0]\n temp['domain'] = resolution_tuple[1]\n data['vt']['resolutions'].append(temp)\n except KeyError:\n pass\n\n # vt urls. Is a list\n try:\n if self.processedvtdata['detected_urls'] != 'None':\n data['vt']['detected_urls'] = []\n for count, url_tuple in enumerate(self.processedvtdata['detected_urls']):\n if count >= self.amount_to_print:\n break\n temp = {}\n temp['date'] = url_tuple[0]\n temp['url'] = url_tuple[1][0]\n temp['detections'] = str(url_tuple[1][1]) + '/' + str(url_tuple[1][2])\n data['vt']['detected_urls'].append(temp)\n except KeyError:\n pass\n\n\n # vt detected communicating samples. Is a list\n try:\n if self.processedvtdata['detected_communicating_samples'] != 'None':\n data['vt']['detected_communicating_samples'] = []\n for count, communcating_tuple in enumerate(self.processedvtdata['detected_communicating_samples']):\n if count >= self.amount_to_print:\n break\n temp = {}\n temp['date'] = communcating_tuple[0]\n temp['detections'] = str(communcating_tuple[1][0]) + '/' + str(communcating_tuple[1][1])\n temp['sha256'] = communcating_tuple[1][2]\n data['vt']['detected_communicating_samples'].append(temp)\n except AttributeError:\n pass\n\n # vt detected downloaded samples. Is a list\n try:\n if self.processedvtdata['detected_downloaded_samples'] != 'None':\n data['vt']['detected_downloaded_samples'] = []\n for count, detected_tuple in enumerate(self.processedvtdata['detected_downloaded_samples']):\n if count >= self.amount_to_print:\n break\n temp = {}\n temp['date'] = detected_tuple[0]\n temp['detections'] = str(detected_tuple[1][0]) + '/' + str(detected_tuple[1][1])\n temp['sha256'] = detected_tuple[1][2]\n data['vt']['detected_downloaded_samples'].append(temp)\n except AttributeError:\n pass\n\n # vt referrer downloaded samples. 
Is a list\n try:\n if self.processedvtdata['detected_referrer_samples'] != 'None':\n data['vt']['detected_referrer_samples'] = []\n for count, referrer_tuple in enumerate(self.processedvtdata['detected_referrer_samples']):\n if count >= self.amount_to_print:\n break\n temp = {}\n temp['sha256'] = referrer_tuple[0]\n temp['detections'] = str(referrer_tuple[1][0]) + '/' + str(referrer_tuple[1][1])\n data['vt']['detected_referrer_samples'].append(temp)\n except AttributeError:\n pass\n\n # pt data\n data['pt'] = {}\n if self.processedptdata:\n count = 0\n data['pt']['passive_dns'] = []\n for result in self.processedptdata_results:\n if count >= self.amount_to_print:\n break\n temp = {}\n temp['lastseen'] = result[0]\n temp['firstseen'] = result[1][0]\n temp['hostname'] = result[1][1]\n data['pt']['passive_dns'].append(temp)\n count += 1\n\n # shodan data\n try:\n if self.shodandata:\n data['shodan'] = self.shodandata\n except AttributeError:\n pass\n\n data = json.dumps(data)\n return data", "def as_json(self):" ]
[ "0.75546306", "0.7512504", "0.73262453", "0.7086924", "0.698022", "0.6938096", "0.6925703", "0.6855479", "0.67590535", "0.67026407", "0.66884065", "0.6552769", "0.64993656", "0.6483085", "0.6483022", "0.64827317", "0.6453574", "0.6452356", "0.64301246", "0.6410083", "0.63972706", "0.6379759", "0.6346694", "0.63428116", "0.6332092", "0.63147783", "0.6314217", "0.63038063", "0.62861246", "0.62844485", "0.6278422", "0.62759125", "0.6268234", "0.62647855", "0.6261376", "0.6205149", "0.61883885", "0.6153687", "0.614325", "0.6140303", "0.61358243", "0.6135321", "0.6091632", "0.6082575", "0.60816747", "0.60784996", "0.60764134", "0.6068842", "0.60447896", "0.6036478", "0.6030042", "0.6027898", "0.60223854", "0.6014138", "0.6013024", "0.6010734", "0.60096955", "0.60095036", "0.6009049", "0.598206", "0.5976638", "0.59649754", "0.59649754", "0.5956488", "0.59536374", "0.5947253", "0.5944664", "0.5943029", "0.59307927", "0.5919766", "0.59124696", "0.5902615", "0.59004146", "0.5898351", "0.5878857", "0.58566576", "0.58520895", "0.5849485", "0.5849485", "0.5849485", "0.5849485", "0.5849485", "0.5849485", "0.5849485", "0.5849485", "0.5849485", "0.5849485", "0.5846891", "0.5845258", "0.5842684", "0.5838955", "0.5826677", "0.58190465", "0.58150226", "0.5806785", "0.58033025", "0.5799933", "0.57994115", "0.57984704", "0.5794346" ]
0.66968524
10
Clean up so nothing dangles
def cleanup(self):
    if self.log_fo:
        self.log_fo.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def horde_cleanup(self):", "def cleanup(self):\n self.subpixel, self.pixel = self.stepup(self.subpixel, self.pixel, AxisDistance.pixelsize)\n self.pixel, self.tile = self.stepup(self.pixel, self.tile, AxisDistance.tilesize)", "def cleanup():", "def clean(self):\n for i in range(len(self.asteroid_type) - 1, -1, -1):\n x, y = self.get_coords(self.asteroid_type[i])\n if x < -self.gap:\n self.del_asteroid(i)", "def cleanup(self):", "def cleanup(self):", "def cleanup(self):", "def clean_up(self):\n\t\tpass", "def _clean_up(self):", "def cleanAll(self):\n for i in range(len(self.asteroid_type) - 1, -1, -1):\n x, y = self.get_coords(self.asteroid_type[i])\n self.del_asteroid(i)\n\n for i in range(len(self.asteroid_id_e) - 1, -1, -1):\n x, y = self.get_coords(self.asteroid_id_e[i])\n self.del_asteroid_e(i)", "def clean_up(self):\n pass", "def clean_up(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup (self):\n pass", "def cleanup(self):\r\n pass", "def cleanup(self):\r\n pass", "def _cleanup(self):\n\n self.netIns = []\n self.netOuts = []\n self.Gradients = [None]*self.size", "def cleanup(self):\n\n pass", "def cleanUp(self):\r\n pass", "def _cleanup(self):\n pass", "def cleanup(self):\r\n\r\n # Remove strip from window.\r", "def clean_up(self):\n # TODO: Implement if needed\n pass", "def cleanup(self):\r\n pass", "def cleanup(self):\n self.GP.cleanup()", "def cleanup(self, *args, **kwargs):", "def clean_edges(self):", "def cleanup(self):\r\n print(\"Cleanup not implemented\")", "def revise():", "def clean_up_data(self):\n pass", "def cleaning (data):", "def cleanup(self):\n self.sensor.cleanup()", "def remove_all(self):\n self.initial = None\n self.contour = None\n self.control_points = []", "def post_cleanup(self):\n pass", "def clear_transforms(self): # -> None:\n ...", "def clear():", "def cleanUp():\n pass", "def CleanUp(self):\n for Ind in self.IndList():\n if amax(abs(self[Ind]))<1e-10:\n del self[Ind]", "def cleanup_deadstuff(self):\r\n for bullet in self.bullets:\r\n if not bullet.alive:\r\n self.bullets.remove(bullet)\r\n\r\n for big_rock in self.rocks:\r\n if not big_rock.alive:\r\n self.rocks.remove(big_rock)", "def finalize(self):\n print('Cleaning up...')", "def cleanup(self):\n return True;", "def clean():\n filter_phase_data()\n combine_phase_data()\n remove_duplicates_phase_data()", "def disarm(self):\n pass", "def cleanup(self):\n raise NotImplementedError", "def clean(_context):", "def clean(c):", "def clean(self):\n pass", "def clear_geometries(self):", "def __cleanup(self):\n while self.levels > 1 and self.head.next == None:\n temp = self.head\n self.head = self.head.below\n del temp\n self.levels -=1", "def finalise(self):", "def teardown(self):\n del self.testC, self.bounds, self.one_d_vars, self.unequal_one_d_vars\n del self.testI\n\n return", "def final_cleanup(self):\n raise NotImplementedError()", "def _cleanup():\n for (\n _,\n transformation,\n transformation_dict,\n _,\n _,\n increfed,\n _,\n ) in _queued_transformations:\n # For some reason, the logic here is different than for the async version\n # (see run_transformation_dict_async)\n if (\n increfed\n and bytes.fromhex(transformation) in transformation_cache.transformations\n ):\n 
transformation_cache.decref_transformation(transformation_dict, increfed)", "def teardown():\n os.remove('green-dot.tif')\n os.remove('green-dot.jpg')\n os.remove('green-dot.png')", "def teardown(self):\n del self.testInst, self.bounds1, self.bounds2, self.long_bins\n del self.mlt_bins\n\n return", "def cleanup(self):\r\n logging.info(\"entered the cleanup\")", "def clean(self):\n # Delete vertices / faces / colors / normals :\n self._vert_buffer.delete()\n self._index_buffer.delete()\n self._normals_buffer.delete()\n self._xrange_buffer.delete()\n self._math_buffer.delete()", "def clean(self):", "def cleanup(self):\n\n self.PLC['1'].set_plc_mode(0)\n self.PLC['1'].plc_clear('all')\n super(Test200SmartSanityClear005, self).cleanup()", "def teardown(self):\n\n del self.testInst, self.bounds1, self.bounds2, self.long_bins\n del self.mlt_bins\n\n return", "def cleanup(*args, **kwargs): # real signature unknown\n pass", "def clean_up(self) -> None:\n print('Doing some clean-up work...')", "def cleanup(self):\n raise NotImplementedError()", "def cleanup(self):\n raise NotImplementedError()", "def cleanup(self):\n self._tmp_obj.cleanup()", "def _clear_drawing(self) -> None:\n self.vertices.clear()\n self.edges.clear()\n self.subplot.clear()\n self.subplot2.clear()", "def teardown(self):\n del self.testC, self.testI, self.bounds\n\n return", "def teardown(self):\n del self.testInst, self.bounds1, self.bounds2\n\n return", "def teardown_simulate(self):\n self.positions = self.calibrated_positions", "def cleanup(filePath):\n restoreRenderSettings()\n cleanupCamera()\n cleanupWorld()\n deleteOutputPath(filePath)", "def free_intermediate_arrays(self):\n self._mgx = None\n self._mgy = None\n self._mgz = None\n self._vander = None\n self._bkg_cube = None\n self._bkg_cube_dirty = True", "def _clear(self):\n self._fillitem = self._fillpath = None\n for item in self.items:\n self.screen._delete(item)\n self.currentLineItem = self.screen._createline()\n self.currentLine = []\n if self._drawing:\n self.currentLine.append(self._position)\n self.items = [self.currentLineItem]\n self.clearstamps()", "def clean(self):\n\t\tfor v in self:\n\t\t\tv.reset_distance()\n\t\t\tv.reset_predecessor()\n\t\t\tv.reset_visited()", "def clear(self):\n self._x_prev = None\n self._y_prev = None", "def cleanup(self):\n return self.cleanupNonces(), self.cleanupAssociations()", "def clean(self):\n warnings.warn('No cleaning routines available for CHAIN GPS')\n\n return", "def cleanup(self) -> None:\n raise NotImplementedError()", "def finalize(self):", "def finalize(self):", "def clean(self):\n self.board_values = np.zeros((self.size, self.size))\n self.tiles_taken[:, :] = False", "def applyMorphologicalCleaning(self, image):", "def _clear_drawing(self) -> None:\n self.vertices.clear()\n self.edges.clear()\n self.subplot.clear()\n self.selected_element = None\n self.pressed_elements.clear()", "def deinit(self) -> None:", "def deinit(self) -> None:", "def deinit(self) -> None:", "def deinit(self) -> None:", "def deinit(self) -> None:", "def deinit(self) -> None:", "def deinit(self) -> None:", "def deinit(self) -> None:", "def deinit(self) -> None:", "def deinit(self) -> None:" ]
[ "0.7524904", "0.7230241", "0.71339816", "0.7000594", "0.6759445", "0.6759445", "0.6759445", "0.66640466", "0.66440374", "0.65345025", "0.6510747", "0.6510747", "0.64583874", "0.64583874", "0.64583874", "0.64583874", "0.64583874", "0.64583874", "0.64583874", "0.64583874", "0.64583874", "0.64583874", "0.64583874", "0.6456663", "0.64300627", "0.64300627", "0.6418401", "0.64057565", "0.63987535", "0.6374086", "0.6352772", "0.63358456", "0.6327734", "0.6314523", "0.6298147", "0.62790287", "0.6233736", "0.6172763", "0.61595887", "0.6155526", "0.61538386", "0.61496675", "0.6131397", "0.6125942", "0.61090714", "0.61077505", "0.61016434", "0.6087298", "0.60820335", "0.60663927", "0.60424703", "0.6036373", "0.6032836", "0.6024828", "0.6017154", "0.6002765", "0.598336", "0.5965902", "0.5952465", "0.5949984", "0.5937863", "0.59314144", "0.592575", "0.5919457", "0.5905965", "0.5897327", "0.5888134", "0.5882497", "0.58811146", "0.588092", "0.58792573", "0.5878303", "0.5878303", "0.58665055", "0.586541", "0.5861734", "0.5858722", "0.58493197", "0.5849088", "0.58342755", "0.58289", "0.5816995", "0.58160305", "0.58017963", "0.5798631", "0.57942206", "0.5793758", "0.5793758", "0.57896835", "0.5788074", "0.57854664", "0.57817304", "0.57817304", "0.57817304", "0.57817304", "0.57817304", "0.57817304", "0.57817304", "0.57817304", "0.57817304", "0.57817304" ]
0.0
-1
Get a translation for the given message. This proxies for the internal translations object.
def gettext(self, string):
    return self._translations.gettext(string)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gettext(self, message):\n if self._translations.has_key(message):\n return self._translations[message]\n return super(Translations, self).gettext(message)", "def get(self, msgid):\r\n return self.trans.get(msgid, str(msgid))", "def get_translation(self):\n return self.translation", "def get_translation(self):\n trans_keys = ''.join(self._trans_dict.keys())\n trans_values = ''.join(self._trans_dict.values())\n\n trans_table = string.maketrans(trans_keys, trans_values)\n translation = self._puzzle.translate(trans_table)\n return translation", "def get_translation(obj, language_code):\n if not obj or not hasattr(obj, \"get_translation\"):\n return None\n return obj.get_translation(language_code)", "def get_translation(self):", "def GetTranslation(*args):\n return _gdi_.GetTranslation(*args)", "def translate(self, phrase):\n\n if phrase not in TRANSLATIONS or self.ui_lang_code not in TRANSLATIONS[phrase]:\n return phrase\n return TRANSLATIONS[phrase][self.ui_lang_code]", "def interpolate_insted_of_translate(\n self, msgid, mapping=None, *args, **kw): # pragma: no cover webdriver\n return zope.i18n.interpolate(msgid, mapping)", "def localize(self, msg):\n return self.translations.get(msg, msg)", "def localize(self, msg):\n return self.translations.get(msg, msg)", "def lgettext(self, message):\n\n return self.get_pseudo(message)", "def gettext(self, message):\n if self._fallback:\n return self._fallback.gettext(message)\n return message", "def ugettext(self, message):\n if isinstance(message, unicode):\n msg = message.encode(\"utf-8\")\n else:\n msg = message\n if self._translations.has_key(msg):\n return unicode(self._translations[msg], \"utf-8\")\n return super(Translations, self).ugettext(message)", "def get_translation(self, command, keyword=None, item=None):\n key = item if item is not None else \\\n keyword if keyword is not None else command\n cata = self.get_catalog(command)\n if cata is not None:\n dtr = cata.definition.get(\"translation\", {})\n trans = dtr.get(key)\n if trans is not None:\n return unicode(trans, 'utf-8')\n return GLOBAL_DICT.get(key, key)", "def translation(self):\n return self._translation", "def get_translation ( self ):\n self.verify_post_data ( )\n\n text = request.json[ 'text' ]\n src_lang = request.json[ 'source_lang' ]\n target_lang = request.json[ 'target_lang' ]\n\n # if translation is available in cache, just fetch it from there. 
Otherwise use translation service.\n translated_text = self.get_set_translation_from_cache ( text, src_lang, target_lang )\n\n return jsonify ( {\"Translation\": translated_text} )", "def get_translation_for_object(self, lang, obj=None, model=None, object_id=None):\n\n # Gets object model and pk if informed\n if obj:\n model = type(obj)\n object_id = obj.pk\n\n cache_key = self.make_cache_key(model, object_id, lang)\n \n # Checks if there is a cached object for this\n from_cache = cache.get(cache_key, None)\n\n if from_cache:\n return from_cache\n\n # Gets the related content type\n c_type = ContentType.objects.get_for_model(model)\n\n # Gets the translation\n trans = self.get(language=lang, content_type=c_type, object_id=object_id)\n\n # Stores in cache\n cache.set(cache_key, trans)\n\n # Returns the translation object\n return trans", "def get_translation_from_cache ( self, text, src_lang, target_lang ):\n return self.app_cache.get_translation_from_cache ( text, src_lang, target_lang )", "def get_localized_message(message, user_locale):\r\n if isinstance(message, Message):\r\n if user_locale:\r\n message.locale = user_locale\r\n return unicode(message)\r\n else:\r\n return message", "def gettext_for(locale='en'):\n return Translations.load(\n os.path.join(BASEDIR, 'app', 'translations'), [locale]\n ).ugettext", "def get(self, message_id: int, lang: str = None) -> Message:\n if lang is None:\n lang = SettingsDAO().get_value('language', str)\n\n data = dict(self.database.select(self.DATABASE_TABLE, {'ID': message_id})[0])\n\n curDate = datetime.fromordinal(data.get('date')) if data.get('date') else None\n\n message = Message(message_id, data['text'], curDate, bool(data['isMine']), data.get('partyCharacterId'),\n data.get('characterId'))\n\n return message", "def _lazy_gettext(msg):\r\n return Message(msg, domain)", "def _translate(self, key):\n return self.TRANSLATE.get(key, key)", "def pgettext(msgctxt, message):\r\n key = msgctxt + '\\x04' + message\r\n translation = get_translation().gettext(key)\r\n return message if translation == key else translation", "def translate_as(self, lang):\n trans = ItemTranslation.objects.filter(item=self,\n lang=lang,\n is_active=True)\n if trans: return trans.first()\n else: return self", "def translation(self, translation_id):\r\n return Translation(self, translation_id)", "def translation(self, translation_id):\r\n return Translation(self, translation_id)", "def translate():\n text = request.args.get('text')\n\n # Send a request to Google's Translate REST API using your API credentials defined above\n ans = service.translations().list(source='en', target='zh-CN', q=text).execute()\n\n # Return translated text back to user\n return ans['translations'][0]['translatedText']", "def get_message(self, _id):\n return Message.deserialize(self._get_single('messages', {'id': _id}))", "def test_simple_translation_using_get(self):\n pass", "def get_translation(location):\r\n return self.loc_mapper.translate_location(location, published, add_entry_if_missing=True)", "def get(self, id=None):\n\n\t\tif id:\n\t\t\tfor m in self.messages:\n\t\t\t\tif m[\"id\"] == id:\n\t\t\t\t\treturn(m)\n\n\t\t\tapi.abort(404, \"Message {} doesn't exist.\".format(id))\n\t\telse:\n\t\t\treturn(self.messages)", "def get_translation(self, word):\n qr = Query()\n val = self.db.search(qr.word == word)\n if val:\n return val[0][\"translations\"]\n else:\n return []", "def gettext_translate(string):\n # Try default translation first\n translation = 
localizer.old_translate(i18n.TranslationString(string))\n if translation == string:\n # translation failed then use FormEncode\n translation = formencode_api._stdtrans(string)\n return translation", "def GetApiMessage(message_name):\n messages = apis.GetMessagesModule(_BQ_API, _BQ_API_VERSION)\n return getattr(messages, message_name)", "def ugettext(self, message):\n if self._fallback:\n return self._fallback.ugettext(message)\n return unicode(message)", "def get_message_by_id(message_id):\n return Message.query.get(message_id)", "def get_set_translation_from_cache ( self, text, src_lang, target_lang ):\n translated_text = self.get_translation_from_cache ( text, src_lang, target_lang )\n if not translated_text:\n translated_text = self.service_creator.get_translation ( text, src_lang, target_lang )\n self.set_translation_to_cache ( text, src_lang, target_lang, translated_text )\n return translated_text", "def translate():\n pass", "def fetchTranslation(self, language):\n pass", "def get_message(self, message_id: int) -> discord.Message:\n return self._connection._get_message(message_id)", "def translations(self, **kwargs):\n\n path = self._get_movie_id_path('translations')\n resp = self._get_method(path, kwargs)\n return resp", "def Translate(*args, **kwargs):\n return _gdi_.GraphicsMatrix_Translate(*args, **kwargs)", "def gettext_translate( s ):\n return catalogs.translate(s)", "def get_message(self, message_id):\n r = requests.get('https://outlook.office.com/api/v2.0/me/messages/' + message_id, headers=self._headers)\n check_response(r)\n return Message._json_to_message(self, r.json())", "def get_translation(translated_text_json, language=None):\n if not language:\n language = translation.get_language()\n\n translations = translated_text_json or {}\n en = translations.get('en')\n translated = translations.get(language)\n return translated or en or ''", "def translations(self):\r\n return Translations(self)", "def translations(self):\r\n return Translations(self)", "def translate(self, message): # pylint:disable=no-self-use\n # noinspection PyTypeChecker,PyArgumentList,PyCallByClass\n return QCoreApplication.translate(\"LinzDataImporter\", message)", "def translate(self):\n pass", "def get_gettext():\n local_path = os.path.realpath(os.path.dirname(sys.argv[0])) + \\\n '/translations'\n langs = []\n lc, encoding = locale.getdefaultlocale()\n if (lc):\n langs = [lc]\n osLanguage = os.environ.get('LANGUAGE', None)\n if (osLanguage):\n langs += osLanguage.split(\":\")\n langs += [\"en_US\"]\n lang = gettext.translation('wicd', local_path, languages=langs, \n fallback=True)\n _ = lang.gettext\n return _", "def machine_translation(request):\n log.debug(\"Get translation from machine translation service.\")\n\n try:\n text = request.GET['text']\n locale = request.GET['locale']\n check = request.GET['check']\n except MultiValueDictKeyError as e:\n log.error(str(e))\n return HttpResponse(\"error\")\n\n if hasattr(settings, 'MICROSOFT_TRANSLATOR_API_KEY'):\n api_key = settings.MICROSOFT_TRANSLATOR_API_KEY\n else:\n log.error(\"MICROSOFT_TRANSLATOR_API_KEY not set\")\n return HttpResponse(\"apikey\")\n\n obj = {}\n\n # On first run, check if target language supported\n if check == \"true\":\n supported = False\n languages = settings.MICROSOFT_TRANSLATOR_LOCALES\n\n if locale in languages:\n supported = True\n\n else:\n for lang in languages:\n if lang.startswith(locale.split(\"-\")[0]): # Neutral locales\n supported = True\n locale = lang\n break\n\n if not supported:\n log.debug(\"Locale not 
supported.\")\n return HttpResponse(\"not-supported\")\n\n obj['locale'] = locale\n\n url = \"http://api.microsofttranslator.com/V2/Http.svc/Translate\"\n payload = {\n \"appId\": api_key,\n \"text\": text,\n \"from\": \"en\",\n \"to\": locale,\n \"contentType\": \"text/html\",\n }\n\n try:\n r = requests.get(url, params=payload)\n log.debug(r.content)\n\n # Parse XML response\n root = ET.fromstring(r.content)\n translation = root.text\n obj['translation'] = translation\n\n return HttpResponse(json.dumps(obj), content_type='application/json')\n\n except Exception as e:\n log.error(e)\n return HttpResponse(\"error\")", "def fake_ugettext(translations):\r\n def _ugettext(text): # pylint: disable=missing-docstring\r\n return translations.get(text, text)\r\n return _ugettext", "def translate(self):\n raise NotImplementedError('subclass must override this method')", "def load_message(message_id):\n pathname = \"messages/{}.json\".format(message_id)\n return _load_message(pathname)", "def translate(data:object, **kwargs) -> object:\n\n return translator.TranslateVisitor(**kwargs).translate(data)", "def fake_ugettext(translations):\n def _ugettext(text):\n return translations.get(text, text)\n return _ugettext", "def get(self, id):\n\n\t\treturn MessageStore.get(id)", "def translate(sentence,target,api_key):\n #translate without using googletrans wrapper library\n URL = \"https://translation.googleapis.com/language/translate/v2?target=\"+target+\"&key=\"+api_key+\"&q=\"+sentence\n # sending get request and saving the response as response object \n r = requests.get(url = URL)\n\n if r.status_code == 200:\n # extracting data in json format \n data = r.json()\n return data['data']['translations'][0]['translatedText']", "def fake_pgettext(translations):\r\n def _pgettext(context, text): # pylint: disable=missing-docstring\r\n return translations.get((context, text), text)\r\n return _pgettext", "def translate(self, language=None):", "def get_json_message(message_key):\n file_path = (os.getcwd() + '/ufo/static/locales/' +\n flask.session['language_prefix'] + '/messages.json')\n try:\n with open(file_path) as json_file:\n messages = json.load(json_file)\n return messages[message_key]\n except:\n return message_key", "def getTranslation(table, language = None):\n\n return cache.codeTableFind(table, language)", "def tr(self, message):\n\t\t# noinspection PyTypeChecker,PyArgumentList,PyCallByClass\n\t\treturn QCoreApplication.translate('index_extractor', message)", "def get(self, key, domain=None, language=None, context=None):\n\n if domain is None:\n if self.default_domain is None:\n raise ValueError('No domain given!')\n domain = self.default_domain\n messages = self.get_domain(domain, language)\n\n if not key in messages and self.update_on_missing:\n messages = self.get_domain(domain, language, force_download=True)\n\n if not key in messages:\n raise ValueError('No message for the key {0}!'.format(key))\n\n message = messages[key]\n \n if context is not None:\n for i in range(0, len(context)):\n placeholder = \"${0}\".format(i + 1)\n message = message.replace(placeholder, unicode(context[i]))\n \n return message", "def fake_pgettext(translations):\n def _pgettext(context, text):\n return translations.get((context, text), text)\n return _pgettext", "def get_message(self, bulk_id):\n res = self.client.get(\"/v1/messages/\" + str(bulk_id))\n\n try:\n return Message(res.data[\"message\"])\n except:\n raise ValueError(\"returned response not valid\")", "def translate_as(self, lang):\n trans = 
CategoryTranslation.objects.filter(category=self,\n lang=lang,\n is_active=True)\n if trans: return trans.first()\n else: return self", "def resolve_translation(obj, _):\n return obj.translation.decode()", "def tr(self, message):\n # noinspection PyTypeChecker,PyArgumentList,PyCallByClass\n return QCoreApplication.translate('CCTVMapper', message)", "def ugettext(self):\n return self._translations.gettext", "def GetMessage(service, user_id, msg_id, snippetMessage=True):\n try:\n message = service.users().messages().get(userId=user_id, id=msg_id).execute()\n #print('Message snippet: %s' % message['snippet'])\n #print('Message snippet: %s' % message['payload']['headers'])\n #print(unicode('Message snippet: %s' % message['snippet'],'utf-8'))\n\n if snippetMessage:\n return message['snippet']\n else:\n return message\n except errors.HttpError, error:\n print('An error occurred: %s' % error)", "def translate(self,phrase, **kwargs):\n \n #Load the input and output languages\n if 'output_language' in kwargs:\n out_lang = kwargs.pop('output_language')\n else:\n out_lang = self.language\n \n if 'input_language' in kwargs:\n in_lang = kwargs.pop('input_language')\n else:\n in_lang = 'english'\n \n #Identify the language based on intput\n if out_lang in ['Spanish', 'spanish', 'Espanol', 'espanol', 's', 'S']:\n output_language = 'spanish'\n elif out_lang in ['Portuguese', 'portuguese', 'Português', 'português', 'p', 'P']:\n output_language = 'portuguese'\n elif out_lang in ['English', 'english', 'E', 'e']:\n output_language = 'english'\n else:\n output_language = 'english'\n print('Unable to find language:', out_lang)\n \n #Open CSV with translations\n with open(self.translations, encoding='ISO-8859-15') as csv_file:\n csvread = csv.DictReader(csv_file)\n found = 0\n for row in csvread:\n if row[in_lang] == phrase:\n output_phrase = row[output_language] #translate phrase\n found = 1 #set flag indicating that the phrase was successfully translated\n\n #If no translation was found, return original phrase and present an error message\n if found == 0:\n output_phrase = phrase\n print('Unable to find phrase ', phrase, \"in language \", out_lang)\n \n return output_phrase", "def get_message_details(self, message_id):\n\n for message in self.message_list:\n if message['id'] == message_id:\n return message\n \n raise Exception('No message with this message id')", "def translate(lang):\n\n\tlangfilename = os.path.join(\"data\", \"translations\", lang + \".json\")\n\tif os.path.exists(langfilename):\n\t\twith open(langfilename, 'r') as langfile:\n\t\t\ttranslations = json.loads(langfile.read())\n\telse:\n\t\ttranslations = {}\n\n\twith open(os.path.join(\"data\", \"translations\", \"message_list.json\"), \"r\") as message_list_file:\n\t\tmessages = json.loads(message_list_file.read())\n\n\tcnt = 0\n\tfor m in messages:\n\t\tcnt += 1\n\t\t#if cnt > 15: break\n\t\tif not translations.get(m):\n\t\t\tprint 'translating: ' + m\n\t\t\tresponse = requests.get(\"\"\"https://www.googleapis.com/language/translate/v2\"\"\",\n\t\t\t\tparams = {\n\t\t\t\t\t\"key\": conf.google_api_key,\n\t\t\t\t\t\"source\": \"en\",\n\t\t\t\t\t\"target\": lang,\n\t\t\t\t\t\"q\": m\n\t\t\t\t}, verify=False)\n\n\t\t\tt = response.json[\"data\"][\"translations\"][0][\"translatedText\"] or m\n\t\t\ttranslations[m] = t.encode('utf-8')\n\n\t\t\twith open(langfilename, 'w') as langfile:\n\t\t\t\tlangfile.write(json.dumps(translations, indent=1, sort_keys=True))", "def trans(self):\n return self._base.trans", "def GetExistingResource(api_version, 
request_message):\n return utils.GetClient(api_version).projects_locations_transformers.Get(\n request_message)", "def get_message(message_id): # noqa: E501\n rtxFeedback = RTXFeedback()\n return rtxFeedback.getMessage(message_id)", "def get_message(message_id): # noqa: E501\n rtxFeedback = RTXFeedback()\n return rtxFeedback.getMessage(message_id)", "def translateText(text):\r\n\treturn translator.translate(text, src='en', dest='ro')", "def translation(self, d):\n newreg = self.copy()\n _translate(newreg, d)\n return newreg", "def getText(str, request, lang, formatted=True):\n # TODO: Should move this into a language instance. request.lang should be a language instance.\n\n # load texts if needed\n global _text_cache\n if not lang in _text_cache:\n (texts, unformatted) = loadLanguage(request, lang)\n # XXX add error handling\n _text_cache[lang] = texts\n _unformatted_text_cache[lang] = unformatted\n\n # get the matching entry in the mapping table\n trans = str\n try:\n if formatted:\n trans = _text_cache[lang][str]\n else:\n trans = _unformatted_text_cache[lang][str]\n except KeyError:\n try:\n language = languages[lang][ENAME]\n dictpagename = \"%sDict\" % language\n dicts = request.dicts\n if dicts.has_dict(dictpagename):\n userdict = dicts.dict(dictpagename)\n trans = userdict[str]\n else:\n raise KeyError\n except KeyError:\n # do not simply return trans with str, but recursively call\n # to get english translation, maybe formatted.\n # if we don't find an english \"translation\", we just format it\n # on the fly (this is needed for cfg.editor_quickhelp).\n if lang != 'en':\n trans = getText(str, request, 'en', formatted)\n elif formatted:\n trans = formatMarkup(request, str)\n\n return trans", "def get_message(message_id, service):\n message = service.users().messages().get(userId='me', id=message_id).execute()\n return message", "def get_message(self, message_id):\n req_data = [ str(message_id) ]\n return self.request(\"find:Message.stats, Message.content\", req_data)", "async def translate(self,ctx,lang=\"ja\",txt=None):\r\n if await bMsg(ctx,ctx.message.author.name,client):\r\n return\r\n await self.translater(ctx,lang,txt)", "def get_text(self):\n try:\n return self.get_translation().text\n except MissingTranslation:\n return _(\"(No text)\")", "def _lazy_ugettext(text: str):\n try:\n # Test if context is available,\n # cf. 
https://github.com/tracim/tracim/issues/173\n context = StackedObjectProxy(name=\"context\")\n context.translator\n return ugettext(text)\n except TypeError:\n return text", "def GetMessage(service, user_id, msg_id):\n try:\n message = service.users().messages().get(userId=user_id, id=msg_id).execute()\n\n return message\n except errors.HttpError:\n print('An error occurred: ')", "def get_localization(self, language: str) -> Localization:\n ...", "def get_translated_attribute(actual_object, attribute_name, translated):\n from django.conf import settings\n if len(settings.LANGUAGES) > 1 and translated.count() and hasattr(translated[0], attribute_name):\n return getattr(translated[0], attribute_name)\n elif hasattr(actual_object, attribute_name):\n return getattr(actual_object, attribute_name)\n else:\n return None", "def GetMessage(service, user_id, msg_id):\n try:\n #take out format='raw' if don't want base64\n message = service.users().messages().get(userId=user_id, id=msg_id, format='raw').execute()\n\n print('Message snippet: %s' % message['snippet'])\n\n return message\n except errors.HttpError, error:\n print('An error occurred: %s' % error)", "def get_msg(self, timeout_in_seconds=-1):\n return self._client.get_msg(timeout_in_seconds)", "def get_message(self, message_id):\n res = requests.get(url=\"https://api.ciscospark.com/v1/messages/{}\".format(message_id),\n headers=self.headers)\n\n try:\n text = res.json()['text']\n except AttributeError as e:\n print(res.text)\n return None\n\n return text", "def auto_translate(string):\n return localizer.translate(MessageFactory(string))", "def translate_phrase(in_phrase, destination_lang='en'):\r\n\r\n try:\r\n in_blob = TextBlob(in_phrase)\r\n detected_lang = in_blob.detect_language()\r\n translated_blob = str(in_blob.translate(to=destination_lang))\r\n except NotTranslated:\r\n detected_lang = 'xx'\r\n translated_blob = 'Sorry, there was an error translating your phrase'\r\n return TranslatedPhrase(input_phrase=in_phrase, input_language=detected_lang, output_phrase=translated_blob)", "def get_name_translation(self):\n\t\treturn frappe.get_value(\n\t\t\t\"Translation\",\n\t\t\t{\"source_text\": self.doc_type, \"language\": frappe.local.lang or \"en\"},\n\t\t\t[\"name\", \"translated_text\"],\n\t\t\tas_dict=True,\n\t\t)", "def Translate(*args, **kwargs):\n return _gdi_.GraphicsContext_Translate(*args, **kwargs)", "def translate(self, trans_input: TranslatorInput) -> TranslatorOutput:\n if not trans_input.tokens:\n return TranslatorOutput(id=trans_input.id,\n translation=\"\",\n tokens=[\"\"],\n attention_matrix=np.asarray([[0]]),\n score=-np.inf)\n\n return self._make_result(trans_input, *self.translate_nd(*self._get_inference_input(trans_input.tokens)))", "def translate_text(query, source_lang_code, target_lang_code):\n\n try:\n translations = TRANSLATION_SERVICE.translations().list(\n source=source_lang_code,\n target=target_lang_code,\n q=query\n ).execute()\n translation = translations['translations'][0]\n if 'detectedSourceLanguage' in translation.keys():\n source_lang_code = translation['detectedSourceLanguage']\n resp = random.choice(_TRANSLATE_RESULT).format(\n text=translation['translatedText'],\n fromLang=language_code_dict[source_lang_code],\n toLang=language_code_dict[target_lang_code])\n except (HTTPError, URLError, HTTPException):\n resp = random.choice(_TRANSLATE_NETWORK_ERROR)\n except Exception:\n resp = random.choice(_TRANSLATE_ERROR)\n return resp" ]
[ "0.76010066", "0.7040674", "0.66040176", "0.6410865", "0.63327235", "0.6329257", "0.63121814", "0.6301266", "0.62613475", "0.61739236", "0.61739236", "0.6169561", "0.61192596", "0.6037068", "0.60274595", "0.6026245", "0.60075307", "0.5990841", "0.5933024", "0.58953375", "0.5854476", "0.5784814", "0.57732815", "0.5768205", "0.57668376", "0.5762763", "0.57437414", "0.57437414", "0.5702111", "0.55427784", "0.5541649", "0.5541371", "0.55367094", "0.55348057", "0.5493866", "0.5480957", "0.5477916", "0.54521775", "0.54469997", "0.5437104", "0.5426697", "0.54188484", "0.53987396", "0.5393778", "0.53834015", "0.5368028", "0.5356265", "0.5355179", "0.5355179", "0.5328121", "0.52934957", "0.5277685", "0.52571636", "0.52504945", "0.52443457", "0.52393883", "0.52306587", "0.52292407", "0.522898", "0.52245104", "0.5220935", "0.52131546", "0.51942724", "0.5191812", "0.5182698", "0.5179217", "0.5176289", "0.5167609", "0.5155671", "0.51480186", "0.5143456", "0.5140143", "0.5122779", "0.5120591", "0.5089884", "0.5085969", "0.50814366", "0.5070064", "0.5069963", "0.5069963", "0.50679874", "0.5046727", "0.50449896", "0.5038784", "0.5022388", "0.5000962", "0.49915525", "0.4975937", "0.49705464", "0.49612087", "0.49610275", "0.49592686", "0.49568248", "0.4931377", "0.49311286", "0.49301043", "0.4917568", "0.48912236", "0.48871017", "0.4836259" ]
0.5226103
59
A decorator that can exclude a view from csrf protection.
def exempt(self, view):
    if isinstance(view, Blueprint):
        self._exempt_blueprints.add(view.name)
        return view
    if isinstance(view, string_types):
        view_location = view
    else:
        view_location = '%s.%s' % (view.__module__, view.__name__)
    self._exempt_views.add(view_location)
    return view
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def csrf_exempt(view_func):\r\n # We could just do view_func.csrf_exempt = True, but decorators\r\n # are nicer if they don't have side-effects, so we return a new\r\n # function.\r\n def wrapped_view(*args, **kwargs):\r\n return view_func(*args, **kwargs)\r\n wrapped_view.csrf_exempt = True\r\n return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view)", "def csrf_view_exempt(view_func):\r\n warnings.warn(\"csrf_view_exempt is deprecated. Use csrf_exempt instead.\",\r\n PendingDeprecationWarning)\r\n return csrf_exempt(view_func)", "def csrf_response_exempt(view_func):\r\n warnings.warn(\"csrf_response_exempt is deprecated. It no longer performs a \"\r\n \"function, and calls to it can be removed.\",\r\n PendingDeprecationWarning)\r\n return view_func", "def auth_middleware_exempt(view_func):\n view_func._auth_middleware_exempt = True\n return view_func", "def csrf_protection(fn):\n def protected(*args):\n if 'X-Requested-With' in request.headers:\n return fn(*args)\n else:\n return \"X-Requested-With header missing\", HTTPStatus.FORBIDDEN\n return protected", "def xsrf_protected(f):\n @functools.wraps(f)\n def wrapper(self, *args, **kwargs):\n non_xsrf_protected_verbs = ['options', 'head', 'get']\n if (self.request.method.lower() in non_xsrf_protected_verbs or\n self._RequestContainsValidXsrfToken()):\n return f(self, *args, **kwargs)\n else:\n try:\n self.XsrfFail()\n except Exception, e:\n self.handle_exception(e, self.app.debug)\n finally:\n self.session_store.save_sessions(self.response)\n return wrapper", "def add_csrf_validation(event):\n if event.request.method == 'POST':\n token = event.request.POST.get('_csrf')\n if token is None or token != event.request.session.get_csrf_token():\n headers = forget(event.request) # force a log out\n raise HTTPForbidden('CSRF token is missing or invalid',\n headers=headers)", "def checkcsrf(func):\n @functools.wraps(func)\n @sessiondecorator\n def wrapper(*args, session = None, **kw):\n if \"X-CSRF-TOKEN\" not in session.cookies:\n getcsrf(session)\n return func(*args,session = session, **kw)\n return wrapper", "def check_csrf(f):\n\n @wraps(f)\n @require_login\n def wrapper(*args, **kwds):\n if \"token\" not in session:\n raise PicoException(\n \"Internal server error\",\n data={\"debug\": \"CSRF token not found in session\"},\n )\n submitted_token = request.headers.get(\"X-CSRF-Token\", None)\n if submitted_token is None:\n raise PicoException(\"CSRF token not included in request\", 403)\n if session[\"token\"] != submitted_token:\n raise PicoException(\"CSRF token is not correct\", 403)\n return f(*args, **kwds)\n\n return wrapper", "def login_required_403(view):\n @wraps(view)\n def dec_view(request, *args, **kwargs):\n if not request.user.is_authenticated():\n return JsonResponse({\"detail\": \"You have to log in\"}, status=403)\n\n return view(request, *args, **kwargs)\n\n return dec_view", "def check_csrf_token(func):\n def new_fn(self, req):\n if 'csrf_token' not in req.params:\n return exc.HTTPForbidden(\"You must provide a CSRF token\")\n\n csrf_token = req.params['csrf_token']\n if not security.valid_csrf_token(csrf_secret, csrf_token):\n return exc.HTTPForbidden(\"Invalid CSRF token\")\n\n return func(self, req)\n\n new_fn.exposed = True\n return new_fn", "def blocked_ip_exempt(view_func):\n\n def wrapped_view(*args, **kwargs):\n return view_func(*args, **kwargs)\n\n wrapped_view.blocked_ip_exempt = True\n return wraps(view_func)(wrapped_view)", "def enforce_csrf(self, request):\n return # To not perform the csrf 
check previously happening", "def xhr_forbidden_view(request):\n return HTTPForbidden()", "def not_authenticated(func):\n def decorated(request, *args, **kwargs):\n if request.user.is_authenticated():\n return HttpResponseRedirect(get_next_url(request))\n return func(request, *args, **kwargs)\n return decorated", "def requires_post(func):\n def decorator(request, *args, **kwargs):\n if DEBUG or request.method == 'POST':\n return func(request, *args, **kwargs)\n return HttpResponseNotAllowed(['POST'])\n return decorator", "def auth_required(self, view):\n\n @functools.wraps(view)\n def decorated(*args, **kwargs):\n log.info(\"Trying to get access to protected resource: '%s'\", view.__name__)\n if request.method == 'POST':\n token = request.form['token']\n if self.development or self.authenticated(token):\n return view(*args, **kwargs)\n else:\n log.warning(\"User has not been authorized to get access to resource: %s\", view.__name__)\n else:\n log.warning(\"Bad request type! Expected 'POST', actual '%s'\", request.method)\n\n return abort(403)\n\n return decorated", "def process_view(self, request, view_func, *view_args, **view_kwargs):\n # Nothing to do when not demo mode.\n if not settings.DEMO_MODE:\n return None\n\n if view_func in self.safe_views:\n return None # continue handling request\n return HttpResponseForbidden()", "def allow_post_during_maintenance(view_func):\n @functools.wraps(view_func)\n def wrapped_view(*args, **kwargs):\n return view_func(*args, **kwargs)\n wrapped_view.view_allow_post_during_maintenance = True\n return wrapped_view", "def enforce_csrf(request):\n check = CSRFCheck()\n check.process_request(request)\n reason = check.process_view(request, None, (), {})\n if reason:\n # CSRF failed, bail with explicit error message\n raise NotAuthenticated(\"CSRF validation failed: %s\" % reason)", "def forbidden_view(request, reason=''):\n\n from django.middleware import csrf\n t = loader.get_template('403.html')\n return http.HttpResponseForbidden(t.render(template.RequestContext(request, {\n 'DEBUG': settings.DEBUG,\n 'reason': reason,\n 'no_referer': reason == csrf.REASON_NO_REFERER,\n })))", "def requires_get(func):\n def decorator(request, *args, **kwargs):\n if DEBUG or request.method == 'GET':\n return func(request, *args, **kwargs)\n return HttpResponseNotAllowed(['GET'])\n return decorator", "def anonymous_view(view):\n\tview = cache_control(public=True)(view)\n\t@wraps(view)\n\tdef g(request, *args, **kwargs):\n\t\trequest.anonymous = True\n\t\trequest.COOKIES = { }\n\t\trequest.user = AnonymousUser()\n\t\tif hasattr(request, \"session\"): request.session = { }\n\n\t\tfor header in list(request.META.keys()):\n\t\t\tif header not in (\n\t\t\t\t\t'SERVER_NAME', 'SERVER_PORT', 'HTTPS', 'wsgi.url_scheme', 'SERVER_PROTOCOL', 'HTTP_HOST',\n\t\t\t\t\t'REQUEST_METHOD', 'REQUEST_URI', 'DOCUMENT_URI', 'PATH_INFO', 'QUERY_STRING', 'CONTENT_LENGTH', 'CONTENT_TYPE',\n\t\t\t\t\t'REMOTE_ADDR'):\n\t\t\t\tdel request.META[header]\n\t\t\t\t\n\t\t# In order for the Django debug template context processor to work, we can't\n\t\t# clear REMOTE_ADDR. Clear it if {{debug}} would be false. 
The resulting page\n\t\t# should not be cached since it may depend on REMOTE_ADDR.\n\t\tif 'REMOTE_ADDR' in request.META and (not settings.DEBUG or request.META['REMOTE_ADDR'] not in settings.INTERNAL_IPS):\n\t\t\tdel request.META['REMOTE_ADDR']\n\t\t\t\n\t\tresponse = view(request, *args, **kwargs)\n\t\tresponse.csrf_processing_done = True # prevent generation of CSRF cookies\n\t\treturn response\n\treturn g", "def validate_against_csrf(event, Validator=CSRFValidator):\n \n request = event.request\n settings = request.registry.settings\n \n # Only validate if enabled.\n if not settings.get('csrf.validate', True):\n return\n \n # Ignore specified routes.\n matched_route = request.matched_route\n ignore_routes = settings.get('csrf.ignore_routes', None)\n if matched_route and ignore_routes:\n if matched_route.name in ignore_routes.split():\n return\n \n # Ignore specified paths.\n ignore_paths = settings.get('csrf.ignore_paths', None)\n if ignore_paths:\n for path in ignore_paths.split():\n if request.path.startswith(path):\n return\n \n session_token = request.session.get_csrf_token()\n try:\n Validator(session_token).validate(request)\n except CSRFError:\n raise HTTPUnauthorized", "def csrf(request):\n return django_csrf(request)['csrf_token']", "def inbound(request):\n\n try:\n csrf_token = request.headers.cookie.get('csrf_token')\n csrf_token = '' if csrf_token is None else csrf_token.value\n csrf_token = _sanitize_token(csrf_token)\n # Use same token next time\n request.context['csrf_token'] = csrf_token\n except KeyError:\n csrf_token = None\n # Generate token and store it in the request, so it's\n # available to the view.\n request.context['csrf_token'] = _get_new_csrf_key()\n\n # Assume that anything not defined as 'safe' by RC2616 needs protection\n if request.line.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):\n\n if _is_secure(request):\n # Suppose user visits http://example.com/\n # An active network attacker (man-in-the-middle, MITM) sends a\n # POST form that targets https://example.com/detonate-bomb/ and\n # submits it via JavaScript.\n #\n # The attacker will need to provide a CSRF cookie and token, but\n # that's no problem for a MITM and the session-independent\n # nonce we're using. So the MITM can circumvent the CSRF\n # protection. This is true for any HTTP connection, but anyone\n # using HTTPS expects better! For this reason, for\n # https://example.com/ we need additional protection that treats\n # http://example.com/ as completely untrusted. Under HTTPS,\n # Barth et al. found that the Referer header is missing for\n # same-domain requests in only about 0.2% of cases or less, so\n # we can use strict Referer checking.\n referer = request.headers.get('Referer')\n if referer is None:\n raise Response(403, REASON_NO_REFERER)\n\n # Note that get_host() includes the port.\n good_referer = 'https://%s/' % _get_host(request)\n if not same_origin(referer, good_referer):\n reason = REASON_BAD_REFERER % (referer, good_referer)\n raise Response(403, reason)\n\n if csrf_token is None:\n # No CSRF cookie. 
For POST requests, we insist on a CSRF cookie,\n # and in this way we can avoid all CSRF attacks, including login\n # CSRF.\n raise Response(403, REASON_NO_CSRF_COOKIE)\n\n # Check non-cookie token for match.\n request_csrf_token = \"\"\n if request.line.method == \"POST\":\n request_csrf_token = request.body.get('csrf_token', '')\n\n if request_csrf_token == \"\":\n # Fall back to X-CSRF-TOKEN, to make things easier for AJAX,\n # and possible for PUT/DELETE.\n request_csrf_token = request.headers.get('X-CSRF-TOKEN', '')\n\n if not constant_time_compare(request_csrf_token, csrf_token):\n raise Response(403, REASON_BAD_TOKEN)", "def _wrapped_view(request, *args, **kwargs):\n return view_func(request, *args, **kwargs)", "def not_logged_in(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if 'idToken' in session:\n return redirect(url_for('index'))\n else:\n return f(*args, **kwargs)\n return decorated_function", "def api_login_required(view):\n\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if g.user is None:\n return abort(401)\n\n return view(**kwargs)\n\n return wrapped_view", "def external_contractor_forbidden(view_func=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url=None):\n\n decorator = user_passes_test(\n lambda u: not BaseUser.is_external_contractor(u),\n login_url=login_url,\n redirect_field_name=redirect_field_name)\n\n if view_func:\n return decorator(view_func)\n\n return decorator", "def rotate_token(request: http.Request):\n if hasattr(request, '_csrf_hook'):\n request._csrf_hook.rotate_token()", "def unauthorized_only(view_func):\n def is_anonymous(user):\n return user.is_anonymous()\n\n return user_passes_test(is_anonymous, login_url='/', redirect_field_name=None)(view_func)", "def superuser_required(method):\n @wraps(method)\n def _wrapped_view(request, *args, **kwargs):\n if request.user.is_superuser is False:\n raise PermissionDenied\n\n return method(request, *args, **kwargs)\n\n return _wrapped_view", "def test_csrf_required_if_normal_view(self):\n\n client = Client(enforce_csrf_checks=True)\n response = client.post(reverse(\"test_view\"))\n self.assertEqual(response.status_code, 403)\n\n response = client.post(reverse(\"test_view\"), HTTP_X_APPENGINE_TASKNAME=\"test\")\n self.assertEqual(response.status_code, 200)", "def autz_required(permission, context=None):\n def decorator(func):\n\n @wraps(func)\n async def wrapper(*args):\n request = (args[-1].request\n if isinstance(args[-1], web.View)\n else args[-1])\n\n if await autz.permit(request, permission, context):\n return await func(*args)\n\n raise web.HTTPForbidden()\n\n return wrapper\n\n return decorator", "def validate_csrf_token(event):\n request = event.request\n if request.is_xhr or request.method.upper() in ('POST', 'PUT', 'DELETE'):\n pyramid.session.check_csrf_token(request, token='XSRF_TOKEN',\n header='X-XSRF-TOKEN', raises=True)", "def check_acl(func):\n\n @wraps(func)\n def decorated_view(*args, **kwargs):\n if request.method in EXEMPT_METHODS: # pragma: no cover\n return func(*args, **kwargs)\n # 'func' is a Flask.view.MethodView so we have access to some special\n # params\n cls = func.view_class\n login_required = getattr(cls, \"login_required\", True)\n if (\n bui.auth != \"none\"\n and login_required\n and not bui.config.get(\"LOGIN_DISABLED\", False)\n ):\n if current_user.is_anonymous:\n abort(403)\n return func(*args, **kwargs)\n\n return decorated_view", "def ajax_required(f):\n def wrap(request, *args, **kwargs):\n if not request.is_ajax():\n #return 
HttpResponseBadRequest()\n return HttpResponseRedirect(settings.BASE_URL+'404')\n return f(request, *args, **kwargs)\n wrap.__doc__=f.__doc__\n wrap.__name__=f.__name__\n return wrap", "def anonymous_required(func):\n\n async def wrapped(self, *args, **kwargs):\n if self.request.user is not None:\n add_message(self.request, \"Please log-out to continue.\")\n redirect(self.request, \"index\")\n return await func(self, *args, **kwargs)\n\n return wrapped", "def decorator(view_func):\n @wraps(view_func, assigned=available_attrs(view_func))\n def _wrapped_view(request, *args, **kwargs):\n if request.method == 'POST':\n if not request.POST.get('dataset_revision_id', request.POST.get('datastream-dataset_revision_id', None)):\n raise DatasetRequiredException()\n\n return view_func(request, *args, **kwargs)\n\n return _wrapped_view", "def superuser_only(view_func):\n def _inner(request, *args, **kwargs):\n if not request.user.is_superuser:\n raise PermissionDenied\n return view_func(request, *args, **kwargs)\n return _inner", "def test_csrf_no_inject(self, mock_csrf):\n mw = CSRFHeaderInject()\n request = MagicMock()\n response = MagicMock()\n mw.process_response(request, response)\n response.set_cookie.assert_not_called()", "def ajax_required(func):\n @wraps(func)\n def wrapper(request, *args, **kwargs):\n if not request.is_ajax():\n return HttpResponseBadRequest()\n return func(request, *args, **kwargs)\n return wrapper", "def test_csrf():\n\n # The authenticate method must not be altered for this test to be valid.\n assert (\n SessionAuthentication.authenticate\n is CsrfExemptSessionAuthentication.authenticate\n )\n\n # The `enforce_csrf` method should just pass with any request.\n assert CsrfExemptSessionAuthentication().enforce_csrf(\"foo\") is None", "def __wrapper(request, *args, **kwds):\n if request.user_is_admin:\n return func(request, *args, **kwds) # pylint: disable-msg=W0142\n else:\n return utility.forbidden(\n request,\n error_message='You must be an administrator to view this page.')", "def require_visitor(func):\n\n @wraps(func)\n def decorator(*args, **kwargs):\n if g.user:\n return redirect(url_for('site.home'))\n return func(*args, **kwargs)\n\n return decorator", "def safe_middleware(cls):\n token = \"smt_%s_%s\" % (cls.__name__, id(cls))\n\n unsafe_process_request = cls.process_request\n def guarded_process_request(self, request):\n try:\n result = unsafe_process_request(self, request)\n except:\n raise\n else:\n setattr(request, token, True)\n return result\n cls.process_request = guarded_process_request\n\n unsafe_process_response = cls.process_response\n def guarded_process_response(self, request, response):\n if hasattr(request, token):\n return unsafe_process_response(self, request, response)\n else:\n return response\n cls.process_response = guarded_process_response\n\n return cls", "def non_admin_required(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n isAdmin = get_current_user()[\"isAdmin\"]\n if isAdmin == True:\n return jsonify({\"messsage\": \"Only Non admin can access this route\"}), 401\n return func(*args, **kwargs)\n return wrapper", "def session_required(view):\n def validation(*args, **kwargs):\n request = args[0]\n if request.session.get('email', None):\n return view(*args, **kwargs)\n else:\n return HttpResponseForbidden('403 Forbbiden, You have to login first to use this amazing app')\n return validation", "def token_required(func):\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n #wrapper of current func\n\n #take csrf-token from cookies\n 
token = request.cookies.get('token')\n if not token:\n #returning login page\n flash(\"Authentication required\", category='danger')\n return redirect(url_for('login'))\n #decoding token\n try:\n uuid = jwt.decode(token, app.config['SECRET_KEY'], algorithms=[\"HS256\"])['user_id']\n except:\n #returning login page\n flash(\"Token timeout\", category='danger')\n return redirect(url_for('login'))\n #get current user\n user = User.query.filter_by(uuid=uuid).first()\n if not user:\n #returning login page\n flash(\"Profile error\", category='danger')\n return redirect(url_for('login'))\n return func(self, *args, **kwargs)\n\n return wrapper", "def post_only(f):\n \n def wrapped_f(request):\n if request.method != \"POST\":\n response = HttpResponse(json.dumps({\"error\": \"This method only accepts posts!\"}))\n \n response.status_code = 500\n \n return response\n \n return wrapped_f", "def no_append_slash(view_func):\n # view_func.should_append_slash = False would also work, but decorators are\n # nicer if they don't have side effects, so return a new function.\n @wraps(view_func)\n def wrapper_view(*args, **kwargs):\n return view_func(*args, **kwargs)\n\n wrapper_view.should_append_slash = False\n return wrapper_view", "def login_required(view):\n\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if g.user is None:\n return redirect(url_for(\"login\"))\n\n return view(**kwargs)\n\n return wrapped_view", "def login_required(func):\n @wraps(func)\n def decorated_view(*args, **kwargs):\n if current_app.login_manager._login_disabled:\n return func(*args, **kwargs)\n elif not current_user.is_authenticated and not current_user.is_active:\n return current_app.login_manager.unauthorized()\n return func(*args, **kwargs)\n\n return decorated_view", "def requires_entrepreneur(func):\n def decorator(request, *args, **kwargs):\n if request.user.is_authenticated() and not request.user.is_entrepreneur():\n return redirect('dashboard')\n else:\n return func(request, *args, **kwargs)\n return decorator", "def anonymous_required(func):\n\n async def wrapped(self, *args, **kwargs):\n if self.request.user is not None:\n print(\"Login please.\")\n # redirect(self.request, 'index')\n\n return await func(self, *args, **kwargs)\n\n return wrapped", "def retain_csrf_token(req):\n session = req.environ.get('rex.session', {})\n csrf_token = session.get('_csrf_token')\n if not csrf_token:\n csrf_token = session['_csrf_token'] = b2a(os.urandom(16))\n return csrf_token", "def exclude():\n data = list(request.files.values())[0].file.read() if len(request.files) else request.body.read()\n return excludeView(data, request.params)", "def login_required(view):\n\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if g.user is None:\n return redirect(url_for(\"auth.login\"))\n\n return view(**kwargs)\n\n return wrapped_view", "def login_required(view):\n\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if g.user is None:\n return redirect(url_for(\"auth.login\"))\n\n return view(**kwargs)\n\n return wrapped_view", "def as_view(cls, *args, **kwargs):\n view = super().as_view(*args, **kwargs)\n return login_required(view)", "def login_required(view):\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if APP_KEY_ACCESS_TOKEN not in request.cookies:\n return redirect(GM_OAUTH_URL)\n return view(**kwargs)\n return wrapped_view", "def allow_unconfirmed_account(view_function):\n\t@wraps(view_function) # Tells debuggers that is is a function wrapper\n\tdef decorator(*args, **kwargs):\n\t\tauth = 
current_app.auth\n\t\tallowed = False\n\t\t# User must be logged in (no need to check verified)\n\t\tif current_user.is_authenticated:\n\t\t\tallowed=True\n\t\tif not allowed:\n\t\t\t# Redirect to unauthenticated page\n\t\t\treturn auth.unauthenticated()\n\t\t# It's OK to call the view\n\t\treturn view_function(*args, **kwargs)\n\treturn decorator", "def login_required_ajax(function=None):\n def _decorator(view_func):\n def _wrapped_view(request, *args, **kwargs):\n if not request.is_ajax():\n return HttpResponseBadRequest()\n if not request.user.is_authenticated:\n from establishment.errors.errors import BaseError\n return BaseError.USER_NOT_AUTHENTICATED\n return view_func(request, *args, **kwargs)\n return _wrapped_view\n\n if function is None:\n return _decorator\n else:\n return _decorator(function)", "def login_required(view):\n\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if g.user is None:\n return redirect(url_for('auth.login'))\n\n return view(**kwargs)\n\n return wrapped_view", "def decorator(view_func):\n @wraps(view_func, assigned=available_attrs(view_func))\n def _wrapped_view(request, *args, **kwargs):\n if request.auth_manager.has_privilege(privilege):\n return view_func(request, *args, **kwargs)\n else:\n raise InsufficientPrivilegesException(required_privileges=[privilege])\n\n return _wrapped_view", "def __wrapper(request, *args, **kwds):\n if request.profile.is_superuser:\n return func(request, *args, **kwds) # pylint: disable-msg=W0142\n else:\n return utility.forbidden(\n request,\n error_message='You must be a superuser to view this page.')", "def not_allowed(*args, **kwargs):\n return HttpResponseBehaviour(HttpResponseNotAllowed, *args, **kwargs)", "def require_auth(view):\n def wrapper(request, *args):\n \n if not request.session.get('user_id', False):\n return HttpResponseRedirect(\"/clanovi/login/\")\n \n return view(request, *args) \n return wrapper", "def setup_csrf_protection(app, cookie_name='r3csrfprot'):\n\n middleware = CSRFProtectionMiddleware(app, cookie_name)", "def login_required(view):\r\n\r\n @functools.wraps(view)\r\n @jwt_required() # <-- ensures that the Authorization header contains a valid JWT\r\n def wrapped_view(*args, **kwargs):\r\n # load user instance into G object\r\n try:\r\n # 'user_data' is the object passed as identity when creating access_token\r\n user_data: Union[str, None] = get_jwt_identity()\r\n except Exception as error:\r\n logging.error('Failed to get JWT identity')\r\n logging.error(error)\r\n user_data = None\r\n setattr(g, 'user', None if user_data is None else User(**user_data))\r\n\r\n if getattr(g, 'user', None) is None:\r\n abort(401, 'Not authenticated. 
Login is required')\r\n\r\n return view(*args, **kwargs)\r\n\r\n return wrapped_view", "def login_required(view):\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if g.user is None:\n return redirect(url_for(\"handlers.login\"))\n\n return view(**kwargs)\n\n return wrapped_view", "def test_dispatch_with_pre_dispatch_response(self):\n class MyView(PrePostDispatchViewMixin, View):\n def pre_dispatch(self, *args, **kwargs):\n return HttpResponseForbidden()\n\n view = MyView.as_view()\n response = view(RequestFactory().request())\n\n self.assertIsInstance(response, HttpResponseForbidden)", "def group_required(self, group):\n\n def decorator(view):\n @functools.wraps(view)\n def decorated(*args, **kwargs):\n log.info(\"Trying to get access to resource: %s protected by group: %s\", view.__name__, group)\n if request.method == 'POST':\n token = request.form['token']\n if self.development or self.group_authenticated(token, group):\n return view(*args, **kwargs)\n else:\n log.warning(\"User has not been authorized to get access to resource: %s\", view.__name__)\n else:\n log.error(\"Bad request type! Expected 'POST', actual '%s'\", request.method)\n\n return abort(403)\n\n return decorated\n return decorator", "def api_login_required(func):\n\n @wraps(func)\n def decorated_view(*args, **kwargs):\n \"\"\"decorator\"\"\"\n if request.method in EXEMPT_METHODS: # pragma: no cover\n return func(*args, **kwargs)\n # 'func' is a Flask.view.MethodView so we have access to some special\n # params\n cls = func.view_class\n login_required = getattr(cls, \"login_required\", True)\n if (\n bui.auth != \"none\"\n and login_required\n and not bui.config.get(\"LOGIN_DISABLED\", False)\n ):\n if not current_user.is_authenticated:\n if request.headers.get(\"X-From-UI\", False):\n abort(403)\n return Response(\n \"Could not verify your access level for that URL.\\n\"\n \"You have to login with proper credentials\",\n 401,\n {\"WWW-Authenticate\": 'Basic realm=\"Login Required\"'},\n )\n return func(*args, **kwargs)\n\n return decorated_view", "def pre_process_request(self, req, handler):\n\n if self.match_request(req):\n # We disable CSRF protection here and force ourselves as a handler\n req.form_token = None\n return self\n \n return handler", "def login_required_no_redirect(view_func):\n @wraps(view_func)\n def wrapper(request, *args, **kwargs):\n if request.user.is_authenticated:\n return view_func(request, *args, **kwargs)\n\n {% for sec_desc, sec_type in security_defs|dictsort(true) %}\n {% if sec_desc == 'basic' %}\n if \"HTTP_AUTHORIZATION\" in request.META:\n auth = request.META[\"HTTP_AUTHORIZATION\"].split()\n if len(auth) == 2:\n # NOTE: We only support basic authentication for now.\n if auth[0].lower() == \"basic\":\n base_val = base64.b64decode(auth[1])\n if sys.version_info[0] > 2:\n base_val = base_val.decode()\n uname, passwd = base_val.split(\":\")\n user = authenticate(username=uname, password=passwd)\n if user and user.is_active:\n login(request, user)\n request.user = user\n return view_func(request, *args, **kwargs)\n {% elif sec_desc == \"apiKey\" and sec_type[\"desc\"] == \"JWT\" %}\n if authenticate_jwt(request):\n return view_func(request, *args, **kwargs)\n {% elif sec_desc == \"apiKey\" and sec_type[\"desc\"] != \"JWT\" %}\n if \"HTTP_X_API_KEY\" in request.META:\n key = request.META[\"HTTP_X_API_KEY\"]\n # shared keys for development, must be defined in settings.py\n if key in settings.ALLOWED_API_KEYS:\n User = get_user_model()\n user = 
User.objects.get(username=settings.ALLOWED_API_KEYS[key])\n login(request,user)\n request.user = user\n return view_func(request, *args, **kwargs)\n {% endif %}\n {% endfor %}\n return HttpResponse(\"Unauthorized\", status=401)\n\n return wrapper", "def csrf_protect_app(app):\n\n @app.before_request\n def csrf_protect():\n if request.path == \"/api/login\" or session.get('bypass_csrf', False):\n # Bypass csrf protection for trusted api sessions (see /api/login_for_apps):\n return\n if request.method == \"POST\":\n token = session.get('_csrf_token', None)\n header = request.headers.get('X-csrf', None)\n if not token or not header or token != header:\n abort(make_response(\"Invalid x-csrf token\", 403))\n\n def generate_csrf_token():\n if '_csrf_token' not in session:\n session['_csrf_token'] = random_token()\n return session['_csrf_token']\n\n app.jinja_env.globals['csrf_token'] = generate_csrf_token", "def requires_dataset():\n def decorator(view_func):\n \"\"\" for registred and logged user. NO redirect to login\"\"\"\n @wraps(view_func, assigned=available_attrs(view_func))\n def _wrapped_view(request, *args, **kwargs):\n if request.method == 'POST':\n if not request.POST.get('dataset_revision_id', request.POST.get('datastream-dataset_revision_id', None)):\n raise DatasetRequiredException()\n\n return view_func(request, *args, **kwargs)\n\n return _wrapped_view\n return decorator", "def forbidden(*args, **kwargs):\n return HttpResponseBehaviour(HttpResponseForbidden, *args, **kwargs)", "def trusted_host_required(view_func):\n def decorator(request, *args, **kwargs):\n if not hasattr(settings, 'TRUSTED_HOSTS') or not settings.TRUSTED_HOSTS:\n settings.TRUSTED_HOSTS = []\n ip = get_client_ip(request)\n allowed = IpRangeList(*settings.TRUSTED_HOSTS)\n if ip in allowed:\n return view_func(request, *args, **kwargs)\n response = HttpResponseForbidden(\"Access denied\")\n return response\n return decorator", "def check_request(views_func):\n @wraps(views_func)\n def wrapper(*args, **kwargs):\n try:\n return views_func(*args, **kwargs)\n except (KeyError, ValueError) as ex:\n return HttpResponseBadRequest(str(ex))\n return wrapper", "def test_decorator_middleware(self):\n request = self.factory.get(reverse('contact:home'))\n\n # middleware don't store request to decorated function\n decorated_func = not_record_request(home_page)\n request.user = self.user\n self.middleware.process_view(request, decorated_func)\n rs = RequestStore.objects.all()\n self.assertQuerysetEqual(rs, [])\n\n # middleware store request to undecorated function\n request.user = self.user\n self.middleware.process_view(request, home_page)\n rs = self.request_store.objects.all()\n self.assertEquals(len(rs), 1)\n only_one_rs = rs[0]\n self.assertEqual(only_one_rs.path, reverse('contact:home'))\n\n # middleware store request to undecorated function if user is anonymous\n request.user = AnonymousUser()\n self.middleware.process_view(request, home_page)\n rs = self.request_store.objects.all()\n self.assertEquals(len(rs), 2)\n only_one_rs = rs[1]\n self.assertEqual(only_one_rs.path, reverse('contact:home'))", "def test_no_permission(self):\n req = self.req(\"post\", \"/the/url\", data={\"action-doit\": \"3\"})\n req.user = Mock()\n req.user.has_perm.return_value = False\n\n res = self.view(\n req,\n decorator=self.actions(\n self.mock_model, [\"doit\"], permission=\"do_things\")\n )\n\n self.assertEqual(res.status_code, 403)\n req.user.has_perm.assert_called_with(\"do_things\")", "def login_web_required(view_func):\r\n 
@wraps(view_func, assigned=available_attrs(view_func))\r\n def _wrapped_view_func(request, *args, **kwargs):\r\n if hasattr(request, \"session\") and request.session.get('is_logon', False) and request.user.is_active:\r\n return view_func(request, *args, **kwargs)\r\n else:\r\n return HttpResponse(FailResponse(u'请先登录'))\r\n return _wrapped_view_func", "def authenticated(view):\n\n def decorated_view(request: HttpRequest, *args, **kwargs):\n if not request.user.is_authenticated:\n return HttpResponse(\"Unauthorized\", status=401)\n return view(request, *args, **kwargs)\n\n return decorated_view", "def admin_user_only(view):\r\n @google_login_required\r\n def wrapped(request, *args, **kwargs):\r\n if users.is_current_user_admin():\r\n return view(request, *args, **kwargs)\r\n context = RequestContext(request);\r\n return rtr( 'access_limited.html', context,None )\r\n return wraps(view)(wrapped)", "def test_view_disabled(self, method, url):\n response = getattr(self.client, method)(url)\n assert response.status_code == 403", "def csrf_failure(request, reason=\"\"):\n return csrf.csrf_failure(request, reason, template_name=get_template_name(request, \"403_csrf.html\"))", "def includeme(config):\n config.add_subscriber(on_GET_request_setup_csrf_cookie, NewResponse)\n config.set_default_csrf_options(require_csrf=True, header=HEADER_NAME)", "def validate_twilio_request(func):\n @wraps(func)\n def decorated_function(request, *args, **kwargs):\n # Create an instance of the RequestValidator class\n validator = RequestValidator(os.environ.get('TWILIO_AUTH_TOKEN'))\n\n # Validate the request using its URL, POST data,\n # and X-TWILIO-SIGNATURE header\n request_valid = validator.validate(\n request.build_absolute_uri(),\n request.POST,\n request.META.get('HTTP_X_TWILIO_SIGNATURE', ''))\n\n # Continue processing the request if it's valid, return a 403 error if\n # it's not\n if request_valid:\n return func(request, *args, **kwargs)\n else:\n return HttpResponseForbidden()\n return decorated_function", "def login_required(f):\n @functools.wraps(f)\n def wrap(*args, **kwargs):\n if not user_session.is_auth:\n raise Forbidden()\n return f(*args, **kwargs)\n return wrap", "def requires_auth(f):\n @functools.wraps(f)\n def wrapper(self, *args, **kwargs):\n if not users.get_current_user():\n try:\n self.DenyAccess()\n except Exception, e:\n self.handle_exception(e, self.app.debug)\n finally:\n self.session_store.save_sessions(self.response)\n else:\n return f(self, *args, **kwargs)\n return wrapper", "def __call__(self, f):\n @wraps(f)\n async def wrapped(request):\n # set the area property inside the request object\n request.area = self.name\n try:\n await self.before_request(request)\n except InvalidCultureException:\n # redirect to a proper url\n return HTTPFound(self.get_fallback_url(request))\n except InvalidAntiforgeryTokenException:\n raise HTTPForbidden()\n\n return await f(request)\n return wrapped", "def access_allowed(test_func, redirect_url=None):\n def decorate(view_func):\n def wrapper(request, *args, **kwargs):\n if test_func(request.user):\n return view_func(request, *args, **kwargs)\n raise PermissionDenied\n return update_wrapper(wrapper, view_func)\n return decorate", "def admin_required(func):\n\n @wraps(func)\n def decorated_view(*args, **kwargs):\n if current_app.login_manager._login_disabled:\n return func(*args, **kwargs)\n elif not current_user.is_authenticated and not current_user.is_active:\n return current_app.login_manager.unauthorized()\n elif not current_user.is_staff and not 
current_user.is_superuser:\n return current_app.login_manager.unauthorized()\n return func(*args, **kwargs)\n\n return decorated_view", "def csrf_protect():\n if request.method == \"POST\" and request.path[0:5] != \"/api/\":\n token = login_session.pop('_csrf_token', None)\n request_token = request.form.get('_csrf_token')\n print(\"Comparing server token [\" + token + \"]\")\n print(\"with client token [\" + request_token + \"]\")\n if not token or token != request_token:\n print(\"Tokens do not match! Aborting..\")\n abort(403)\n print(\"Tokens match - accepted\")", "def skip_cross_domain_referer_check(request):\n is_secure_default = request.is_secure\n request.is_secure = lambda: False\n try:\n yield\n finally:\n request.is_secure = is_secure_default", "def disallow_during_maintenance(view_func):\n @functools.wraps(view_func)\n def wrapped_view(*args, **kwargs):\n if settings.SYSTEM_MAINTENANCE_NO_CHANGES:\n raise ServiceUnavailable()\n return view_func(*args, **kwargs)\n return wrapped_view", "def custom_permission_required(permission):\n\n def decorator(view_func):\n @wraps(view_func, assigned=available_attrs(view_func))\n def _wrapped_view(request, *args, **kwargs):\n\n if not request.user.is_authenticated:\n return redirect(\n reverse('login'),\n extra_context={'next': request.path},\n )\n if request.user.has_perm(permission):\n return view_func(request, *args, **kwargs)\n raise PermissionDenied()\n return _wrapped_view\n return decorator" ]
[ "0.81446385", "0.8072049", "0.7513782", "0.7303216", "0.72622657", "0.71674573", "0.67435366", "0.6682247", "0.6664801", "0.6651317", "0.6612883", "0.6552151", "0.64375037", "0.6431673", "0.6428775", "0.63179326", "0.6248671", "0.62409854", "0.621281", "0.6179554", "0.6161637", "0.6110333", "0.60795164", "0.6037404", "0.6036068", "0.602158", "0.6012695", "0.59822017", "0.59407276", "0.5940713", "0.59272116", "0.59261614", "0.5905881", "0.5891927", "0.5863886", "0.5843864", "0.5831662", "0.58271986", "0.5812923", "0.5801284", "0.5788532", "0.57680863", "0.5760137", "0.5739864", "0.5738631", "0.5725434", "0.5724588", "0.5712502", "0.56935275", "0.5688718", "0.5680299", "0.5667452", "0.56658477", "0.564393", "0.56400603", "0.5638803", "0.5631378", "0.5619038", "0.56015897", "0.56015897", "0.55978894", "0.5596139", "0.55940855", "0.55925363", "0.55908364", "0.5586874", "0.5581752", "0.5577023", "0.5558381", "0.55371493", "0.5533538", "0.55329895", "0.553053", "0.5530166", "0.5529542", "0.5526656", "0.55107516", "0.5509536", "0.549553", "0.54936653", "0.5492553", "0.54828066", "0.54626477", "0.5445442", "0.5441895", "0.5439671", "0.54331005", "0.542543", "0.5423781", "0.5417418", "0.54128957", "0.5409199", "0.5408068", "0.54077756", "0.54062784", "0.5396619", "0.5391927", "0.5389026", "0.5388038", "0.5386113" ]
0.61926156
19
A decorator that sets the error response handler.
def error_handler(self, view):
        self._error_response = view
        return view
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def errorhandler(self, status_code_or_exception_class):\n def decorated(f):\n self.error_handlers[status_code_or_exception_class] = f\n return f\n return decorated", "def error(self, func):\n self.error_handler = func\n return func", "def on_error(self, namespace=''):\n def decorator(exception_handler):\n if not callable(exception_handler):\n raise ValueError('exception_handler must be callable')\n self.exception_handlers[namespace] = exception_handler\n return decorator", "def error_handler_middleware(app):\n def wsgi_app(environ, start_response):\n try:\n return app(environ, start_response)\n except Exception, e:\n logging.exception(e)\n # ... display a custom error message ...\n response = webapp.Response()\n response.set_status(500)\n response.out.write('Ooops! An error occurred...')\n response.wsgi_write(start_response)\n return ['']\n\n return wsgi_app", "def error_aware(method):\n\n def _request(request_handler, *args):\n \"\"\"Surround request_handler.method(*args) with try/except for errors.\n\n Args:\n request_handler: Request handler which method is being called.\n \"\"\"\n try:\n method(request_handler, *args)\n except Error, error:\n response_body = {\n 'error': {\n 'status': error.code,\n 'message': error.message\n }\n }\n request_handler.response.clear()\n request_handler.response.set_status(error.code)\n util.write_response(request_handler, response_body)\n return _request", "def register_error_handler(app):\n def errorhandler(error):\n response = error.to_json()\n response.status_code = error.status_code\n print(response.status_code)\n return response\n\n app.errorhandler(ExceptionHandler)(errorhandler)", "def error_handler(self):\n return self._error_handler", "def error(self, handler):\n pass", "def add_errorhandler(self, cbl: typing.Callable, errorcode: int):\n # for simplicity sake, wrap it in a route.\n rtt = self.wrap_route(cbl, should_invoke_hooks=False)\n self.errorhandlers[errorcode] = rtt\n rtt.bp = self\n return rtt", "def handle_errors(func):\n def wrapper(*args, **kwargs):\n try:\n response = func(*args, **kwargs)\n except Exception as e:\n response = jsonify({\"success\": False, \"message\": str(e)})\n return response\n wrapper.func_name = func.func_name\n return wrapper", "def after_error_request(self, f):\n self.after_error_request_handlers.append(f)\n return f", "def register_error_handlers(self):\n\n def error_handler(error):\n if not isinstance(error, exceptions.HTTPException):\n error = exceptions.InternalServerError()\n return response.Response(bootstrap.card(body=_.span[_.p(style='color:#888')[error.description or ''],\n _.img(src=flask.url_for('mara_app.static',\n filename='mara.jpg'),\n style='margin-top:30px;max-width:100%;')]),\n title=f'{error.code} {error.name}',\n status=error.code)\n\n for cls in exceptions.HTTPException.__subclasses__():\n self.register_error_handler(cls, error_handler)", "def error_handler(func):\n def handle_caller():\n try:\n response, headers = func()\n if not isinstance(response, dict):\n raise ValueError(\n \"Call function %s (method %s) returned non-dict\"\n % (func.methods_to_viewfunc.get(request.method),\n request.method)\n )\n if 'success' not in response:\n response['success'] = True\n resp = jsonify(response)\n resp.headers.extend(headers)\n return resp\n except APICodingError:\n APP.logger.exception(\"API Coding error occured\")\n return jsonify({\n 'success': False,\n 'error': 'Internal coding error',\n }), 500\n except APIError as err:\n APP.logger.warning(\"API error occured, code: %s, msg: %s\",\n 
err.code,\n err.internal or err.message)\n response = {'success': False,\n 'error': err.message}\n return jsonify(response), err.code, err.headers\n except Exception:\n APP.logger.exception(\"Unexpected error during request processing\")\n return jsonify({\n 'success': False,\n 'error': 'Internal server error',\n }), 500\n return handle_caller", "def errorhandler(self, code: int):\n\n def _inner(cbl: typing.Callable):\n self.add_errorhandler(cbl, code)\n return cbl\n\n return _inner", "def error(self, func):\r\n if not asyncio.iscoroutinefunction(func):\r\n raise TypeError(\"The local error handler must be an async function\")\r\n self._error_handler = func\r\n return func", "def use_error_page(func):\n @wraps(func)\n def wrapper(*args, **kwrds):\n try:\n return func(*args, **kwrds)\n except:\n try:\n etype, evalue, etrace = sys.exc_info()\n app_logger().error(\"ERROR HANDLER => %s: %s\\n%s\\n\", etype, evalue, etrace)\n errfmt = traceback.format_exception(etype, evalue, etrace)\n txtpre = \"Unexpected error:\"\n txtpost = '\\n'.join(errfmt) if current_app.debug else evalue\n return template(\"error.html\", errortext=txtpre+txtpost)\n except:\n app_logger().error(\"ERROR IN ERROR HANDLER - PUNTING - %s: %s\\n%s\\n\" % sys.exc_info())\n return abort(500) # Double Whoops!\n return wrapper", "def error_handler(response, **kwargs):\n if 400 <= response.status_code <= 499:\n message = response.json()['error_description'] \\\n if 'error_description' in response.json() \\\n else response.json()['error_detail']\n raise ClientError(response, message)\n\n elif 500 <= response.status_code <= 599:\n raise ServerError(response)\n\n return response", "def init_error_handler(app):\n app.register_error_handler(Exception, global_handler)\n return app", "def _catch_error(f):\n @wraps(f) \n def wrap(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n except Exception as e:\n raise HTTPBadRequest(reason=e)\n return wrap", "def catch_errors(f):\n\n @functools.wraps(f)\n def func(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n except HorizonError as exception:\n print_failure(f\"{str.upper(f.__name__)} request failed to successfully execute. 
{exception.status_code if exception else None}\")\n if exception and exception.message:\n print_server_error_details(exception.message)\n raise\n\n return func", "def setErrorDelegate(self, func):\r\n # Assign the user function to the internal callback handle\r\n self.errorDelegate = func", "def error_handler(call_on_errors):\n assert callable(call_on_errors)\n def entangle(method):\n @functools.wraps(method)\n def wrapper(self, *args, **kwargs):\n try:\n return method(self, *args, **kwargs)\n except InputInvalidException:\n return call_on_errors(self, *args, **kwargs)\n return wrapper\n return entangle", "def wrapper_view_error(\n view: Any = None, class_exception: Any = None, status: int = None\n) -> Any:\n\n def _decorate(function):\n @functools.wraps(function)\n def wrapped_function(*args, **kwargs):\n try:\n return function(*args, **kwargs)\n except class_exception as obj_exception:\n return Response(data={\"error\": obj_exception.message}, status=status)\n\n return wrapped_function\n\n if view:\n return _decorate(view)\n return _decorate", "def get_error_handler(self):\n\t\treturn self.error_handler", "def _catch_error(f):\n @wraps(f)\n def wrap(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n except Exception as e:\n raise HTTPBadRequest(reason=e)\n return wrap", "def _set_error_handler(self):\n if self.on_error:\n error_step = self.context.root.path_to_step(self.on_error)\n self._on_error_handler = error_step.run", "def register_errorhandlers(app):\n app.errorhandler(ApiError)(handle_api_error)\n app.errorhandler(HTTP_CODES['BAD_REQUEST'])(handle_http_error)\n app.errorhandler(HTTP_CODES['NOT_FOUND'])(handle_http_error)\n app.errorhandler(HTTP_CODES['METHOD_NOT_ALLOWED'])(handle_http_error)\n app.errorhandler(HTTP_CODES['INTERNAL_SERVER_ERROR'])(handle_http_error)", "def make_error_catcher(app, error_template):\n def callback(func):\n @wraps(func)\n def inner(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except HTTPException:\n raise\n except TemplateError as exc:\n app.logger.error(\"Caught exception:\\n{0}\".format(exc.text))\n trace = exc.text\n except Exception:\n app.logger.exception(\"Caught exception:\")\n trace = format_exc()\n return render_template(error_template, traceback=trace), 500\n return inner\n return callback", "def decorator(func):\n func.error_message = msg\n return func", "def handle_request(fun):\n\n def wrapper(self, *args, **kwargs):\n \"\"\"\n We raise an exception when\n the code on the client side fails\n Server side errors are taken care of\n through response codes\n \"\"\"\n try:\n return fun(self, *args, **kwargs)\n except Exception as req_exception:\n self.logger.exception(\"internal error\")\n raise ClientSideError(str(req_exception))\n\n return wrapper", "def response_transform_decorator(original_func):\n def response_transformer_wrapper(*args, **kwargs):\n \"\"\"\n Log errors and apply transformation in response_handler_func\n \"\"\"\n try:\n response = original_func(*args, **kwargs)\n response.raise_for_status()\n\n except requests.exceptions.HTTPError:\n help_string = ('Please consult the Coursera Data '\n 'Exports Guide for further assistance: '\n 'https://partner.coursera.help/hc/en-us/articles/360021121132.') # noqa\n\n if (response.status_code == 403):\n help_string = ('Please authorize this application '\n 'by running:\\n'\n '\\t$ courseraoauth2client config authorize --app manage_research_exports\\n' # noqa\n 'See https://github.com/coursera/courseraoauth2client ' # noqa\n 'for more information on authorization.\\n'\n 
'For further assistance, consult the '\n 'Coursera Data Exports Guide '\n 'https://partner.coursera.help/hc/en-us/articles/360021121132.') # noqa\n\n logging.error(\n 'Request to {url} with body:\\n\\t{body}\\nreceived response'\n ':\\n\\t{text}\\n'\n '{help_string}\\n'\n .format(url=response.url,\n text=response.text,\n body=(response.request and response.request.body),\n help_string=help_string))\n raise\n\n return response_transformer(response)\n return response_transformer_wrapper", "def errors_wrapped(func):\n\n async def wrapped(self, *args, **kwargs):\n try:\n return await func(self, *args, **kwargs)\n except AuthenticationRequiredError as ex:\n logger.warning(f\"Trying to use unauth access: {ex}\")\n add_message(self.request, \"LogIn to continue.\")\n redirect(self.request, \"sign_in\")\n\n except BaseApplicationError as ex:\n message = getattr(ex, \"message\", None) or str(ex)\n details = getattr(ex, \"details\", None)\n if details:\n message = f\"{message}: {details}\"\n\n add_message(self.request, message, kind=\"error\")\n raise web.HTTPFound(self.request.path)\n\n return wrapped", "def errors_setup(app: Flask):\n @app.errorhandler(500)\n def error_handler_500(error):\n return create_error_response({'message': str(error)}, 500)\n\n @app.errorhandler(404)\n def error_handler_404(_):\n return create_error_response({'message': 'Not found'}, 404)\n\n @app.errorhandler(405)\n def error_handler_405(_):\n return create_error_response({'message': 'Method not allowed on resource uri'}, 405)\n\n @app.errorhandler(400)\n def error_handler_400(error):\n return create_error_response({'message': error}, 400)", "def error(self):\n return self._decorator_wrapper(EventName.error)", "def _request(request_handler, *args):\n try:\n method(request_handler, *args)\n except Error, error:\n response_body = {\n 'error': {\n 'status': error.code,\n 'message': error.message\n }\n }\n request_handler.response.clear()\n request_handler.response.set_status(error.code)\n util.write_response(request_handler, response_body)", "def __call__(self, http_error):\n http_error_status = str(http_error.status)\n if hasattr(self, 'error%s' % http_error_status):\n handler_method = getattr(self, 'error%s' % http_error_status)\n elif hasattr(self, 'error%sxx' % http_error_status[0]):\n handler_method = getattr(self, 'error%sxx' % http_error_status[0])\n else:\n handler_method = self.error\n return handler_method(http_error)", "def set_error_handler(self,error_handler):\n\t\tif(callable(error_handler)):\n\t\t\tself.error_handler = error_handler\n\t\telse:\n\t\t\traise NotCallableException(\"{} object is not callable\".format(type(error_handler)))", "def do(self, *args, **kwargs):\n try:\n return super().do(*args, **kwargs)\n except ResponseError as e:\n self.handle_error(e)", "def logged_errors(func):\n @wraps(func)\n def wrapper(*args, **kwrds):\n try:\n return func(*args, **kwrds)\n except:\n etype, evalue, etrace = sys.exc_info()\n app_logger().error(\"ERROR HANDLER => %s: %s\\n%s\\n\", etype, evalue, etrace)\n return abort(500)\n return wrapper", "def register_error_handlers(app: Flask) -> None:\n app.errorhandler(Forbidden)(jsonify_exception)\n app.errorhandler(Unauthorized)(jsonify_exception)\n app.errorhandler(BadRequest)(jsonify_exception)\n app.errorhandler(InternalServerError)(jsonify_exception)\n app.errorhandler(ServiceUnavailable)(jsonify_exception)\n app.errorhandler(NotFound)(jsonify_exception)\n app.errorhandler(MethodNotAllowed)(jsonify_exception)", "def with_handler(f):\n\n @wraps(f)\n def wrapped(self, 
*args, **kw):\n try:\n return f(self, *args, **kw)\n except pyvisa.VisaIOError:\n self._flush_all_buffers()\n\n raise\n\n wrapped.__name__ = f.__name__\n wrapped.__doc__ = f.__doc__\n\n return wrapped", "def error_handler(self, handler):\n if not self.opened():\n handler = handler or util.noop\n self._error_handler = enums.JLinkFunctions.LOG_PROTOTYPE(handler)\n self._dll.JLINKARM_SetErrorOutHandler(self._error_handler)", "def response(code):\n\n def decorator(func):\n func.wsgi_code = code\n return func\n return decorator", "def bofore_response_handle(self, func):\n self.before_response.append(func)\n return func", "def _call_error_handler(self, event, err, **kwargs):\n if self._on_error_handler:\n event.error = str(err)\n event.origin_state = self.fullname\n return self._on_error_handler(event)", "def configure_error_handlers(app):\n\n def render_error(error):\n return (render_template('errors/%s.html' % error.code,\n title=error_messages[error.code], code=error.code), error.code)\n\n for (errcode, title) in error_messages.iteritems():\n app.errorhandler(errcode)(render_error)", "def json_response(f):\n \n def wrapped(*args, **kwargs):\n result = f(*args, **kwargs)\n \n response = HttpResponse(json.dumps(result))\n \n if type(result) == dict and \"error\" in result:\n response.status_code = 500\n \n \n return response", "def register_error_handlers(app: Flask):\n app.errorhandler(HTTPException)(jsonify_http_exception)\n\n # Don't jsonify an exception in dev mode\n if not app.config.get('DEBUG'):\n app.errorhandler(Exception)(jsonify_unknown_exception)", "def internal_error_handler(error):\r\n return render_template('error.500.html')", "def on_exception(self):\n\n def decorator(coro):\n self._hooks.append((\"exception\", coro))\n return coro\n\n return decorator", "def response_json_error_info(func):\n def wrapper(request):\n try:\n return func(request)\n except Exception as ex:\n return get_json_response({\n \"status\": \"error\",\n \"error_info\": str(ex),\n \"trace_back\": traceback.format_exc()\n })\n\n return wrapper", "def __call__(self, *args, **kwargs):\r\n return self.error(*args, **kwargs)", "def on_response_validation_error(err):\n return jsonify(message='Bad response'), 500", "def fl_set_error_handler(pyfn_ErrorFunc):\n #FL_ERROR_FUNC = cty.CFUNCTYPE(None, xfdata.STRING, xfdata.STRING)\n _fl_set_error_handler = library.cfuncproto(\n library.load_so_libforms(), \"fl_set_error_handler\",\\\n None, [xfdata.FL_ERROR_FUNC],\\\n \"\"\"void fl_set_error_handler(FL_ERROR_FUNC user_func)\"\"\")\n library.check_if_flinitialized()\n library.verify_function_type(pyfn_ErrorFunc)\n if not pyfn_ErrorFunc: # if it is None\n cfn_ErrorFunc = cty.cast(pyfn_ErrorFunc, cty.c_void_p)\n else: # real function\n cfn_ErrorFunc = xfdata.FL_ERROR_FUNC(pyfn_ErrorFunc)\n library.keep_cfunc_refs(cfn_ErrorFunc, pyfn_ErrorFunc)\n _fl_set_error_handler(cfn_ErrorFunc)", "def setErrorHandler(self,f,arg):\n libxml2mod.xmlParserCtxtSetErrorHandler(self._o,f,arg)", "def error_handler(f):\n @wraps(f)\n def inner(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n except DepositionDoesNotExists:\n abort(404, message=\"Deposition does not exist\", status=404)\n except DraftDoesNotExists:\n abort(404, message=\"Draft does not exist\", status=404)\n except InvalidApiAction:\n abort(404, message=\"Action does not exist\", status=404)\n except DepositionNotDeletable:\n abort(403, message=\"Deposition is not deletable\", status=403)\n except ForbiddenAction:\n abort(403, message=\"Forbidden\", status=403)\n except 
InvalidDepositionType:\n abort(400, message=\"Invalid deposition type\", status=400)\n except FormDoesNotExists:\n abort(400, message=\"Form does not exist\", status=400)\n except FileDoesNotExists:\n abort(400, message=\"File does not exist\", status=400)\n except FilenameAlreadyExists:\n abort(400, message=\"Filename already exist\", status=400)\n except UploadError:\n abort(400)\n except DepositionError as e:\n if len(e.args) >= 1:\n abort(400, message=e.args[0], status=400)\n else:\n abort(500, message=\"Internal server error\", status=500)\n return inner", "def error_handler(self, e, name, handler):\n msg = '' if handler else ' (no handler)'\n LOGGER.debug('Monitoring error handling %s %s: %s', name, msg, e)\n if handler:\n try:\n handler(e)\n except Exception as handler_exception:\n LOGGER.error('Monitoring exception %s fail: %s', name, handler_exception)\n LOGGER.exception(handler_exception)\n else:\n LOGGER.exception(e)", "def handleExceptionsDecorator(object):\n\n\t\t@functools.wraps(object)\n\t\tdef handleExceptionsWrapper(*args, **kwargs):\n\t\t\t\"\"\"\n\t\t\tThis decorator is used for exceptions handling.\n\n\t\t\t:param \\*args: Arguments. ( \\* )\n\t\t\t:param \\*\\*kwargs: Keywords arguments. ( \\*\\* )\n\t\t\t\"\"\"\n\n\t\t\t_exceptions__frame__ = True\n\n\t\t\ttry:\n\t\t\t\treturn object(*args, **kwargs)\n\t\t\texcept exceptions as error:\n\t\t\t\tfor handler in handlers:\n\t\t\t\t\thandler(error)\n\n\t\treturn handleExceptionsWrapper", "def custom_exception_handler(exc, context):\n response = exception_handler(exc, context)\n\n return Response(\n str(exc),\n status=response.status_code if response is not None else HTTP_500_INTERNAL_SERVER_ERROR,\n )", "def handle_discovery_errors(fn):\n @functools.wraps(fn)\n def wrapped(*args, **kwargs):\n try:\n return fn(*args, **kwargs)\n except (ValueError, requests.RequestException) as e:\n logger.warning('', exc_info=True)\n return redirect('/?' 
+ urllib.parse.urlencode({'failure': str(e)}))\n\n return wrapped", "def init_error_responses(app):\n\tfrom werkzeug.exceptions import default_exceptions\n\tfrom werkzeug.exceptions import HTTPException\n\tmake_json_error = lambda ex: json_response(dict(description=str(ex)), ex.code)\n\tfor code in default_exceptions.iterkeys():\n\t\tif code != 500: app.errorhandler(code)(make_json_error)\n\t# Use HTTP Basic auth (json object in password field)\n\tapp.errorhandler(401)(lambda ex: json_response(\n\t\tdict(description='Authenticate with HTTP Basic json:{auth object}'), 401,\n\t\t#headers={'WWW-Authenticate': 'Basic realm=\"JSON auth required\"'}\n\t))", "def exception_handler(self, handler):\n self.java_obj.exceptionHandler(ExceptionHandler(handler))\n return self", "def _handle_exceptions(f):\n\n @wraps(f)\n def wrapper(self, *args, **kwargs):\n try:\n return f(self, *args, **kwargs)\n except Exception as err:\n logger.exception(\n f\"{type(self).__name__}.{f.__name__}(*{args!r}, **{kwargs!r}) failed\"\n )\n content = self.message.content\n self.reply(f\"Oops, the {content} command encountered a problem: {err!r}\")\n\n wrapper._handle_exceptions = True\n return wrapper", "def on_request_error(locust_instance, exception, tb, **kwargs):", "def register_exceptions(app):\n\n @app.errorhandler(Exception)\n def exceptions(e):\n \"\"\"Exception handler that manages Flask/Werkzeug exceptions.\n\n For the other exception handlers check ``service/decorators.py``\n \"\"\"\n # NOTE: add log entry\n str(getattr(e, \"code\", \"unavailable\"))\n log_error_code = str(getattr(e, \"code\", \"unavailable\"))\n service_log.error(\n f\"{request.remote_addr} {request.method} {request.scheme} {request.full_path}\\n\"\n f\"Error code: {log_error_code}\\n\"\n f\"Stack trace: {traceback.format_exc()}\"\n )\n\n # NOTE: craft user messages\n if hasattr(e, \"code\"):\n code = int(e.code)\n\n # NOTE: return an http error for methods with no body allowed. 
This prevents undesired exceptions.\n NO_PAYLOAD_METHODS = \"HEAD\"\n if request.method in NO_PAYLOAD_METHODS:\n return Response(status=code)\n\n error: ServiceError\n if code == 400:\n error = ProgramHttpRequestError(e)\n elif code == 404:\n error = ProgramHttpMissingError(e)\n elif code == 405:\n error = ProgramHttpMethodError(e)\n elif code == 408:\n error = ProgramHttpTimeoutError(e)\n else:\n error = ProgramHttpServerError(e, code)\n\n return error_response(error)\n\n # NOTE: Werkzeug exceptions should be covered above, the following line is for\n # unexpected HTTP server errors.\n return error_response(e)", "def error_handler(self, error, *args, **kwargs):\n extras = []\n if args:\n extras.append(\"args={}\".format(args))\n if kwargs:\n extras.append(\"kwargs={}\".format(kwargs))\n if extras:\n msg = \"Handler (called with {}) raised error: {}\".format(\n \", \".join(extras), error)\n else:\n msg = \"Handler raised error: {}\".format(error)\n self.log_exception(msg)", "def handle_exceptions(\n generic_message='An error has occurred',\n status_code=500,\n error_handler=None):\n @web.middleware\n async def middleware(request, handler):\n try:\n response = await handler(request)\n return response\n except web.HTTPException:\n raise\n except Exception as ex:\n message = str(ex)\n if error_handler:\n error_handler(request, ex)\n logging.exception('Error: %s', message)\n return web.json_response(\n {'error': generic_message},\n status=status_code\n )\n return middleware", "def response(status, response_def):\n def decorator(fn): # pylint: disable=missing-docstring\n meta = RouteMeta.load(fn)\n meta.set_response(status, response_def)\n meta.save()\n return fn\n return decorator", "def wrap_pecan_controller_exception(func):\n\n @functools.wraps(func)\n def wrapped(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except exc.QinlingException as e:\n LOG.error('Error during API call: %s', six.text_type(e))\n return webob.Response(\n status=e.http_code,\n content_type='application/json',\n body=json.dumps(dict(faultstring=six.text_type(e))),\n charset='UTF-8'\n )\n\n return wrapped", "def all_exception_handler(handler_input, exception):\n # type: (HandlerInput, Exception) -> Response\n logger.error(exception, exc_info=True)\n speech = \"Sorry, an exception occurred. 
Please say again!!\"\n handler_input.response_builder.speak(speech).ask(speech)\n return handler_input.response_builder.response", "def after_request(response):\n # This avoids the duplication of registry in the log,\n # since that 500 is already logged via @app.errorhandler.\n if response.status_code != 500:\n ts = strftime('[%Y-%b-%d %H:%M]')\n logger.error('%s %s %s %s %s %s',\n ts,\n request.remote_addr,\n request.method,\n request.scheme,\n request.full_path,\n response.status)\n return response", "def errorPredicate(self, predicate, handler):\n return self.error( lambda ctx, x, statusCode: handler.apply(ctx, x, statusCode) \\\n if predicate.test(statusCode) else None)", "def handle_error(self, request_handler, client_address):\n logger.debug('handle_error(%s:%s)' % client_address)", "def exception(function):\n @functools.wraps(function)\n def wrapper(*args, **kwargs):\n try:\n return function(*args, **kwargs)\n except requests.exceptions.HTTPError as e:\n log.error(e)\n except requests.exceptions.RequestException as e: \n log.error(e)\n return wrapper", "def _handle_error(url, response):\n handlers = {\n http.client.NOT_FOUND: NotFoundError('Resource not found: %s' % url),\n http.client.FOUND: AlreadyExistsError(\n 'Resource already exists: %s' % url\n ),\n http.client.FAILED_DEPENDENCY: ValidationError(response),\n http.client.UNAUTHORIZED: NotAuthorizedError(response),\n http.client.BAD_REQUEST: BadRequestError(response),\n }\n\n if response.status_code in handlers:\n raise handlers[response.status_code]", "def server_error(*args, **kwargs):\n return HttpResponseBehaviour(HttpResponseServerError, *args, **kwargs)", "def SetErrorHandler(self,f,arg):\n if f is None:\n libxml2mod.xmlTextReaderSetErrorHandler(\\\n self._o,None,None)\n else:\n libxml2mod.xmlTextReaderSetErrorHandler(\\\n self._o,_xmlTextReaderErrorFunc,(f,arg))", "def all_exception_handler(handler_input, exception):\n # type: (HandlerInput, Exception) -> Response\n logger.error(exception, exc_info=True)\n\n speech = \"Sorry, there was some problem. 
Please try again!!\"\n handler_input.response_builder.speak(speech).ask(speech)\n\n return handler_input.response_builder.response", "def _default_error_handler(self, exception):\n\n self.log.error(exception)\n return '', 500", "def error(self, *args, **kwargs):", "def register_default_server_exception_handler(app: FastAPI):\n @app.exception_handler(status.HTTP_500_INTERNAL_SERVER_ERROR)\n async def default_server_exception_handler(request: Request, exception: Exception):\n response = get_response()\n logger.exception(\"Uncaught server exception: {exc}\", exc=exception)\n\n # Since the CORSMiddleware is not executed when an unhandled server exception\n # occurs, we need to manually set the CORS headers ourselves if we want the FE\n # to receive a proper JSON 500, opposed to a CORS error.\n # Setting CORS headers on server errors is a bit of a philosophical topic of\n # discussion in many frameworks, and it is currently not handled in FastAPI.\n # See dotnet core for a recent discussion, where ultimately it was\n # decided to return CORS headers on server failures:\n # https://github.com/dotnet/aspnetcore/issues/2378\n origin = request.headers.get('origin')\n\n if origin:\n # Have the middleware do the heavy lifting for us to parse\n # all the config, then update our response headers\n cors = CORSMiddleware(\n app=app,\n allow_origins=opal_common_config.ALLOWED_ORIGINS,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"])\n\n # Logic directly from Starlette's CORSMiddleware:\n # https://github.com/encode/starlette/blob/master/starlette/middleware/cors.py#L152\n\n response.headers.update(cors.simple_headers)\n has_cookie = \"cookie\" in request.headers\n\n # If request includes any cookie headers, then we must respond\n # with the specific origin instead of '*'.\n if cors.allow_all_origins and has_cookie:\n response.headers[\"Access-Control-Allow-Origin\"] = origin\n\n # If we only allow specific origins, then we have to mirror back\n # the Origin header in the response.\n elif not cors.allow_all_origins and cors.is_allowed_origin(origin=origin):\n response.headers[\"Access-Control-Allow-Origin\"] = origin\n response.headers.add_vary_header(\"Origin\")\n\n return response", "def http_response(code):\n def decorator(func):\n def wrapper(*args, **kwargs):\n def _http_response(response, http_status_code):\n \"\"\"\n Returns an API response for the client.\n\n Args:\n response (list/dict/serializable object): api response for the client.\n http_status_code (int): the http status code that the server should return.\n\n Returns:\n Response: a flask response object.\n \"\"\"\n return make_response(jsonify(response), http_status_code)\n try:\n response = func(*args, **kwargs)\n return _http_response(\n response=response if code != HttpCodes.NO_CONTENT else \"\", http_status_code=code\n )\n except BaseApiException as exc:\n return _http_response(response=exc.to_dict(), http_status_code=exc.status_code)\n return wrapper\n return decorator", "def set_error_callback(self, cb_func):\n self._error_callback = cb_func", "def handle_exception(error):\n return make_response(jsonify({'message': error.description}), 400)", "def errors_api_wrapped(func):\n\n async def wrapped(self, *args, **kwargs):\n try:\n return await func(self, *args, **kwargs)\n except Exception as ex:\n message = getattr(ex, \"message\", None) or \"Something went wrong\"\n details = getattr(ex, \"details\", None) or str(ex)\n status_code = getattr(ex, \"status_code\", 500)\n logger.exception(\n \"Couldn't perform 
action: %s. Error: %s, Details: %s\", ex, message, details\n )\n return {\"message\": message, \"details\": details}, status_code\n\n return wrapped", "def handle_500_error(_error):\n return make_response(jsonify(SERVER_ERROR), 500)", "def http_error_handler(ex, req, resp, params):\n resp.body = encode.encode({\n 'status': 1,\n 'msg': 'HTTP error: ' + ex.status\n })", "def all_exception_handler(handler_input, exception):\n return exception_request(handler_input, exception, logger)", "def wrap_exceptions(fn, mode=_exception_mode):\n if mode == 'native':\n return fn\n\n @functools.wraps(fn)\n def wrapper(*args, **kwargs):\n try:\n return fn(*args, **kwargs)\n except bottle.HTTPResponse:\n raise\n except Exception as ex:\n import traceback\n print(traceback.format_exc())\n if mode == 'silent':\n bottle.abort(500, '')\n return # Unreachable\n\n error = {\n 'ref': None,\n 'name': 'unexpected-exception',\n 'text': 'An unexpected exception occured.'\n }\n if mode != 'quiet':\n error['exception'] = repr(ex.__class__)\n\n if mode == 'full':\n error['backtrace'] = traceback.format_exc()\n\n bottle.response.status = http.client.INTERNAL_SERVER_ERROR\n return {'errors': [error]}\n\n return wrapper", "def _error(self, request, status, headers={}, prefix_template_path=False, **kwargs):\n\n return self._render(\n request = request,\n template = str(status),\n status = status,\n context = {\n 'error': kwargs\n },\n headers = headers,\n prefix_template_path = prefix_template_path\n )", "def error_handling_router(error: HTTPException):\n log_error(error, getattr(error, \"description\", str(error)))\n\n http_error_code = 500 # fallback\n if hasattr(error, \"code\"):\n try:\n http_error_code = int(error.code)\n except ValueError:\n pass\n\n error_text = getattr(\n error, \"description\", f\"Something went wrong: {error.__class__.__name__}\"\n )\n\n if request.is_json:\n response = jsonify(\n dict(\n message=getattr(error, \"description\", str(error)),\n status=http_error_code,\n )\n )\n response.status_code = http_error_code\n return response\n # Can UI handle this specific type?\n elif hasattr(current_app, \"%s_handler_html\" % error.__class__.__name__):\n return getattr(current_app, \"%s_handler_html\" % error.__class__.__name__)(error)\n # Can UI handle HTTPException? 
Let's make one from the error.\n elif hasattr(current_app, \"HttpException_handler_html\"):\n return current_app.HttpException_handler_html(error)\n # This fallback is ugly but better than nothing.\n else:\n return \"%s:%s\" % (error.__class__.__name__, error_text), http_error_code", "def with_error_url(self, url):\n self.__error_url = url\n return self", "def _raise_http_error(self, *args, **kwargs):", "def getErrorHandler(self):\n pass", "def catch_exception(func):\n def wrapper(*args, **kwargs):\n try:\n ret_val = func(*args, **kwargs)\n return ret_val\n except Exception as err:\n logger.exception(\"func name: %s, error: %s\" % (func.__name__, err))\n result = {\"code\": -20001, \"msg\": str(err)}\n return JsonResponse(result)\n return wrapper", "def custom_exception_handler(exception, context):\n # Call REST framework's default exception handler first,\n # to get the standard error response.\n response = exception_handler(exception, context)\n\n LOGGER.exception(exception)\n\n return response", "def errorType(self, Dtype, handler):\n return self.error( lambda ctx, x, statusCode: handler.apply(ctx, x, statusCode) \\\n if isinstance(x, Dtype) or isinstance(x.getCause(), Dtype) else None)", "async def _noop_error_handler(ctx: \"RequestContext\") -> None:", "def api_error_handler(ex):\n try:\n status_code = ex.code\n except AttributeError:\n status_code = 500\n if flask.request.path.startswith('/api/'):\n app.logger.error(str(ex))\n if app.config.get('DEBUG', False):\n resp = flask.jsonify(message=str(ex))\n else:\n resp = flask.jsonify(message='Internal Server Error')\n resp.status_code = status_code\n return resp\n return flask.make_response(\n flask.render_template(\n 'error.html', exc=ex,\n title=error_titles.get(status_code, 'Error')),\n status_code)", "def add_app_exception_handlers(app: Flask):\n\n from anubis.utils.http import error_response\n from anubis.utils.logging import logger\n\n # Set AuthenticationError handler\n @app.errorhandler(AuthenticationError)\n def handler_authentication_error(e: AuthenticationError):\n logger.error(traceback.format_exc())\n return jsonify(error_response(str(e) or 'Unauthenticated')), 401\n\n # Set LackCourseContext handler\n @app.errorhandler(LackCourseContext)\n def handle_lack_course_context(e: LackCourseContext):\n logger.error(traceback.format_exc())\n return jsonify(error_response(str(e) or 'Please set your course context'))\n\n @app.errorhandler(AssertError)\n def handle_assertion_error(e: AssertError):\n logger.error(traceback.format_exc())\n message, status_code = e.response()\n return jsonify(error_response(message)), status_code" ]
[ "0.7790632", "0.77100396", "0.727186", "0.70996445", "0.695573", "0.6953457", "0.6853296", "0.6826386", "0.67512983", "0.67437077", "0.6728367", "0.669882", "0.66451806", "0.6628181", "0.6596329", "0.658002", "0.6575629", "0.65438235", "0.6527638", "0.65149397", "0.6502375", "0.64994925", "0.649645", "0.6491688", "0.64746004", "0.6435782", "0.64309096", "0.6385044", "0.6380874", "0.6306446", "0.6288487", "0.62869424", "0.62627995", "0.6252792", "0.6208459", "0.6203696", "0.6176303", "0.6131706", "0.6124132", "0.6120444", "0.61123896", "0.61044693", "0.61014116", "0.60965985", "0.6088108", "0.60738766", "0.60624975", "0.60112464", "0.5989715", "0.59894395", "0.5984595", "0.59843594", "0.5955017", "0.59540015", "0.5953368", "0.5941188", "0.59348017", "0.59179854", "0.59120184", "0.5906582", "0.58977234", "0.58970153", "0.589688", "0.589246", "0.5876248", "0.5875424", "0.5874683", "0.58699197", "0.58693767", "0.58693004", "0.5860114", "0.58563536", "0.58487165", "0.5844966", "0.58381766", "0.5837814", "0.58318496", "0.5808163", "0.57982063", "0.5791241", "0.5785051", "0.57804006", "0.5770027", "0.5762999", "0.5756641", "0.5753912", "0.57511556", "0.57296175", "0.57176733", "0.571646", "0.5698159", "0.5693859", "0.5690405", "0.56899667", "0.56897175", "0.5683132", "0.5674595", "0.56737137", "0.56703216", "0.5669781" ]
0.7654537
2
Process raw inputs into a dataset.
def build_dataset(words): count = [] # count.extend(collections.Counter(words).most_common(n_words - 1)) count.extend(collections.Counter(words).most_common()) dictionary = dict() for word, _ in count: dictionary[word] = len(dictionary) data = list() # unk_count = 0 for word in words: index = dictionary.get(word, 0) # if index == 0: # dictionary['UNK'] # unk_count += 1 data.append(index) # count[0][1] = unk_count reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys())) data = [data[::2],data[1::2]] new_data = list() for i in range(len(data[0])): new_data.append([data[0][i],data[1][i]]) data = new_data vocabulary_size = len(dictionary) print("\n\ndictionary size = ") print(len(dictionary)) return data, count, dictionary, reversed_dictionary, vocabulary_size
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_inputs(self, inputs):", "def processInputs(self):", "def input_fn(self, ctx=None):\n sup_dataset = self.supervised_input.make_parsed_dataset(ctx)\n unsup_dataset = self.unsupervised_input.make_parsed_dataset(ctx)\n\n dataset = tf.data.Dataset.zip((sup_dataset, unsup_dataset))\n dataset = dataset.map(_combine_sup_unsup_datasets)\n\n dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n\n options = tf.data.Options()\n options.experimental_deterministic = False\n dataset = dataset.with_options(options)\n\n return dataset", "def _process(self, data: np.ndarray) -> np.ndarray:", "def _process(self, data: np.ndarray) -> np.ndarray:", "def process(self, data_batch: Any, data_samples: Sequence[dict]) -> None:", "def prepare_data(self):\n data = self._get_dataset(self.hparams.dataset_path)\n label_encoder = data[\"label_encoder\"]\n del data[\"label_encoder\"]\n\n click.secho(\"Building inputs and labels.\", fg=\"yellow\")\n datasets = {\n \"train\": defaultdict(list),\n \"valid\": defaultdict(list),\n \"test\": defaultdict(list),\n }\n for dataset_name, dataset in data.items():\n for sample in dataset:\n instance = self.build_input(\n self.tokenizer, sample[\"text\"], label_encoder, sample[\"label\"]\n )\n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n\n click.secho(\"Padding inputs and building tensors.\", fg=\"yellow\")\n tensor_datasets = {\"train\": [], \"valid\": [], \"test\": []}\n for dataset_name, dataset in datasets.items():\n dataset = self.pad_dataset(dataset, padding=self.tokenizer.pad_index)\n for input_name in MODEL_INPUTS:\n if input_name == \"labels\":\n tensor = torch.tensor(dataset[input_name], dtype=torch.float32)\n else:\n tensor = torch.tensor(dataset[input_name])\n tensor_datasets[dataset_name].append(tensor)\n\n self.train_dataset = TensorDataset(*tensor_datasets[\"train\"])\n self.valid_dataset = TensorDataset(*tensor_datasets[\"valid\"])\n self.test_dataset = TensorDataset(*tensor_datasets[\"test\"])\n click.secho(\n \"Train dataset (Batch, Candidates, Seq length): {}\".format(\n self.train_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )\n click.secho(\n \"Valid dataset (Batch, Candidates, Seq length): {}\".format(\n self.valid_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )\n click.secho(\n \"Test dataset (Batch, Candidates, Seq length): {}\".format(\n self.test_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )", "def run_data (arguments):\n if arguments.define_labels:\n data.define_labels()\n elif arguments.preprocess:\n # Preprocess from data_raw --> data_preprocessed\n data.preprocess()\n elif arguments.annotate:\n # Annotate from data_preprocessed --> data_annotated\n reverse = False # DEBUG\n annotator.annotate(reverse)\n elif arguments.split:\n # Split from data_annotated --> train.txt/valid.txt\n restrict = 100 # Default: Keep 100% of all files\n splitter.train_valid(restrict_to=restrict)", "def parse_dataset(self, data):\n pass", "def preprocess(self, data, label):\n\t\traise NotImplementedError", "def processData(self, rawData, queryMeta=None):\n\t\treturn self.service.run(self, rawData, queryMeta)", "def handle(self, data, context):\n \n model_input = self.preprocess(data)\n model_out = self.inference(model_input)\n return self.postprocess(model_out)", "def process_raw_data(self):\n \n # Define some variables of interest.\n vor = [\"n_sentences\", \"n_correct\", \"p_correct\", \"median_RT\", \\\n \"mean_RT\", \"stdev_RT\", \"scaled_stdev_RT\"]\n \n # Get all participant 
names, or return straight away if no data was\n # loaded yet.\n if hasattr(self, \"raw\"):\n participants = self.raw.keys()\n participants.sort()\n else:\n self.data = None\n return\n\n # Count the number of participants.\n n = len(participants)\n \n # Create a data dict for each variable of interest.\n self.data = {}\n self.data[\"ppname\"] = []\n for var in vor:\n self.data[var] = numpy.zeros(n, dtype=float) * numpy.NaN\n \n # Loop through all participants.\n for i, ppname in enumerate(participants):\n # Add the participant name.\n self.data[\"ppname\"].append(copy.deepcopy(ppname))\n # Skip empty datasets.\n if self.raw[ppname] is None:\n continue\n # Compute stuff relevant to this task.\n self.data[\"n_sentences\"][i] = len(self.raw[ppname][\"Sentence\"])\n self.data[\"n_correct\"][i] = numpy.sum(self.raw[ppname][\"correct\"])\n self.data[\"p_correct\"][i] = float(self.data[\"n_correct\"][i]) \\\n / float(self.data[\"n_sentences\"][i])\n self.data[\"median_RT\"][i] = numpy.nanmedian(self.raw[ppname][\"RT\"])\n self.data[\"mean_RT\"][i] = numpy.nanmean(self.raw[ppname][\"RT\"])\n self.data[\"stdev_RT\"][i] = numpy.nanstd(self.raw[ppname][\"RT\"])\n # Compute a scaled standard deviation of the response time, scaled to the\n # median response time to remove the correlation between the two.\n self.data[\"scaled_stdev_RT\"] = self.data[\"stdev_RT\"] / self.data[\"median_RT\"]", "def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')", "def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')", "def process(self, preprocess_args: Dict) -> None:\n dataset2preprocessor = {\n 'vhi': VHIPreprocessor,\n }\n\n for dataset, variables in preprocess_args.items():\n\n # check the format is as we expected\n assert dataset in dataset2preprocessor, \\\n f'{dataset} is not supported! Supported datasets are {dataset2preprocessor.keys()}'\n\n assert type(variables) is list, \\\n f'Expected {dataset} values to be a list. 
Got {type(variables)} instead'\n\n preprocessor = dataset2preprocessor[dataset](self.data)\n\n for variable in variables:\n preprocessor.preprocess(**variable)", "def input_fn():\n files = tf.data.Dataset.list_files(os.path.join(\n tft_working_dir, filebase + '*'))\n dataset = files.interleave(\n tf.data.TFRecordDataset, cycle_length=4, block_length=16)\n dataset = dataset.map(parser)\n\n if shuffle:\n dataset = dataset.shuffle(buffer_size)\n\n dataset = dataset.repeat(num_epochs)\n dataset = dataset.batch(batch_size)\n\n dataset = dataset.prefetch(prefetch_buffer_size)\n iterator = dataset.make_one_shot_iterator()\n transformed_features, transformed_labels = iterator.get_next()\n\n return transformed_features, transformed_labels", "def process_data(self, spec):\n with torch.no_grad():\n \n assert(len(spec) == 5), 'dataloader should return (spec_masked, pos_enc, mask_label, attn_mask, spec_stacked)'\n # Unpack and Hack bucket: Bucketing should cause acoustic feature to have shape 1xBxTxD'\n spec_masked = spec[0].squeeze(0)\n pos_enc = spec[1].squeeze(0)\n mask_label = spec[2].squeeze(0)\n attn_mask = spec[3].squeeze(0)\n spec_stacked = spec[4].squeeze(0)\n\n spec_masked = spec_masked.to(device=self.device)\n pos_enc = torch.FloatTensor(pos_enc).to(device=self.device)\n mask_label = torch.ByteTensor(mask_label).to(device=self.device)\n attn_mask = torch.FloatTensor(attn_mask).to(device=self.device)\n spec_stacked = spec_stacked.to(device=self.device)\n\n return spec_masked, pos_enc, mask_label, attn_mask, spec_stacked # (x, pos_enc, mask_label, attention_mask. y)", "def normalize_dataset(self):", "def preprocess(data):\n raise NotImplementedError", "def process(self, data, reset=False):\n data = np.asarray(data)\n self.check_dims(data)\n data = self.highpass_filter(data, reset=reset)\n data = self.lowpass_filter(data, reset=reset)\n data = self.resample(data)\n data = self.reref_data(data)\n data = self.select_channels(data)\n data = self.normalize_data(data)\n data = self.add_context(data)\n return data", "def transform(self, inputs: list, stage: str) -> datapack.DataPack:", "def _proc_dataset(d):\n # merge 2dseq complex frame group if present\n if d.is_complex and d.type == '2dseq':\n d = FrameGroupMerger().merge(d, 'FG_COMPLEX')\n\n # prepare the data array\n if d.is_svs:\n data = _prep_data_svs(d)\n elif d.is_mrsi:\n data = _prep_data_mrsi(d)\n else:\n data = d.data\n\n # get properties\n properties = d.to_dict()\n\n # some Bruker datasets do not have affine property\n if d.type == 'fid': if not 'affine' in properties: properties.update({'affine':np.identity(4)})\n \n yield data, properties", "def process(self, inputs):\n output = None\n return output", "def _transform(self, dataset):\n raise NotImplementedError()", "def _canonize_input(self, dataset):\n unpack = lambda x: x\n if isinstance(dataset, _tc.SArray):\n dataset = _tc.SFrame({self.feature: dataset})\n elif isinstance(dataset, _tc.Image):\n dataset = _tc.SFrame({self.feature: [dataset]})\n unpack = lambda x: x[0]\n return dataset, unpack", "def preprocess_data(self):\n\n self._preprocess_train_data()\n self._preprocess_test_data()", "def process(dataset, f):\n logger.info('processing dataset ({0})'.format(len(dataset.samples)))\n for sample in dataset.samples:\n sample.proc = f(sample.image)", "def _prep_data(self, data, func_input_dtype):\n if func_input_dtype in (None, 'DataArray'):\n return data\n if func_input_dtype == 'Dataset':\n # TODO: add logic that creates a single Dataset comprising all of\n # the DataArray objects 
in `data`.\n raise NotImplementedError(\"func_input_dtype of `Dataset` not yet \"\n \"implemented.\")\n if func_input_dtype == 'numpy':\n self.coords = data[0].coords\n return [d.values for d in data]", "def processInputData(self, unformattedData):\n tempList = unformattedData.split('\\n')\n rawNodes = copy.copy(list(self.filterRawNodes(tempList)))\n self.vertices = self.createVertices(rawNodes)\n self.edges = self.createEdges(rawNodes, self.vertices)\n self.createEdgesList(rawNodes, self.vertices)", "def process_raw_dataset(path_csv, processed_data):\n \n labels = defaultdict(list)\n with open(path_csv, 'rb') as csvfile:\n stream_data = csv.DictReader(csvfile, delimiter=',')\n for row in stream_data:\n for (k, v) in row.items():\n labels[k].append(v)\n \n \n labels['ID'] = np.array(map(int, labels['ID']))\n labels['Nude'] = np.array(map(int, labels['Nude']))\n \n mark_nude = labels['Nude'] == 1\n mark_normal = labels['Nude'] == 0\n \n normal_id = labels['ID'][mark_normal]\n nude_id = labels['ID'][mark_nude]\n \n path_dataset = os.path.join(processed_data, 'nudity_dataset')\n for count, id in zip(range(len(nude_id)), nude_id):\n if count < 1000: \n copyfile(os.path.join(path_dataset, '%d.jpg' % id), os.path.join(processed_data, 'train/nude/%d.jpg' % id))\n \n else:\n copyfile(os.path.join(path_dataset, '%d.jpg' % id), os.path.join(processed_data, 'test/nude/%d.jpg' % id))\n \n for count, id in zip(range(len(normal_id)), normal_id):\n if count < 1000:\n copyfile(os.path.join(path_dataset, '%d.jpg' % id), os.path.join(processed_data, 'train/normal/%d.jpg' % id))\n \n else:\n copyfile(os.path.join(path_dataset, '%d.jpg' % id), os.path.join(processed_data, 'test/normal/%d.jpg' % id))\n \n return labels", "def prepare_dataset(self, data_raw):\n\n self._logger.debug(f'Preparing dataset ({len(data_raw)} lines)...')\n data = []\n line_count = 0\n sample_count = 0\n sample_count_failed = 0\n\n for line in tqdm(data_raw):\n line_count += 1\n #self._logger.debug(f'Line {line_count}/{len(data_raw)}')\n\n try:\n # TODO Call prepare_sample() here?\n sample = {}\n\n sample['text'] = line['text']\n sample['text_tokenized'] = None # set by add_tokens()\n sample['text_attention_mask'] = None # set by add_tokens()\n sample['item_name'] = line['string']\n self.add_tokens(sample)\n sample['text_mention_mask'] = None # set by add_mention_mask()\n self.add_mention_mask(sample)\n\n # Once for correct Wikidata item\n sample['item_id'] = line['correct_id']\n sample['item_pbg'] = self._pbg.get_item_embedding(line['correct_id'])\n sample['item_glove'] = np.empty((1, 900)) # TODO\n sample['answer'] = True\n data.append(sample)\n sample_count += 1\n\n # Once for wrong Wikidata item\n sample['item_id'] = line['wrong_id']\n sample['item_pbg'] = self._pbg.get_item_embedding(line['wrong_id'])\n sample['item_glove'] = np.empty((1, 900)) # TODO\n sample['answer'] = False\n data.append(sample)\n sample_count += 1\n\n except ValueError as e: # skip sample when there is no embedding found\n self._logger.info(str(e))\n sample_count_failed += 1\n continue\n\n self._logger.debug(f'Prepared {sample_count} samples from {line_count} lines (skipped {sample_count_failed} failed)')\n\n return data", "def prepare_data(raw=False, round_ratings=False):\n print('Downloading input data...')\n if raw:\n # read data\n review = get_input(\n 'https://www.dropbox.com/s/mtln9b6udoydn2h/yelp_academic \\\n _dataset_review_sample.csv?dl=1')\n user = get_input(\n 'https://www.dropbox.com/s/pngrptljotqm4ds/yelp_academic \\\n _dataset_user.json?dl=1')\n 
business = get_input(\n 'https://www.dropbox.com/s/w0wy854u5swrhmc/yelp_academic \\\n _dataset_business.json?dl=1')\n\n # join datasets\n review_user = pd.merge(\n review, user, on=\"user_id\",\n how=\"left\", suffixes=(\"\", \"_user\"))\n review_user_business = pd.merge(\n review_user, business, on=\"business_id\",\n how=\"left\", suffixes=(\"\", \"_business\"))\n review_user_business = review_user_business[[\n 'user_id', 'business_id',\n 'stars', 'text',\n 'name', 'average_stars',\n 'name_business', 'stars_business',\n 'categories', 'state', 'city']]\n else:\n review_user_business = get_input(\n 'https://www.dropbox.com/s/sj445d95lljuc4p/small_sample.parquet?dl=1'\n )\n if round_ratings:\n # bucketize numeric features to reduce dimensions\n review_user_business['average_stars'] = review_user_business[\n 'average_stars'].apply(lambda x: round_of_rating(x))\n review_user_business['stars_business'] = review_user_business[\n 'stars_business'].apply(lambda x: round_of_rating(x))\n return review_user_business", "def generate_data(self):\n\n column_num = 1\n src_path = self.src_paths_after_pre_process\n target_path = self.tgt_paths_after_pre_process\n\n src_ds = load_textline_dataset([src_path], column_num)\n\n src_ds = src_ds[0]\n\n input_pipeline_func = self.get_input_pipeline(for_export=False)\n\n src_ds = src_ds.map(\n input_pipeline_func, num_parallel_calls=self.num_parallel_calls)\n\n src_size_ds = src_ds.map(\n lambda x: compute_sen_lens(x, padding_token=utils.PAD_IDX),\n num_parallel_calls=self.num_parallel_calls)\n\n src_ds = src_ds.map(\n self.exclude_padding, num_parallel_calls=self.num_parallel_calls)\n\n if self.infer_without_label:\n data_set = tf.data.Dataset.zip((src_ds, src_size_ds))\n\n else:\n tgt = load_textline_dataset([target_path], column_num)\n tgt = tgt[0]\n tgt_out_ds = tgt.map(lambda x: x + ' ' + self.END_TOKEN)\n tgt_in_ds = tgt.map(lambda x: self.START_TOKEN + ' ' + x)\n\n tgt_in_ds = tgt_in_ds.map(\n lambda batch: self.text_pipeline_func(batch, self.max_dec_len, self.\n text_vocab_file_path),\n num_parallel_calls=self.num_parallel_calls)\n\n tgt_in_size_ds = tgt_in_ds.map(\n lambda x: compute_sen_lens(x, padding_token=utils.PAD_IDX),\n num_parallel_calls=self.num_parallel_calls)\n\n tgt_in_ds = tgt_in_ds.map(\n self.exclude_padding, num_parallel_calls=self.num_parallel_calls)\n\n inp_ds = tf.data.Dataset.zip(\n (src_ds, src_size_ds, tgt_in_ds, tgt_in_size_ds))\n\n if self.use_label_vocab:\n target_vocab_file_path = self.label_vocab_file_paths[0]\n else:\n target_vocab_file_path = self.text_vocab_file_path\n tgt_out_ds = tgt_out_ds.map(\n lambda batch: self.text_pipeline_func(batch, self.max_dec_len,\n target_vocab_file_path),\n num_parallel_calls=self.num_parallel_calls)\n\n tgt_out_ds = tgt_out_ds.map(\n self.exclude_padding, num_parallel_calls=self.num_parallel_calls)\n data_set = tf.data.Dataset.zip((inp_ds, tgt_out_ds))\n\n vocab_dict = load_vocab_dict(self.text_vocab_file_path)\n vocab_size = len(vocab_dict)\n label_vocab_dict = load_vocab_dict(self.label_vocab_file_paths[0])\n label_vocab_size = len(label_vocab_dict)\n data_size = get_file_len(self.src_paths_after_pre_process)\n self.config['data']['vocab_size'] = vocab_size\n self.config['data']['label_vocab_size'] = label_vocab_size\n self.config['data']['{}_data_size'.format(self.mode)] = data_size\n\n return data_set", "def input_fn():\n bos_id = tf.constant(BOS_ID, tf.int32)\n eos_id = tf.constant(EOS_ID, tf.int32)\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want 
no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_files)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.map(lambda record: _decode_record(record, name_to_features))\n\n d = d.map(lambda src_ids, tgt_ids, label: (\n tf.concat([[bos_id], src_ids, [eos_id]], 0),\n tf.concat([tgt_ids, [eos_id]], 0),\n label))\n\n d = d.map(lambda src_ids, tgt_ids, label: (\n src_ids[:FLAGS.max_sequence_length],\n tgt_ids[:FLAGS.max_sequence_length],\n label\n ))\n\n d = d.map(lambda src_ids, tgt_ids, label: (\n tf.concat([src_ids, tgt_ids], 0),\n tf.concat([tf.zeros_like(src_ids), tf.ones_like(tgt_ids)], 0),\n label\n ))\n\n d = d.map(lambda input_ids, segment_ids, label_ids: (\n input_ids,\n segment_ids,\n tf.ones_like(input_ids),\n label_ids\n ))\n\n def batching_func(x):\n return x.padded_batch(\n batch_size,\n # The entry is the source line rows;\n # this has unknown-length vectors. The last entry is\n # the source row size; this is a scalar.\n padded_shapes=(\n tf.TensorShape([None]), # src\n tf.TensorShape([None]), # tgt\n tf.TensorShape([None]),\n tf.TensorShape([])), # src_len\n # Pad the source sequences with eos tokens.\n # (Though notice we don't generally need to do this since\n # later on we will be masking out calculations past the true sequence.\n padding_values=(\n PAD_ID, # src\n PAD_ID,\n PAD_ID,\n 0)) # src_len -- unused\n\n batched_dataset = batching_func(d)\n features = batched_dataset.map(lambda input_ids, segment_ids, input_mask, label:\n {\n \"input_ids\": input_ids,\n \"segment_ids\": segment_ids,\n \"input_mask\": input_mask,\n \"label_ids\": label\n\n })\n\n return features", "def process_dataset(self):\n\n logger.info('\\n')\n logger.info('=' * 40)\n logger.info('=\\t DeepRank Data Set')\n logger.info('=')\n logger.info('=\\t Training data')\n for f in self.train_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n if self.valid_database:\n logger.info('=\\t Validation data')\n for f in self.valid_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n if self.test_database:\n logger.info('=\\t Test data')\n for f in self.test_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n logger.info('=' * 40 + '\\n')\n sys.stdout.flush()\n\n # check if the files are ok\n self.check_hdf5_files(self.train_database)\n\n if self.valid_database:\n self.valid_database = self.check_hdf5_files(\n self.valid_database)\n\n if self.test_database:\n self.test_database = self.check_hdf5_files(\n self.test_database)\n\n # create the indexing system\n # alows to associate each mol to an index\n # and get fname and mol name from the index\n self.create_index_molecules()\n\n # get the actual feature name\n if self.mapfly:\n self.get_raw_feature_name()\n else:\n self.get_mapped_feature_name()\n\n # get the pairing\n self.get_pairing_feature()\n\n # get grid shape\n self.get_grid_shape()\n\n # get the input shape\n self.get_input_shape()\n\n # get renormalization factor\n if self.normalize_features or self.normalize_targets or self.clip_features:\n if self.mapfly:\n self.compute_norm()\n else:\n self.get_norm()\n\n logger.info('\\n')\n logger.info(\" Data Set Info:\")\n logger.info(\n f' Augmentation : {self.use_rotation} rotations')\n logger.info(\n f' Training set : {self.ntrain} conformations')\n logger.info(\n f' Validation set : {self.nvalid} conformations')\n logger.info(\n f' Test set : {self.ntest} conformations')\n logger.info(f' Number of channels : {self.input_shape[0]}')\n logger.info(f' Grid Size : 
{self.data_shape[1]}, '\n f'{self.data_shape[2]}, {self.data_shape[3]}')\n sys.stdout.flush()", "def eval_input_fn(features, labels, batch_size):\n #features=dict(features)\n features = dataframetodict(features)\n if labels is None:\n # No labels, use only features.\n inputs = features\n else:\n inputs = (features, labels)\n\n # Convert the inputs to a Dataset.\n dataset = tf.data.Dataset.from_tensor_slices(inputs)\n\n # Batch the examples\n assert batch_size is not None, \"batch_size must not be None\"\n dataset = dataset.batch(batch_size)\n\n # Return the dataset.\n return dataset", "def process(self, fake=False):\n # set up condor input if it's the first time submitting\n if not self.prepared_inputs: self.prepare_inputs()\n\n self.run(fake=fake)\n\n if self.complete():\n self.finalize()\n\n self.backup()\n\n self.logger.info(\"Ended processing {0}\".format(self.sample.get_datasetname()))", "def preprocess_dataset(self, dataset, params=None):\n if params is None:\n assert self.params_loaded, (\n \"You must either provide parameters or load the model params before preprocessing.\")\n params = self.params\n for key in dataset.keys():\n if dataset[key] is None:\n continue\n if hasattr(params, \"whiten_data\") and params.whiten_data:\n if hasattr(params, \"whiten_method\"):\n if params.whiten_method == \"FT\": # other methods require patching first\n if hasattr(params, \"whiten_batch_size\"):\n batch_size = params.whiten_batch_size\n else:\n batch_size = None\n dataset[key].images, dataset[key].data_mean, dataset[key].w_filter = \\\n dp.whiten_data_batch(dataset[key].images, method=params.whiten_method,\n batch_size=batch_size)\n print(\"INFO:preprocessing:FT Whitened \"+key+\" data\")\n if hasattr(params, \"lpf_data\") and params.lpf_data:\n dataset[key].images, dataset[key].data_mean, dataset[key].lpf_filter = \\\n dp.lpf_data(dataset[key].images, cutoff=params.lpf_cutoff)\n print(\"INFO:preprocessing:Low pass filtered \"+key+\" data\")\n if hasattr(params, \"contrast_normalize\") and params.contrast_normalize:\n if hasattr(params, \"gauss_patch_size\"):\n dataset[key].images = dp.contrast_normalize(dataset[key].images,\n params.gauss_patch_size)\n else:\n dataset[key].images = dp.contrast_normalize(dataset[key].images)\n print(\"INFO:preprocessing:Contrast normalized \"+key+\" data\")\n if hasattr(params, \"standardize_data\") and params.standardize_data:\n if params.data_type == \"mnist\":\n eps = 1e-5\n else:\n eps = None\n dataset[key].images, dataset[key].data_mean, dataset[key].data_std = \\\n dp.standardize_data(dataset[key].images, eps)\n self.data_mean = dataset[key].data_mean\n self.data_std = dataset[key].data_std\n print(\"INFO:preprocessing:Standardized \"+key+\" data\")\n if hasattr(params, \"extract_patches\") and params.extract_patches:\n assert all(key in params.__dict__.keys()\n for key in [\"num_patches\", \"patch_edge_size\", \"overlapping_patches\",\n \"randomize_patches\"]), (\"Insufficient params for patches.\")\n out_shape = (int(params.num_patches), int(params.patch_edge_size),\n int(params.patch_edge_size), dataset[key].num_channels)\n dataset[key].num_examples = out_shape[0]\n dataset[key].reset_counters()\n if hasattr(params, \"patch_variance_threshold\"):\n dataset[key].images = dp.extract_patches(dataset[key].images, out_shape,\n params.overlapping_patches, params.randomize_patches,\n params.patch_variance_threshold, dataset[key].rand_state)\n else:\n dataset[key].images = dp.extract_patches(dataset[key].images, out_shape,\n params.overlapping_patches, 
params.randomize_patches,\n var_thresh=0, rand_state=dataset[key].rand_state)\n dataset[key].shape = dataset[key].images.shape\n dataset[key].num_rows = dataset[key].shape[1]\n dataset[key].num_cols = dataset[key].shape[2]\n dataset[key].num_channels = dataset[key].shape[3]\n dataset[key].num_pixels = np.prod(dataset[key].shape[1:])\n print(\"INFO:preprocessing:Extracted patches from \"+key+\" data\")\n if hasattr(params, \"whiten_data\") and params.whiten_data:\n if hasattr(params, \"whiten_method\") and params.whiten_method != \"FT\":\n dataset[key].images, dataset[key].data_mean, dataset[key].w_filter = \\\n dp.whiten_data(dataset[key].images, method=params.whiten_method)\n print(\"INFO:preprocessing:Whitened \"+key+\" data\")\n if hasattr(params, \"norm_data\") and params.norm_data:\n dataset[key].images, dataset[key].data_max = dp.normalize_data_with_max(dataset[key].images)\n self.data_max = dataset[key].data_max\n print(\"INFO:preprocessing:Normalized \"+key+\" data with maximum\")\n if hasattr(params, \"rescale_data\") and params.rescale_data:\n dataset[key].images, dataset[key].data_min, dataset[key].data_max = dp.rescale_data_to_one(dataset[key].images)\n self.data_max = dataset[key].data_max\n self.data_min = dataset[key].data_min\n print(\"INFO:preprocessing:Rescaled each \"+key+\" datapoint to one\")\n if hasattr(params, \"center_data\") and params.center_data:\n dataset[key].images, dataset[key].data_mean = dp.center_data(dataset[key].images,\n use_dataset_mean=True)\n self.data_mean = dataset[key].data_mean\n print(\"INFO:preprocessing:Centered \"+key+\" data\")\n return dataset", "def process(self):\n # check already processed\n proc_dir = os.path.join(self.root, self.processed_dir)\n train_path = os.path.join(proc_dir, self.train_fn)\n test_path = os.path.join(proc_dir, self.test_fn)\n if os.path.exists(train_path) and os.path.exists(test_path):\n # already exists => load process file\n print(\"processed dataset already exists; load it\")\n self.train_data = torch.load(train_path)\n self.test_data = torch.load(test_path)\n return\n\n # read and process raw data\n print(\"read and process raw dataset ...\")\n label_path = os.path.join(self.root, self.raw_dir, \"labels.txt\")\n image_path_format = os.path.join(self.root, self.raw_dir, \"img_{}.png\")\n \n with open(label_path) as f:\n for line in f:\n if not line.strip():\n break\n \n idx, label = map(int, line.strip().split('\\t'))\n image_path = image_path_format.format(idx)\n image = load_image(image_path)\n \n if idx <= self.split:\n self.train_data.append((image, label))\n elif idx > self.split:\n self.test_data.append((image, label))\n\n # write processed file\n if not os.path.exists(proc_dir):\n os.mkdir(proc_dir)\n\n with open(train_path, 'wb') as f:\n torch.save(self.train_data, f)\n with open(test_path, 'wb') as f:\n torch.save(self.test_data, f)\n\n print(\"Done!\")", "def _create_input_data(self):\n SCHEMA = parse_table_schema_from_json(\n '{\"fields\": [{\"name\": \"data\", \"type\": \"BYTES\"}]}')\n\n def format_record(record):\n # Since Synthetic Source returns data as a dictionary, we should skip one\n # of the part\n import base64\n return {'data': base64.b64encode(record[1])}\n\n with TestPipeline() as p:\n ( # pylint: disable=expression-not-assigned\n p\n | 'Produce rows' >> Read(\n SyntheticSource(self.parse_synthetic_source_options()))\n | 'Format' >> Map(format_record)\n | 'Write to BigQuery' >> WriteToBigQuery(\n dataset=self.input_dataset,\n table=self.input_table,\n schema=SCHEMA,\n 
create_disposition=BigQueryDisposition.CREATE_IF_NEEDED,\n write_disposition=BigQueryDisposition.WRITE_EMPTY))", "def input_fn():\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=sample_length)\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: tf.parse_single_example(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n return d", "def process_dataset_file(data, label):\n\n # Select only required columns\n data = select_columns(data)\n\n # Colums are divided into features and labels\n data_x, data_y = divide_x_y(data, label)\n data_y = adjust_labels(data_y, label)\n data_y = data_y.astype(int)\n\n # Perform linear interpolation\n data_x = np.array([Series(i).interpolate() for i in data_x.T]).T\n\n # Remaining missing data is replaced with 0\n data_x[np.isnan(data_x)] = 0\n\n # Normalize sensor channels\n data_x = norm(data_x, MAX_THRESHOLDS, MIN_THRESHOLDS)\n\n return data_x, data_y", "def prepare_inputs(data, queries, tokenizer):\n # Prepare inputs\n table = pd.DataFrame.from_dict(data)\n inputs = tokenizer(table=table, queries=queries,truncation=True, padding=True,return_tensors=\"pt\").to(device)\n\n # Return things\n return table, inputs", "def process_raw_data(data_dir='/home/data/nbc/athena/athena-data/'):\n\n # Calls the process_corpus function, defined below\n # process_corpus reads in the text, performs abbreviation, spelling,\n # translation, and overall text Processing\n # process_corpus outputs the processed text for each file and the stemmed file\n for feature_source in ['abstract', 'full']:\n process_corpus(data_dir, feature_source)\n\n # Calls the label_data function, defined below\n # label_data reads in the metadata csv files, concatenates them, then\n # reads in the processed text files\n # label_data outputs a binary pmid by label metadata matrix\n label_data(data_dir)\n generate_gazetteer(data_dir)", "def main(input_filepath, output_filepath):\n # return processed data and save in the output files\n in_data_y, y_output, in_data = make_data_set(input_filepath)\n in_data_y.to_csv(output_filepath)\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n return in_data_y, y_output, in_data", "def _transform_data(self, *args, **kwargs) -> None:\n raise NotImplementedError", "def input_fn(params):\n #batch_size = params[\"batch_size\"]\n batch_size = FLAGS.train_batch_size if is_training else FLAGS.eval_batch_size\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n if FLAGS.use_horovod:\n d = d.shard(hvd.size(), hvd.rank())\n d = d.repeat()\n d = d.shuffle(buffer_size=100, seed=FLAGS.random_seed)\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n return d", "def __data_generation(self, rows):\n samples = np.zeros((rows, self.image_width, self.image_height, self.image_depth))\n targets = np.zeros((rows, self.image_width, self.image_height, self.num_classes))\n for j in range(rows):\n for row1, row2 in zip(self.reader1, self.reader2):\n array_row1 = np.array(row1, dtype=np.float)\n samples[j,:,:,:] = preprocess_feature(array_row1,\n self.image_width, self.image_height, self.image_depth)\n try:\n next(self.reader1)\n except StopIteration:\n print(\"CSV iteration end for 
feature. Calling 'break'.\")\n break\n\n array_row2 = np.array(row2, dtype=np.int)\n targets[j,:,:,:] = preprocess_label(array_row2,\n self.image_width, self.image_height, self.num_classes)\n try:\n next(self.reader2)\n except StopIteration:\n print(\"CSV iteration end for label. Calling 'break'.\")\n break\n\n return samples, targets", "def input_fn(params):\n batch_size = params[\"batch_size\"]\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder,\n )\n )\n\n return d", "def input_fn(is_training, data_dir, reid_data_dir= None,batch_size=32, num_epochs=1):\n dataset = tf.data.Dataset.from_tensor_slices(get_filenames(is_training, data_dir))\n dataset_seg = dataset.flat_map(tf.data.TFRecordDataset)\n\n # dataset_reid = tf.data.Dataset.from_tensor_slices(get_filenames_reid(is_training, reid_data_dir))\n # dataset_reid = dataset_reid.flat_map(tf.data.TFRecordDataset)\n\n\n if is_training:\n # When choosing shuffle buffer sizes, larger sizes result in better\n # randomness, while smaller sizes have better performance.\n # is a relatively small dataset, we choose to shuffle the full epoch.\n dataset_seg = dataset_seg.shuffle(buffer_size=_NUM_IMAGES['train'])\n # dataset_reid = dataset_reid.shuffle(buffer_size=30248)\n\n\n dataset_seg = dataset_seg.map(parse_record)\n dataset_seg = dataset_seg.map(lambda image, label: preprocess_image(image, label, is_training))\n dataset_seg = dataset_seg.prefetch(batch_size)\n dataset_seg = dataset_seg.repeat(num_epochs)\n dataset_seg = dataset_seg.batch(batch_size)\n\n # dataset_reid = dataset_reid.map(parse_record_reid)\n # dataset_reid = dataset_reid.map(lambda image, label: preprocess_image_reid(image, label, is_training))\n # dataset_reid = dataset_reid.prefetch(batch_size)\n # dataset_reid = dataset_reid.repeat(num_epochs)\n # dataset_reid = dataset_reid.batch(batch_size)\n\n # iterator = dataset_reid.make_one_shot_iterator()\n # images_reid, label_reid = iterator.get_next()\n\n train_record_file = os.path.join(reid_data_dir, 'train-512-170.tfrecords')\n val_record_file = os.path.join(reid_data_dir, 'val-512-170.tfrecords')\n\n train_images, train_labels = read_records(train_record_file, _HEIGHT, _WIDTH, type='normalization')\n train_images_batch, train_labels_batch = get_batch_images(train_images, train_labels,\n batch_size=batch_size, labels_nums=labels_nums,\n one_hot=True, shuffle=True)\n print(\"reid2222222\", train_images_batch.shape, train_labels_batch.shape)\n val_images, val_labels = read_records(val_record_file, _HEIGHT, _WIDTH, type='normalization')\n val_images_batch, val_labels_batch = get_batch_images(val_images, val_labels,\n batch_size=batch_size, labels_nums=labels_nums,\n one_hot=True, shuffle=False)\n images_reid = train_images_batch\n label_reid = train_labels_batch\n # if is_training:\n # images_reid = train_images_batch\n # label_reid = train_labels_batch\n # else:\n # images_reid = val_images_batch\n # label_reid = val_labels_batch\n iterator = dataset_seg.make_one_shot_iterator()\n images_seg, label_seg = iterator.get_next()\n\n images = {\"seg\": images_seg, \"reid\": images_reid}\n labels = {\"seg\": label_seg, \"reid\": label_reid}\n\n # labels_seg_reid = 
tf.zeros(shape=[batch_size, labels_nums], dtype=tf.int32)\n # labels_reid_seg = tf.zeros(shape=[batch_size, 512, 170, 1], dtype=tf.int32)\n\n # images = tf.concat([images_seg, images_reid], 0)\n # labels_seg_all = tf.concat([label_seg, labels_reid_seg], 0)\n # labels_reid_all = tf.concat([labels_seg_reid, label_reid], 0)\n # labels = {\"seg\": labels_seg_all, \"reid\": labels_reid_all}\n # batch_out= 1\n\n return images, labels", "def process_input(input):\n # Create numpy array from csv file passed as input in apply()\n if isinstance(input, dict) and \"test_data\" in input and input[\"test_data\"].startswith('data:'):\n zipped_input = input[\"test_data\"]\n input_file = extract_data(zipped_input)\n try:\n np_array = np.genfromtxt(input_file, delimiter=',', skip_header=1)\n # Predict only on the first ten images.\n return np_array[:10]\n except Exception as e:\n raise Exception(\"Could not create numpy array from data\", e)\n else:\n raise Exception('Please provide input of the form {\"test_data\":\"data://YOUR_USERNAME/keras_model/test_keras_data.csv.zip\"}')", "def input_fn(params):\n batch_size = params[\"batch_size\"]\n\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size = 100)\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record:_decode_record(record, name_to_features),\n batch_size = batch_size,\n drop_remainder=drop_remainder))\n return d", "def _input_fn(input_pipeline_context=None):\n return _create_dataset(options, is_training, input_pipeline_context)", "def _preprocess_input(self, dataset):\n masker = self.masker or dataset.masker\n\n mask_img = masker.mask_img or masker.labels_img\n if isinstance(mask_img, str):\n mask_img = nib.load(mask_img)\n\n # Ensure that protected values are not included among _required_inputs\n assert \"aggressive_mask\" not in self._required_inputs.keys(), \"This is a protected name.\"\n\n if \"aggressive_mask\" in self.inputs_.keys():\n LGR.warning(\"Removing existing 'aggressive_mask' from Estimator.\")\n self.inputs_.pop(\"aggressive_mask\")\n\n # A dictionary to collect masked image data, to be further reduced by the aggressive mask.\n temp_image_inputs = {}\n\n for name, (type_, _) in self._required_inputs.items():\n if type_ == \"image\":\n # If no resampling is requested, check if resampling is required\n if not self.resample:\n check_imgs = {img: nib.load(img) for img in self.inputs_[name]}\n _check_same_fov(**check_imgs, reference_masker=mask_img, raise_error=True)\n imgs = list(check_imgs.values())\n else:\n # resampling will only occur if shape/affines are different\n # making this harmless if all img shapes/affines are the same as the reference\n imgs = [\n resample_to_img(nib.load(img), mask_img, **self._resample_kwargs)\n for img in self.inputs_[name]\n ]\n\n # input to NiFtiLabelsMasker must be 4d\n img4d = concat_imgs(imgs, ensure_ndim=4)\n\n # Mask required input images using either the dataset's mask or the estimator's.\n temp_arr = masker.transform(img4d)\n\n # An intermediate step to mask out bad voxels.\n # Can be dropped once PyMARE is able to handle masked arrays or missing data.\n nonzero_voxels_bool = np.all(temp_arr != 0, axis=0)\n nonnan_voxels_bool = np.all(~np.isnan(temp_arr), axis=0)\n good_voxels_bool = np.logical_and(nonzero_voxels_bool, nonnan_voxels_bool)\n\n data = masker.transform(img4d)\n\n temp_image_inputs[name] = data\n if \"aggressive_mask\" not in self.inputs_.keys():\n self.inputs_[\"aggressive_mask\"] = good_voxels_bool\n else:\n # Remove any 
voxels that are bad in any image-based inputs\n self.inputs_[\"aggressive_mask\"] = np.logical_or(\n self.inputs_[\"aggressive_mask\"],\n good_voxels_bool,\n )\n\n # Further reduce image-based inputs to remove \"bad\" voxels\n # (voxels with zeros or NaNs in any studies)\n if \"aggressive_mask\" in self.inputs_.keys():\n n_bad_voxels = (\n self.inputs_[\"aggressive_mask\"].size - self.inputs_[\"aggressive_mask\"].sum()\n )\n if n_bad_voxels:\n LGR.warning(\n f\"Masking out {n_bad_voxels} additional voxels. \"\n \"The updated masker is available in the Estimator.masker attribute.\"\n )\n\n for name, raw_masked_data in temp_image_inputs.items():\n self.inputs_[name] = raw_masked_data[:, self.inputs_[\"aggressive_mask\"]]", "def input_fn(params):\n batch_size = self.batch_size\n\n num_examples = len(features)\n\n d = tf.data.Dataset.from_tensor_slices({\n \"input_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, max_seq_len],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, max_seq_len],\n dtype=tf.int32),\n \"label_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, max_seq_len],\n dtype=tf.int32),\n \"label_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, max_seq_len],\n dtype=tf.int32),\n })\n\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.batch(batch_size=batch_size)\n return d", "def pre_process(self, dataset):\n\n # np.empty creates an empty array only. You have to replace this with your code.\n X = np.empty((0,0))\n y = np.empty((0))\n\n if dataset == 0:\n # Implement for the abalone dataset\n df = pd.DataFrame(columns=['sex', 'length', 'diameter', 'height', 'whole_weight', 'shucked_weight', 'viscera_weight', 'shell_weight', 'rings'])\n count = 0\n\n with open('Dataset.data') as file: # reading data from file\n data = file.read()\n\n data = data.split('\\n') # split data into different rows\n data = data[:-1] # last one is empty\n for row in data:\n row = row.split()\n df.loc[count] = row # add in dataframe\n count += 1\n\n df['M'] = np.where(df.sex=='M', 1,0) # genders are turned to a one hot encoding\n df['F'] = np.where(df.sex=='F', 1,0)\n df['I'] = np.where(df.sex=='I', 1,0)\n df = df.drop(['sex'], axis=1)\n df = df.dropna()\n\n df = df.sample(frac=1).reset_index(drop=True) # shuffle dataframe\n\n X = df.drop(['rings'], axis=1)\n X = X.values\n X = X.astype(float)\n y = df['rings'].values\n y = y.astype(float)\n\n elif dataset == 1:\n # Implement for the video game dataset\n df = pd.read_csv('VideoGameDataset - Video_Games_Sales_as_at_22_Dec_2016.csv') # read csv directly into a dataframe\n df1 = df[['Critic_Score', 'User_Score', 'Global_Sales']]\n df1 = df1.dropna()\n df1 = df1[df1.User_Score != 'tbd']\n\n df1 = df1.sample(frac=1).reset_index(drop=True) # shuffle rows\n\n X = df1.drop(['Global_Sales'], axis=1)\n X = X.values\n X = X.astype(float)\n y = df1['Global_Sales'].values\n y = y.astype(float)\n\n elif dataset == 2:\n # Implement for the banknote authentication dataset\n df = pd.DataFrame(columns=['variance', 'skewness', 'curtosis', 'entropy', 'class'])\n count = 0\n\n with open('data_banknote_authentication.txt') as file: # reading file \n data = file.read()\n data = data.split('\\n')\n data = data[:-1]\n for row in data:\n row = row.split(',')\n df.loc[count] = [float(elt) for elt in row[:-1]] + [int(row[-1])] # last column has class so it is int rest are float\n count += 1\n\n df = df.sample(frac=1).reset_index(drop=True) # shuffle dataset\n\n X = 
df.drop(['class'], axis=1)\n X = X.values\n y = df['class'].values\n y = y.astype(int)\n\n return X, y", "def prepare_data_inputs(self, process, inputs, domain, **kwargs):\n temp_process = process.copy()\n\n domains = {}\n\n if domain is not None:\n domains[domain.name] = domain\n\n temp_process.domain = domain\n\n if not isinstance(inputs, (list, tuple)):\n inputs = [inputs, ]\n\n temp_process.inputs.extend(inputs)\n\n if 'gridder' in kwargs:\n temp_process.gridder = kwargs.pop('gridder')\n\n temp_process.add_parameters(**kwargs)\n\n processes, variables = temp_process.collect_input_processes()\n\n # Collect all the domains from nested processes\n for item in list(processes.values()):\n if item.domain is not None and item.domain.name not in domains:\n domains[item.domain.name] = item.domain\n\n variable = json.dumps([x.to_dict() for x in list(variables.values())])\n\n domain = json.dumps([x.to_dict() for x in list(domains.values())])\n\n operation = json.dumps([x.to_dict() for x in list(processes.values())])\n\n return variable, domain, operation", "def eval_input_fn(df):\n fts = df.drop(columns=['class'])\n labs = df.filter(items=['class']).values.astype(int)\n\n features = {k:list(v.values) for k,v in fts.items()}\n features = dict(features)\n x = fts.values\n x = np.array([[x]]).reshape((np.shape(x)[0], np.shape(x)[1], 1, 1))\n # Convert the inputs to a Dataset.\n dataset = tf.data.Dataset.from_tensor_slices({\"x_ph\":x,\"y_ph\":convert_to_one_hot(labs)})\n \n # Shuffle, repeat, and batch the examples.\n dataset = dataset.shuffle(1000).batch(np.shape(x)[0]).repeat()\n # Return the read end of the pipeline.\n return dataset.make_one_shot_iterator().get_next()", "def input_fn(params):\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n batch_size = params[\"train_batch_size\"]\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n else:\n batch_size = params[\"predict_batch_size\"]\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n\n return d", "def eval_input_fn(features, labels, batch_size):\n features = dict(features)\n if labels is None:\n # No labels, use only features. 
(in prediction)\n inputs = features\n else:\n inputs = (features, labels)\n print(inputs)\n # Convert the inputs to a Dataset.\n dataset = tf.data.Dataset.from_tensor_slices(inputs)\n\n # Batch the examples\n assert batch_size is not None, \"batch_size must not be None\"\n dataset = dataset.batch(batch_size)\n\n # Return the dataset.\n return dataset", "def train_input(config, params):\n\n def _grouping(rim, rla, pim, pla, imp, lap):\n # group dataset elements as required by estimator\n features = {\n 'rawimages': rim,\n 'proimages': pim,\n 'rawimagespaths': imp,\n 'rawlabelspaths': lap,\n }\n labels = {\n 'rawlabels': rla,\n 'prolabels': pla,\n }\n\n # next line for distributed debugging\n # tf.string tensors is not supported for DMA read/write to GPUs (TF bug)\n if params.distribute:\n del features['rawimagespaths']\n del features['rawlabelspaths']\n\n return (features, labels)\n\n with tf.variable_scope('input_pipeline'):\n dataset = prebatch_dataset(config, params)\n dataset = dataset.batch(get_temp_Nb(config, params.Nb))\n dataset = postbatch_dataset(dataset, config, params)\n dataset = dataset.map(_grouping, num_parallel_calls=NUM_PARALLEL_CALLS)\n dataset = dataset.prefetch(None)\n\n return dataset", "def process_data(data, labels):\n\t\n\t# Split the dataset of string into train, validation, and test \n\t# Use a 70/15/15 split\n\t# train_test_split shuffles the data before splitting it \n\t# Stratify keeps the proportion of labels the same in each split\n\n\t# -- WRITE THE SPLITTING CODE HERE --\n\t# Split the data into 70 percent train and 30 percent test and validate data\n\ttrain_X, test_X_split, train_Y, test_Y_split = train_test_split(data, labels, test_size=0.30, stratify=labels,random_state= 1)\n\t# Split the remaining 30 percent data into 15 percent test and validate data each\n\ttest_X, val_X, test_Y, val_Y = train_test_split(test_X_split, test_Y_split, test_size=0.50, stratify=test_Y_split, random_state= 1)\n\n\t# Preprocess each dataset of strings into a dataset of feature vectors\n\t# using the CountVectorizer function. 
\n\t# Note, fit the Vectorizer using the training set only, and then\n\t# transform the validation and test sets.\n\n\t# -- WRITE THE PROCESSING CODE HERE --\n\t# Preprocess dataset using CountVectorizer from ngram range of 1 to 3\n\tvector = CountVectorizer(ngram_range=(1,3))\n\t# Fit data on train dataset\n\ttrain_X = vector.fit_transform(train_X)\n\t# Transform data on test dataset\n\ttest_X = vector.transform(test_X)\n\t# Transform data on validate dataset.\n\tval_X = vector.transform(val_X)\n\t# Return the training, validation, and test set inputs and labels\n\treturn train_X, train_Y, val_X, val_Y, test_X, test_Y\n\t# -- RETURN THE ARRAYS HERE -- ", "def input_fn(params):\n batch_size = params[\"batch_size\"]\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n\n return d", "def process_raw_data(self):\n \n # Get all participant names, or return straight away if no data was\n # loaded yet.\n if hasattr(self, \"raw\"):\n participants = self.raw.keys()\n participants.sort()\n else:\n self.data = None\n return\n\n # Count the number of participants.\n n = len(participants)\n \n # Find out how many questions there were.\n n_questions = 0\n for i, ppname in enumerate(participants):\n if self.raw[ppname] is None:\n continue\n if len(self.raw[ppname][\"QuestionNumber\"]) > n_questions:\n n_questions = len(self.raw[ppname][\"QuestionNumber\"])\n \n # Define some variables of interest.\n vor = []\n for i in range(n_questions):\n vor.append(\"Q%d_resp\" % (i+1))\n vor.append(\"Q%d_RT\" % (i+1))\n \n # Create a data dict for each variable of interest.\n self.data = {}\n self.data[\"ppname\"] = []\n for var in vor:\n self.data[var] = numpy.zeros(n, dtype=float) * numpy.NaN\n \n # Loop through all participants.\n for i, ppname in enumerate(participants):\n # Add the participant name.\n self.data[\"ppname\"].append(copy.deepcopy(ppname))\n # Skip empty datasets.\n if self.raw[ppname] is None:\n continue\n # Compute stuff relevant to this task.\n for j, qnr in enumerate(self.raw[ppname][\"QuestionNumber\"]):\n # Split Questionnaire 3, Q13 and Q14 into sub-questions\n if \"Q3\" in self._task_name and int(qnr) in [13,14]:\n # These questions split out into two parts: A description\n # of what each sub-part is, and a Boolean response for\n # each sub-part in the question. 
Example:\n # \"1_1_1_1_1_0//Television_VideogameConsole(suchas:WiiUPlayStationXboxorNintendoDS)_Tablet(likeanIPad)_Smartphone_LaptoporDesktopComputer_Noneofthese\"\n bool_resp, descr = self.raw[ppname][\"Response\"][j].split(\"//\")\n bool_resp = map(int, bool_resp.split(\"_\"))\n descr = descr.split(\"_\")\n # Store the data in the dict.\n for k, item in enumerate(descr):\n # Clean up the item name.\n if \"(\" in item:\n item = item[:item.find(\"(\")]\n var = \"Q%s_%s_resp\" % (int(qnr), item)\n # Create a new entry in the dict for this variable, if\n # one doesn't exist yet.\n if var not in self.data.keys():\n self.data[var] = numpy.zeros(n, dtype=float) * numpy.NaN\n # Store the data in the dict.\n self.data[var][i] = bool_resp[k]\n # Store response time for the whole item.\n self.data[\"Q%s_RT\" % (int(qnr))][i] = \\\n float(self.raw[ppname][\"TimeEndQuestion\"][j]) \\\n - float(self.raw[ppname][\"TimeStartQuestion\"][j])\n # All other questions are one-question one-response:\n else:\n # Store the data in the dict.\n self.data[\"Q%s_resp\" % (int(qnr))][i] = \\\n float(self.raw[ppname][\"Response\"][j])\n self.data[\"Q%s_RT\" % (int(qnr))][i] = \\\n self.raw[ppname][\"TimeEndQuestion\"][j] \\\n - self.raw[ppname][\"TimeStartQuestion\"][j]", "def apply(self, inputs):\n raise NotImplementedError()", "def input_fn(params):\n # batch_size = params[\"batch_size\"]\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=buffer_size)\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n\n return d", "def process_sample_train(self):\n raise NotImplementedError", "def _pre_process_record(self, data):\n result = []\n symbolic_split = \",\"\n if isinstance(data, dict):\n if self.measure is None:\n logging.error(\"Missing the name of keys pointing to values\")\n raise UnSADException.data_format_exception()\n if self.timestamp is not None:\n if self.timestamp in data:\n try:\n result.append(float(data[self.timestamp]))\n [result.append(data[measure])\n for measure in self.measure]\n except RuntimeError:\n logging.error(\"Invalid input data type, should be a numerical type\")\n logging.error(\"Input data should contain all the fields \"\n \"that are specified when initialize the detector: \" + str(self.measure))\n raise UnSADException.data_type_exception()\n else:\n logging.error(\"Input data should contain a timestamp field:\" + str(self.timestamp))\n raise UnSADException.data_format_exception()\n else:\n try:\n [result.append(data[measure]) for measure in self.measure]\n except RuntimeError:\n logging.error(\"Input data should contain all the fields \"\n \"that are specified when initialize the detector: \" + str(self.measure))\n raise UnSADException.data_format_exception()\n elif isinstance(data, Iterable) and not isinstance(data, str):\n if self.timestamp is not None:\n if len(data) == len(self.measure) + 1:\n try:\n result = list(data)\n result[0] = float(result[0])\n except RuntimeError as e:\n logging.error(\"Invalid input data type, timestamp should be a numerical type\")\n raise UnSADException.data_type_exception()\n else:\n logging.error(\"The number of input parameters:\" + str(\n len(data)) + \" does not match with this detectors:\" + str(len(self.measure) + 1))\n raise 
UnSADException.input_number_exception()\n else:\n if self.measure is None or len(data) == len(self.measure):\n result = data\n else:\n logging.error(\"The number of input parameters:\" + str(\n len(data)) + \" does not match with this detectors:\" + str(len(self.measure)))\n raise UnSADException.input_number_exception()\n else:\n if (self.measure is None or len(self.measure) == 1) and self.timestamp is None:\n if self.symbolic:\n return str(data)\n else:\n try:\n return float(data)\n except RuntimeError as e:\n logging.error(\"Invalid input data type, should be a numerical type\")\n raise UnSADException.data_type_exception()\n else:\n logging.error(\"This detector is not initialized properly\")\n raise UnSADException.not_proper_initialize_exception()\n\n if not self.symbolic:\n try:\n processed_result = [float(result[i])\n for i in range(len(result))]\n except RuntimeError as e:\n logging.error(\"Invalid input data type, should be a numerical type\")\n raise UnSADException.data_type_exception()\n\n return processed_result[0] if len(processed_result) == 1 else processed_result\n\n else:\n if self.timestamp is not None:\n return [result[0], symbolic_split.join([str(s) for s in result[1:]])]\n else:\n return symbolic_split.join([str(s) for s in result])", "def handle_input(data: dict):", "def input_fn(self, ctx=None):\n dataset = self.make_parsed_dataset(ctx)\n\n # Prefetch overlaps in-feed with training\n dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n\n if self.is_training:\n options = tf.data.Options()\n options.experimental_deterministic = False\n dataset = dataset.with_options(options)\n\n return dataset", "def eval_input_fn(features, labels, batch_size):\n features = dict(features)\n if labels is None:\n # No labels, use only features.\n inputs = features\n else:\n inputs = (features, labels)\n\n # Convert the inputs to a Dataset.\n dataset = tf.data.Dataset.from_tensor_slices(inputs)\n\n # Batch the examples\n assert batch_size is not None, \"batch_size must not be None\"\n dataset = dataset.batch(batch_size)\n\n # Return the dataset.\n return dataset", "def eval_input_fn(features, labels, batch_size):\n features=dict(features)\n if labels is None:\n # No labels, use only features.\n inputs = features\n else:\n inputs = (features, labels)\n\n # Convert the inputs to a Dataset.\n dataset = tf.data.Dataset.from_tensor_slices(inputs)\n\n # Batch the examples\n assert batch_size is not None, \"batch_size must not be None\"\n dataset = dataset.batch(batch_size)\n\n # Return the dataset.\n return dataset", "def eval_input_fn(features, labels, batch_size):\n features=dict(features)\n if labels is None:\n # No labels, use only features.\n inputs = features\n else:\n inputs = (features, labels)\n\n # Convert the inputs to a Dataset.\n dataset = tf.data.Dataset.from_tensor_slices(inputs)\n\n # Batch the examples\n assert batch_size is not None, \"batch_size must not be None\"\n dataset = dataset.batch(batch_size)\n\n # Return the dataset.\n return dataset", "def eval_input_fn(features, labels, batch_size):\n features=dict(features)\n if labels is None:\n # No labels, use only features.\n inputs = features\n else:\n inputs = (features, labels)\n\n # Convert the inputs to a Dataset.\n dataset = tf.data.Dataset.from_tensor_slices(inputs)\n\n # Batch the examples\n assert batch_size is not None, \"batch_size must not be None\"\n dataset = dataset.batch(batch_size)\n\n # Return the dataset.\n return dataset", "def input_fn(params):\n batch_size = params[\"batch_size\"]\n\n num_examples = 
len(features)\n\n # This is for demo purposes and does NOT scale to large data sets. We do\n # not use Dataset.from_generator() because that uses tf.py_func which is\n # not TPU compatible. The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices({\n \"unique_ids\":\n tf.constant(all_unique_ids, shape=[num_examples], dtype=tf.int32),\n \"input_ids\":\n tf.constant(all_input_ids, \n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(all_input_mask,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_type_ids\":\n tf.constant(all_input_type_ids,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n })\n\n d = d.batch(batch_size=batch_size, drop_remainder=False)\n return d", "def input(self, *input):\n for i in input:\n self._parser.feed(i)", "def eval_input_fn(features, labels, batch_size):\n dataset = tf.data.Dataset.from_tensor_slices(({'features': features}, labels))\n\n assert batch_size is not None, \"batch_size must not be None\"\n dataset = dataset.batch(batch_size,drop_remainder=True)\n return dataset", "def _data_process(self, v):\n pass", "def eval_input_fn(features, labels, batch_size):\n features=dict(features)\n if labels is None:\n inputs = features\n else:\n inputs = (features, labels)\n\n dataset = tf.data.Dataset.from_tensor_slices(inputs)\n \n assert batch_size is not None, \"batch_size must not be None\"\n dataset = dataset.batch(batch_size)\n\n return dataset", "def input_fn(params):\n batch_size = params[\"batch_size\"]\n\n num_examples = len(features)\n\n # This is for demo purposes and does NOT scale to large data sets. We do\n # not use Dataset.from_generator() because that uses tf.py_func which is\n # not TPU compatible. The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices({\n \"input_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"segment_ids\":\n tf.constant(\n all_segment_ids,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"label_ids\":\n tf.constant(all_label_ids, shape=[num_examples], dtype=tf.float32),\n })\n\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=1000)\n\n d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)\n return d", "def data_prepare(raw_datapath, save_path, sample_size=256):\n ## data path\n data_path = raw_datapath\n ## sample size\n data_size = sample_size\n\n ## data lists\n pts = ['100', '104', '108', '113', '117', '122', '201', '207', '212', '217', '222', '231',\n '101', '105', '109', '114', '118', '123', '202', '208', '213', '219', '223', '232',\n '102', '106', '111', '115', '119', '124', '203', '209', '214', '220', '228', '233',\n '103', '107', '112', '116', '121', '200', '205', '210', '215', '221', '230', '234']\n\n ## map the ~19 classes to 5 classes\n ## according to the paper https://arxiv.org/pdf/1805.00794.pdf\n mapping = {'N': 0, 'L': 0, 'R': 0, 'e': 0, 'j': 0, 'B': 0, # N = 0\n 'A': 1, 'a': 1, 'J': 1, 'S': 1, # S = 1\n 'V': 2, 'E': 2, 'r': 2, 'n': 2, # V = 2\n 'F': 3, # F = 3\n '/': 4, 'f': 4, 'Q': 4, '?': 4} # Q = 4\n ignore = ['+', '!', '[', ']', 'x', '~', '|', '\"']\n\n ## we split the each set of the data into size 256( which we can see the ecg pulse, just one pulse)\n def dataSaver(dataset=pts, data_size=data_size):\n input_size = data_size ## default\n\n def dataprocess():\n ecg = np.zeros((1, input_size))\n label 
= np.zeros((1, 1))\n for num in tqdm(dataset):\n print(num, 'now')\n idx = 0 ## count for the matrixes\n record = wfdb.rdrecord(data_path + num, smooth_frames=True)\n\n ## normalize the data ecg\n signals0 = np.nan_to_num(record.p_signal[:, 0])\n # signals1 = np.nan_to_num(record.p_signal[:, 1])\n min_max_scaler = preprocessing.MinMaxScaler()\n signals0 = min_max_scaler.fit_transform(signals0.reshape(-1, 1))\n # signals1 = min_max_scaler.fit_transform(signals1.reshape(-1, 1))\n signals0 = signals0.reshape(-1)\n # signals1 = signals1.reshape(-1)\n\n ## find peaks # R-peaks\n ## we only use the channel 0\n peaks, _ = find_peaks(signals0, distance=150)\n\n X = np.zeros((len(peaks), input_size))\n Y = np.zeros((len(peaks), 1))\n\n # skip a first peak to have enough range of the sample\n # in the for loop, we look for the annotation\n for peak in tqdm(peaks[1:-1]):\n start, end = peak - input_size // 2, peak + input_size // 2\n start = max([0, start])\n end = min([len(signals0), end])\n ann = wfdb.rdann(data_path + num, extension='atr', sampfrom=start, sampto=end,\n return_label_elements=['symbol'])\n symbol = ann.symbol\n count = 0\n if len(symbol) != 1:\n for sym in symbol:\n if sym in ignore:\n count += 1\n continue\n elif sym == 'N':\n continue\n else:\n symbol = sym\n break\n if count > 0 and len(symbol) > 1:\n symbol = '+'\n elif len(symbol) > 1:\n symbol = 'N'\n elif len(symbol) == 0:\n symbol = '+'\n assert len(symbol) <= 1, \"the symbol is not only one.{} len\".format(len(symbol))\n\n if len(symbol) == 1:\n for ss in symbol:\n if ss in ignore:\n continue\n else:\n Y[idx, 0] = mapping[ss]\n sig = signals0[start:end]\n X[idx, :len(sig)] = sig\n idx += 1\n ecg = np.concatenate((ecg, X), axis=0)\n label = np.concatenate((label, Y), axis=0)\n ecg = ecg[1:, :]\n label = label[1:, :]\n ecg = pd.DataFrame(ecg)\n label = pd.DataFrame(label)\n\n return ecg, label\n ecg, label = dataprocess()\n return ecg, label\n\n ecg, label = dataSaver(pts)\n ecg_path = save_path + \"/ecg_signal_{}.csv\".format(data_size)\n label_path = save_path + \"/label_{}.csv\".format(data_size)\n ecg.to_csv(ecg_path, index=None, header=None)\n label.to_csv(label_path, index=None, header=None)\n return ecg, label", "def prepare_dataset(fpath):\n raise NotImplementedError", "def input_fn(params):\n batch_size = params[\"batch_size\"]\n\n num_examples = len(features)\n\n # This is for demo purposes and does NOT scale to large data sets. We do\n # not use Dataset.from_generator() because that uses tf.py_func which is\n # not TPU compatible. 
The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices({\n \"input_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"segment_ids\":\n tf.constant(\n all_segment_ids,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"label_ids\":\n tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),\n })\n\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)\n return d", "def run(self, *arg, **kw):\n self.dirty = False\n for port in self.inputs:\n self.get_input_data(port)", "def input_fn(params):\n print(params)\n batch_size = 500\n\n num_examples = len(features)\n\n d = tf.data.Dataset.from_tensor_slices({\n \"input_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"segment_ids\":\n tf.constant(\n all_segment_ids,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"label_ids\":\n tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),\n })\n\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)\n return d", "def input_fn(params):\n batch_size = params[\"batch_size\"]\n\n num_examples = len(features)\n\n # This is for demo purposes and does NOT scale to large data sets. We do\n # not use Dataset.from_generator() because that uses tf.py_func which is\n # not TPU compatible. The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices(\n {\n \"label_ids\": tf.constant(\n all_label_ids, shape=[num_examples], dtype=tf.int32\n ),\n \"input_ids\": tf.constant(\n all_input_ids, shape=[num_examples, seq_length], dtype=tf.int32\n ),\n \"input_mask\": tf.constant(\n all_input_mask, shape=[num_examples, seq_length], dtype=tf.int32\n ),\n \"segment_ids\": tf.constant(\n all_segment_ids, shape=[num_examples, seq_length], dtype=tf.int32\n ),\n }\n )\n\n d = d.batch(batch_size=batch_size, drop_remainder=False)\n return d", "def pre_process_data(self, all_labels, all_data):\n\n # [1] Normalizes data\n all_data = self.pre_precess_manager.normalization(all_data)\n\n data_train, data_test, label_train, label_test = train_test_split(all_data, all_labels, test_size=0.1,\n shuffle=True)\n\n return data_train, data_test, label_train, label_test", "def fixture_input_data(wrapper_arguments):\n n_features, classes = wrapper_arguments\n num_data = 50\n data = np.random.rand(num_data, n_features)\n labels = np.random.choice(classes, size=num_data)\n\n return data, labels", "def _process_dataset(all_train_img, all_train_label, all_test_img, all_test_label):\n # Read all training and test images and set the correct path\n train_files = tf.io.gfile.listdir(all_train_img)\n test_files = tf.io.gfile.listdir(all_test_img)\n all_train_class_path = [os.path.join(all_train_img, f) for f in train_files]\n all_test_img_path = [os.path.join(all_test_img, f) for f in test_files]\n # Since Labels start at 1, substract -1 for correct indices with starting '0'\n label_np_test = read_labels_txt(all_test_label) - 1\n synsets_np_train = read_labels_mat(all_train_label)\n\n all_train_img_path = []\n label_np_train = []\n for folder in all_train_class_path:\n img_class_files = 
tf.io.gfile.listdir(folder)\n synset = os.path.basename(os.path.normpath(folder))\n label_train = synsets_np_train.index(synset)\n for f in img_class_files:\n all_train_img_path.append(os.path.join(folder, f))\n label_np_train.append(label_train)\n\n # Create the Datasets for training and test images with corresponding labels\n path_ds_train = tf.data.Dataset.from_tensor_slices((all_train_img_path, label_np_train))\n img_label_ds_train = path_ds_train.map(_process_image)\n path_ds_test = tf.data.Dataset.from_tensor_slices((all_test_img_path, label_np_test))\n img_label_ds_test = path_ds_test.map(_process_image)\n\n print(img_label_ds_train)\n print(img_label_ds_test)\n\n # Check an example image if necessary\n # example, = img_label_ds_test.take(1)\n for i in range(5):\n example, = img_label_ds_train.take(1)\n image, label = example[0], example[1]\n plt.figure(i)\n if image.shape[2] == 1:\n plt.imshow(tf.squeeze(image), cmap='gray')\n else:\n plt.imshow(image/255)\n print(\"Label: {}\".format(label.numpy()))\n plt.show()\n\n return img_label_ds_train, img_label_ds_test", "def input_fn(data_file, num_epochs, shuffle, batch_size):\n assert tf.gfile.Exists(data_file), (\n '%s not found. Please make sure you have either run data_download.py or '\n 'set both arguments --train_data and --test_data.' % data_file)\n\n def parse_csv(value):\n print('Parsing', data_file)\n columns = tf.decode_csv(value, record_defaults=_CSV_COLUMN_DEFAULTS)\n features = dict(zip(_CSV_COLUMNS, columns))\n\n labels = features.pop('price')\n return features, tf.equal(labels, '>1000')\n\n # Extract lines from input files using the Dataset API.\n\n dataset = tf.data.TextLineDataset(data_file)\n\n if shuffle:\n dataset = dataset.shuffle(buffer_size=_NUM_EXAMPLES['train'])\n\n dataset = dataset.map(parse_csv, num_parallel_calls=5)\n\n # We call repeat after shuffling, rather than before, to prevent separate\n # epochs from blending together.\n dataset = dataset.repeat(num_epochs)\n dataset = dataset.batch(batch_size)\n return dataset", "def input_fn(params):\n batch_size = params[\"batch_size\"]\n\n num_examples = len(features)\n\n d = tf.data.Dataset.from_tensor_slices({\n \"input_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"segment_ids\":\n tf.constant(\n all_segment_ids,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"label_ids\":\n tf.constant(all_label_ids, shape=[num_examples, len(LABEL_COLUMNS)], dtype=tf.int32),\n })\n\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)\n return d", "def dataset_parser(self, value):\n pass", "def get_inputs_(self, batch, **kwargs):\n raise NotImplementedError", "def process(self, input, is_processed=False):\n raise NotImplementedError", "def prepare_data(qids_raw, conditions_raw, outputs_raw):\n\n qids = []\n conditions = []\n outputs = []\n dictionaries_standardization = []\n for qid_raw, condition_raw, output_raw in zip(qids_raw, conditions_raw, outputs_raw):\n qid, condition, output, dictionary = preprocess_sample(qid_raw, condition_raw, output_raw)\n qids.append(qid)\n conditions.append(condition)\n outputs.append(output)\n dictionaries_standardization.append(dictionary)\n\n return qids, conditions, outputs, dictionaries_standardization", "def __init__(self, dataset_name, split, *args, data_dir=None, **kwargs):\n 
super(TfdsInput, self).__init__(*args, **kwargs)\n self.dataset_name = dataset_name\n self.split = split\n self.data_dir = data_dir", "def eval_input_fn(features, labels, batch_size):\n features = dict(features)\n if labels is None:\n # No labels, use only features.\n inputs = features\n else:\n inputs = (features, labels)\n\n # Convert the inputs to a Dataset.\n dataset = tf.data.Dataset.from_tensor_slices(inputs)\n\n # Batch the examples\n assert batch_size is not None, \"batch_size must not be None\"\n dataset = dataset.batch(batch_size)\n\n return dataset", "def _prepare_data(self):\n #TODO hardcoded values need to change\n print_info(\"Preprocessing the train data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"train\"),\n self.TRAIN_OUT_PATH)\n\n print_info(\"Preprocessing the test data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"test\"),\n self.TEST_OUT_PATH)\n\n print_info(\"Preprocessing the validation data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"val\"),\n self.VAL_OUT_PATH)", "def build_input_pipeline(data_dir):\n votes = np.load(os.path.join(data_dir, \"votes.npy\"))\n bill_indices = np.load(os.path.join(data_dir, \"bill_indices.npy\"))\n senator_indices = np.load(os.path.join(data_dir, \"senator_indices.npy\"))\n senator_map = np.loadtxt(os.path.join(data_dir, \"senator_map.txt\"),\n dtype=str, \n delimiter=\"\\n\")\n num_bills = len(np.unique(bill_indices))\n num_senators = len(senator_map)\n dataset_size = len(votes)\n dataset = tf.data.Dataset.from_tensor_slices(\n (votes, bill_indices, senator_indices))\n # Use the complete dataset as a batch.\n batch_size = len(votes)\n batches = dataset.repeat().batch(batch_size).prefetch(batch_size)\n iterator = batches.make_one_shot_iterator()\n return iterator, senator_map, num_bills, num_senators, dataset_size", "def preprocess(self, data):\n (w,h,f) = self.rawinputformat()\n dt = numpy.dtype(numpy.uint8)\n nb = numpy.frombuffer(data,dt,-1,0)\n actual_stream_width = (w&1)+w # hack, rather get this from the app sink\n if(actual_stream_width != self.reqsize):\n nb = nb.reshape(h,actual_stream_width,3)\n nb = nb[0:h,0:w,0:3] # crop to network input size\n else:\n nb = nb.reshape((actual_stream_width,actual_stream_width,3))\n img = nb.astype('float32')\n #Preprocess image\n #for i in range(3):\n # img[:,:,i] = (img[:,:,i] - self.mean[i]) * self.std[i]\n #img = resize(img/255.0,(w,h),1)\n img = img/255.0\n print(img.shape)\n #print(img[0,0,:])\n return img.astype(numpy.float16)" ]
[ "0.72874445", "0.6668611", "0.64020455", "0.63785404", "0.63785404", "0.6376394", "0.6374364", "0.62511826", "0.6218678", "0.6197006", "0.61118746", "0.6104894", "0.6071575", "0.60514355", "0.60514355", "0.6045758", "0.6040936", "0.6020063", "0.6003791", "0.59921783", "0.598605", "0.5952605", "0.59506273", "0.5945905", "0.5936768", "0.5934657", "0.5914531", "0.58944803", "0.5891792", "0.5887318", "0.58855903", "0.5867118", "0.5859839", "0.58450073", "0.58233666", "0.58180195", "0.58133614", "0.5800514", "0.5789022", "0.57700056", "0.5767638", "0.5758881", "0.57548803", "0.57466483", "0.5739512", "0.5738125", "0.573181", "0.5725655", "0.57246035", "0.57143587", "0.571418", "0.5713456", "0.5709923", "0.5708881", "0.57040393", "0.5703052", "0.56917286", "0.5690987", "0.5687438", "0.5683868", "0.5655439", "0.56469303", "0.5643464", "0.5631215", "0.5625694", "0.5621366", "0.5590959", "0.5590138", "0.55805665", "0.55760646", "0.5571461", "0.5568745", "0.5566184", "0.5566184", "0.5566184", "0.5566112", "0.5559821", "0.5559316", "0.55575424", "0.5556808", "0.55552506", "0.55494404", "0.5549304", "0.5546999", "0.5545559", "0.55374825", "0.5537425", "0.5534549", "0.5527901", "0.5524191", "0.55212986", "0.55212295", "0.55194134", "0.5506975", "0.5502541", "0.550205", "0.54977155", "0.54976094", "0.54908144", "0.5488153", "0.5486309" ]
0.0
-1
Make sure the tables are dropped.
def test_drop_tables(self): self.assertEqual(Manager.table_exists().run_sync(), True) self.assertEqual(Band.table_exists().run_sync(), True) drop_tables(Manager, Band) self.assertEqual(Manager.table_exists().run_sync(), False) self.assertEqual(Band.table_exists().run_sync(), False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def drop(self):\n self.__init__()\n cursor = self.connection.cursor()\n cursor.execute(drop_tables)\n queries = cursor.fetchall()\n for i in queries:\n cursor.execute(i[0])\n\n self.commit()\n self.__init__()", "def drop_tables():\n drop_table(ShoppingList)\n drop_table(User)\n drop_table(Category)", "def drop(self):\n c = self.cursor()\n for table in ['experiment','fact']:\n c.execute(\"drop table if exists {}\".format(table))\n self.commit()", "def drop_tables(self):\n with sql.connect('./{}.db'.format(self.name)) as conn:\n conn.execute(\"DROP TABLE tweets\")\n conn.execute(\"DROP TABLE tweet_peaks\")", "def drop_tables(self):\n con = self.connect()\n cursor = con.cursor()\n cursor.execute(\"\"\"DROP TABLE IF EXISTS users CASCADE\"\"\")\n cursor.close()\n con.commit()\n con.close()", "def drop_database_tables(cls):\n cursor = Database.connect_to_db()\n # drop users table\n sql_command = \"\"\" DROP TABLE IF EXISTS users CASCADE;\n \"\"\"\n cursor.execute(sql_command)\n # drop parties table\n sql_command = \"\"\" DROP TABLE IF EXISTS parties CASCADE;\n \"\"\"\n cursor.execute(sql_command)\n # drop offices table\n sql_command = \"\"\" DROP TABLE IF EXISTS offices CASCADE;\n \"\"\"\n cursor.execute(sql_command)\n # drop candidates table\n sql_command = \"\"\" DROP TABLE IF EXISTS candidates CASCADE;\n \"\"\"\n cursor.execute(sql_command)\n # drop votes table\n sql_command = \"\"\" DROP TABLE IF EXISTS votes CASCADE;\n \"\"\"\n cursor.execute(sql_command)\n # drop applications table\n sql_command = \"\"\" DROP TABLE IF EXISTS applications CASCADE;\n \"\"\"\n cursor.execute(sql_command)", "def tear_down():\n db.flush()\n for table in metadata.tables.values():\n db.execute(table.delete())", "def drop_tables() -> None:\n print(\"Dropping database tables using SQLAlchemy ORM\")\n Base.metadata.drop_all(engine)\n print(\"Done dropping tables\")", "def clean_up():\n drop_all_tables()\n create_all()", "def cleanup(self):\n for table in filter(lambda x: self.cmd.exists(x, silent=(log.level < DEBUG)), self.tables):\n log.info(\"MLoad\", \"Dropping table '{}'...\".format(table))\n self.cmd.drop_table(table, silent=True)", "def drop_all_tables():\n\tcommon_db.drop_all_tables()", "def _drop_tables(self, tables):\n cursor = self.conn.cursor()\n try:\n cursor.execute('DROP TABLE ' + ','.join(map(str, tables)))\n except:\n pass\n finally:\n cursor.close()", "def drop(self):\n cursor = self.connect.create_cursor()\n queries = (\n (\"USE dbPurBeurre\"),\n (\"SET foreign_key_checks = 0\"),\n (\"DROP TABLE IF EXISTS Asso_Prod_Cat\"),\n (\"DROP TABLE IF EXISTS Categories\"),\n (\"DROP TABLE IF EXISTS Produits\")\n )\n\n for query in queries:\n cursor.execute(query)", "def refresh_tables(db):\r\n try:\r\n c = db.cursor()\r\n c.execute(\"DROP TABLE waiting\")\r\n c.execute(\"DROP TABLE help\")\r\n c.execute(\"DROP TABLE helped\")\r\n create_tables()\r\n except Error as e:\r\n print(e)", "def test_table_drop(app, runner):\n result = runner.invoke(drop_tables, input=\"y\")\n\n with app.app_context():\n assert not db.engine.has_table('link')\n assert not db.engine.has_table('user')", "def delete_all_tables(self):\n\t\tif self.__dbfile is not None:\n\t\t\tfor table_name in list(LocalData.table_info.keys()):\n\t\t\t\tif self.table_exists(table_name):\n\t\t\t\t\tself._conn.execute(\"DROP TABLE %s\" % table_name)\n\t\t\tself._conn.commit()", "def tearDown(self):\n drop_all_tables()\n create_all()", "def tearDown(self):\n self.database.truncate_all_tables()", "def drop_tables (cur, conn):\n for query in 
drop_table_queries:\n cur.execute(query)\n conn.commit()", "def tearDown(self):\n\n InitializeDb('TEST_DATABASE_URI').drop_tables()", "def clean_db():\n db = get_db()\n tables = db.tables\n for table in tables:\n db[table].drop()", "def tearDown(self):\r\n\r\n db.session.rollback()\r\n db.drop_all()", "def drop_tables():\n commands = (\n \"\"\"\n DROP TABLE utilizador_partilha CASCADE\n \"\"\",\n \"\"\" \n DROP TABLE album CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE compositores CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE grupo CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE letras_musica CASCADE \n \"\"\",\n \"\"\"\n DROP TABLE playlist CASCADE \n \"\"\",\n \"\"\"\n DROP TABLE editora CASCADE \n \"\"\",\n \"\"\"\n DROP TABLE criticas CASCADE \n \"\"\",\n \"\"\"\n DROP TABLE genero_musical CASCADE \n \"\"\",\n \"\"\"\n DROP TABLE utilizador_partilha_criticas CASCADE \n \"\"\",\n \"\"\"\n DROP TABLE utilizador_partilha_playlist CASCADE \n \"\"\",\n \"\"\"\n DROP TABLE album_genero_musical CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE letras_musica_playlist CASCADE \n \"\"\")\n\n try:\n\n conn = psycopg2.connect(host=\"localhost\",database=\"SoundBox\", user=\"postgres\", password=\"postgres\")\n cur = conn.cursor()\n # DROP table one by one\n for command in commands:\n cur.execute(command)\n # close communication with the PostgreSQL database server\n cur.close()\n # commit the changes\n conn.commit()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()", "def drop_tables(cur, conn):\n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()", "def drop_tables(cur, conn):\n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()", "def drop_tables(cur, conn):\n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()", "def drop_tables(cur, conn):\n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()", "def drop_tables(cur, conn):\n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()", "def drop_tables(cur, conn):\n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()", "def drop_tables(cur, conn):\n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()", "def drop_tables(cur, conn):\n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()", "def drop_tables(cur, conn) -> None:\n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()", "def drop_tables(cur, conn):\n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()\n print('All tables dropped.')", "def drop_tables(cur, conn): \n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()", "def drop_db(self) -> None:\n try:\n if not self._check_delete_okay():\n return\n except DatabaseWriteException as e:\n raise e\n\n existing_tables = self.list_tables()\n for table_name in existing_tables:\n self.dynamodb.Table(table_name).delete()", "def drop_tables(session):\n for query in drop_table_queries:\n session.execute(query)", "def drop_tables(cur, conn):\n \n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()", "def _drop_tables(self):\n logging.info(\"Dropping all tables from the database!\")\n db_conn = self.engine.connect()\n query_result = list()\n query_result.append(db_conn.execute(\n \"DROP SCHEMA public CASCADE;CREATE SCHEMA public;\"))\n\n if self.database_choice == 'remote_database' or self.database_choice \\\n == 'remote_database_master':\n query_result.append(db_conn.execute('''\n GRANT ALL PRIVILEGES ON 
SCHEMA public TO housingcrud;\n GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO housingcrud;\n GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public TO housingcrud;\n GRANT ALL ON SCHEMA public TO public;\n '''))\n return query_result", "def _cleanup_object_tables(self, engine, metadata):\n tables = metadata.tables\n tables_to_drop = [\n table\n for tablename, table in tables.items()\n if not tablename.endswith(\"sf_ids\")\n ]\n if tables_to_drop:\n metadata.drop_all(tables=tables_to_drop)", "def drop_tables(session):\n\n for query in drop_table_queries:\n session.execute(query)", "def tearDown(self):\n\n db.session.rollback()\n db.session.remove()\n db.drop_all()", "def drop_tables(db_config):\n tables = [\"users\", \"incidents\", \"images\", \"videos\",\n \"images\", \"location\" \"login\"]\n try:\n conn = connect(db_config)\n cursor = conn.cursor()\n for table in tables:\n query = \"DROP TABLE IF EXISTS {} CASCADE;\".format(table)\n cursor.execute(query)\n conn.commit()\n # print('Table {} deleted'.format(tables), '\\n')\n except(Exception, psycopg2.DatabaseError) as error:\n print(\"Warning: Table Deletion Error\", error)", "def dropTables(t=None):\n tablelist = tables.keys if t == None else [t]\n conn = getConnection()\n try:\n cur = conn.cursor()\n for table in tables.keys():\n query = \"DROP TABLE IF EXISTS %s;\" % table\n cur.execute(query)\n conn.commit()\n except Exception as ex:\n print(\"Failed to drop tables:\" )\n print(ex)\n sys.exit(1)", "def droptables(db, cursor):\n cursor.execute('''DROP TABLE IF EXISTS worlds;''')\n cursor.execute('''DROP TABLE IF EXISTS characters''')\n cursor.execute('''DROP TABLE IF EXISTS chardata''')\n db.commit()", "def drop_tables(cur, conn):\n for query in drop_table_queries:\n try:\n cur.execute(query)\n conn.commit()\n except Exception as e:\n print(e)", "def drop():\n if prompt_bool(\"Are you sure you want to lose all your data\"):\n db.drop_all()\n db.engine.execute(\"drop table if exists alembic_version\")", "def tearDown(self):\n db.session.commit()\n db.drop_all()", "def drop_tables(self, table):\n drop_table = \"DROP TABLE IF EXISTS {} CASCADE;\".format(table)\n self.cursor.execute(drop_table)", "def tearDown(self):\n db.drop_all()", "def tearDown(self):\n db.drop_all()", "def _drop_db(keep_tables=None):\n server.db.session.remove()\n if keep_tables is None:\n keep_tables = []\n meta = server.db.metadata\n for table in reversed(meta.sorted_tables):\n if table.name in keep_tables:\n continue\n server.db.session.execute(table.delete())\n server.db.session.commit()", "def drop_fact_tables(cur,conn):\n\tfor query in drop_fact_queries:\n\t\tcur.execute(query)\n\t\tconn.commit()", "def cleanup():\n cat = CSVCatalog.CSVCatalog()\n cat.drop_table(\"people\", force_drop=True)\n cat.drop_table(\"batting\", force_drop=True)\n cat.drop_table(\"teams\", force_drop=True)", "def tearDown(self):\n self.db.drop_all()\n pass", "def tearDown(self):\n\n # Remove all tables from test db\n db.session.remove()\n db.drop_all()", "def drop_tables(session):\n \n for query in drop_table_queries:\n try:\n rows = session.execute(query)\n except Exception as e:\n print(e)", "def drop_tables(cur, conn):\n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()\n print(f\"\\nRunning: {query}\")", "def tearDown(self):\n with self.app.app_context():\n # drop all tables\n db.session.remove()\n db.drop_all()\n db.create_all()", "def tearDown(self):\n with self.app.app_context():\n # drop all tables\n db.session.remove()\n db.drop_all()", "def 
tearDown(self):\n with self.app.app_context():\n # drop all tables\n db.session.remove()\n db.drop_all()", "def tearDown(self):\n with self.app.app_context():\n # drop all tables\n db.session.remove()\n db.drop_all()", "def tearDown(self):\n with self.app.app_context():\n # drop all tables\n db.session.remove()\n db.drop_all()", "def drop():\n if click.confirm(\"Are you sure you want to lose all your data\"):\n db.drop_all()", "def close(self):\n cur = self.cursor()\n for table in self.temp_tables:\n cur.execute('drop table %s' % table)\n\n super(_MockConnection, self).close()", "def tearDown(self):\n QuestionerDB.destroy_tables()", "def drop_table_if_exists():\n drop_users_table = \"\"\"\n DROP TABLE IF EXISTS users CASCADE\"\"\"\n drop_parties_table = \"\"\"\n DROP TABLE IF EXISTS parties CASCADE\"\"\"\n drop_offices_table = \"\"\"\n DROP TABLE IF EXISTS offices CASCADE\"\"\"\n drop_candidates_table = \"\"\"\n DROP TABLE IF EXISTS candidates CASCADE\"\"\"\n\n drop_voters_table = \"\"\"\n DROP TABLE IF EXISTS votes CASCADE\"\"\"\n return [drop_users_table, drop_parties_table, drop_offices_table,\n drop_candidates_table, drop_voters_table]", "def drop_data():\n\n tables = ['customers', 'products', 'rentals']\n with DropDataUnitTesting.mongo:\n norton_db = DropDataUnitTesting.mongo.connection.UnitTestNortonDB\n try:\n for table in tables:\n drop_table = norton_db[table]\n drop_table.drop()\n result = 'table drop success'\n except OperationFailure as e:\n logger.error('mongo drop table error %s', e, exc_info=True)\n result = 'mongo drop table error {}'.format(e)\n print(result)", "def teardown_db():\n engine = config['tg.app_globals'].sa_engine\n connection = engine.connect()\n\n # INFO - D.A. - 2014-12-04\n # Recipe taken from bitbucket:\n # https://bitbucket.org/zzzeek/sqlalchemy/wiki/UsageRecipes/DropEverything\n\n inspector = reflection.Inspector.from_engine(engine)\n metadata = MetaData()\n\n tbs = []\n all_fks = []\n views = []\n\n # INFO - D.A. 
- 2014-12-04\n # Sequences are hard defined here because SQLA does not allow to reflect them from existing schema\n seqs = [\n Sequence('seq__groups__group_id'),\n Sequence('seq__contents__content_id'),\n Sequence('seq__content_revisions__revision_id'),\n Sequence('seq__permissions__permission_id'),\n Sequence('seq__users__user_id'),\n Sequence('seq__workspaces__workspace_id')\n ]\n\n for view_name in inspector.get_view_names():\n v = Table(view_name,metadata)\n views.append(v)\n\n for table_name in inspector.get_table_names():\n\n fks = []\n for fk in inspector.get_foreign_keys(table_name):\n if not fk['name']:\n continue\n fks.append(\n ForeignKeyConstraint((),(),name=fk['name'])\n )\n t = Table(table_name,metadata,*fks)\n tbs.append(t)\n all_fks.extend(fks)\n\n if not config['sqlalchemy.url'].startswith('sqlite'):\n for fkc in all_fks:\n connection.execute(DropConstraint(fkc))\n\n for view in views:\n drop_statement = 'DROP VIEW {}'.format(view.name)\n # engine.execute(drop_statement)\n connection.execute(drop_statement)\n\n for table in tbs:\n connection.execute(DropTable(table))\n\n\n for sequence in seqs:\n try:\n connection.execute(DropSequence(sequence))\n except Exception as e:\n logger.debug(teardown_db, 'Exception while trying to remove sequence {}'.format(sequence.name))\n\n transaction.commit()\n connection.close()\n engine.dispose()", "def drop_all_tables(args):\n engine = sqlalchemy.create_engine(CONFIG.db_uri)\n print(\"Dropping all tables on {}...\".format(CONFIG.db_uri), end=\" \")\n Base.metadata.drop_all(bind=engine)\n print(\"finished.\")", "def test_drop_table(pawprint_default_tracker_db_with_table):\n\n tracker = pawprint_default_tracker_db_with_table\n\n # make sure table exists\n with pytest.raises(ProgrammingError):\n tracker.create_table()\n\n tracker.drop_table()\n\n with pytest.raises(ProgrammingError):\n tracker.drop_table()", "def bulk_drop_table(cursor, t_name_list):\n for t_name in t_name_list:\n try:\n print(f\"Dropping table '{t_name}'...\")\n cursor.execute(f'DROP TABLE {t_name}')\n except mysql.connector.Error as err:\n if err.errno == 1051:\n print(f\"Table '{t_name}' DNE, moving on...\")\n pass\n else:\n print(str(err.errno) + \": \" + err.msg+\".\")\n exit(1)\n else:\n print(\"OK\")\n return None", "def drop(drop_all=False):\n\n engine = current_app.extensions['meowth_dbutils'].db.engine\n if current_app.extensions['meowth_dbutils'].metadata.bind is None:\n current_app.extensions['meowth_dbutils'].metadata.bind = engine\n with perform(\n name='dbutils drop',\n before='Dropping all project tables',\n fail='Error occured while droping project tables',\n ):\n current_app.extensions['meowth_dbutils'].metadata.drop_all()\n with perform(\n name='dbutils drop',\n before='Dropping alembic versioning table',\n fail='Error occured while dropping alembic table',\n ):\n engine.execute('drop table if exists alembic_version')\n if drop_all:\n with perform(\n name='dbutils drop',\n before='Dropping all other tables in database',\n fail='Error occured while dropping other tables',\n ):\n current_app.extensions['meowth_dbutils'].db.reflect()\n current_app.extensions['meowth_dbutils'].db.drop_all()", "def drop_tables(cur, conn):\n for query in drop_table_queries:\n try:\n cur.execute(query)\n conn.commit()\n except psycopg2.Error as e:\n print(\"Error: Could not drop table from query: {}\".format(query))\n print(e)", "def cleanup():\n cat = CSVCatalog.CSVCatalog()\n cat.drop_table(\"people\")\n cat.drop_table(\"batting\")\n cat.drop_table(\"teams\")", "def 
drop_old_iceqube_tables():\n queue.storage.recreate_tables()", "def rollback(self): \r\n if self._closed:\r\n raise Error('The connection to the database has been closed.')\r\n for table in self.tables.keys():\r\n if self.tables[table].open:\r\n self.tables[table].rollback()\r\n for table in self.createdTables:\r\n if self.tables.has_key(table):\r\n if self.tables[table].open:\r\n self.tables[table]._close()\r\n del self.tables[table]\r\n for end in self.tableExtensions:\r\n if os.path.exists(self.database+os.sep+table+end):\r\n os.remove(self.database+os.sep+table+end)\r\n self.createdTables = []", "def tearDown(self):\n with app.app_context():\n db = app.db.get_db()\n cur = db.cursor()\n with app.open_resource('sql/drop_tests.sql', mode='r') as f:\n cur.execute(f.read())\n db.commit()\n cur.close()\n db.close()", "def drop_table(self, tablename):\n # print \"table dropped\"\n # return\n query = 'drop table ' + tablename\n try:\n self.__cur.execute(query)\n self.__conn.commit()\n except Exception as e:\n self.__conn.rollback()\n raise e", "def clean_db():\n yield\n logging.info(\"Delete table\")\n db.delete_table(\"TestRules\")", "def clear_db():\n for name in TABLES:\n result = execute_query('truncate table {};'.format(name)), ())", "def clean_database(query_root, host, port, db_name, user, password):\n try:\n conn = PGDB(host, port, db_name, user, password)\n try:\n for table in TABLES:\n conn.executeQuery(\"DROP TABLE IF EXISTS %s \" % table)\n except Exception as e:\n print(\"unable to remove existing tables. %s\" % e)\n return 1\n print(\"dropped existing tables\")\n conn.commit()\n conn.close()\n return 0\n except Exception as e:\n print(\"unable to connect to the database. %s\" % e)\n return 1", "def test_drop_table(self):\n schema: t.List[DiffableTable] = []\n schema_snapshot: t.List[DiffableTable] = [\n DiffableTable(class_name=\"Band\", tablename=\"band\", columns=[])\n ]\n schema_differ = SchemaDiffer(\n schema=schema, schema_snapshot=schema_snapshot, auto_input=\"y\"\n )\n\n self.assertTrue(len(schema_differ.drop_tables.statements) == 1)\n self.assertEqual(\n schema_differ.drop_tables.statements[0],\n \"manager.drop_table(class_name='Band', tablename='band')\",\n )", "def tearDown(self):\n #db.session.remove()\n db.drop_all()", "def tearDown(self):\n db.session.close()\n db.drop_all()", "def tearDown(self) -> None:\n self.things.db.session.remove() # type: ignore\n self.things.db.drop_all() # type: ignore", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()", "def tearDown(self):\n\n db.session.close()\n db.drop_all()" ]
[ "0.83170086", "0.824251", "0.8148625", "0.80837333", "0.80614614", "0.80125594", "0.80077255", "0.798122", "0.79612356", "0.78991616", "0.7891649", "0.7845138", "0.775593", "0.77060443", "0.76956725", "0.7687249", "0.7681075", "0.76451164", "0.7631582", "0.76200426", "0.7605881", "0.7605537", "0.759196", "0.75888956", "0.75888956", "0.75888956", "0.75888956", "0.75888956", "0.75888956", "0.75888956", "0.75888956", "0.7573289", "0.7565544", "0.7540803", "0.7502063", "0.75020516", "0.74999297", "0.7497219", "0.7480593", "0.74298775", "0.7429059", "0.7424597", "0.7421051", "0.74142367", "0.74129", "0.7402577", "0.73902994", "0.7379906", "0.73574674", "0.73574674", "0.7299153", "0.7291836", "0.7286066", "0.72841215", "0.7283382", "0.7261714", "0.72348326", "0.72245914", "0.72231", "0.72231", "0.72231", "0.72231", "0.7220375", "0.71989", "0.71971804", "0.7190704", "0.7164881", "0.7151378", "0.71513283", "0.7145372", "0.71404594", "0.71176153", "0.71078014", "0.70833546", "0.7072865", "0.7065505", "0.7058092", "0.7051259", "0.7027673", "0.70253646", "0.7017927", "0.701709", "0.700745", "0.7006997", "0.6996553", "0.6994392", "0.6994392", "0.6994392", "0.6994392", "0.6994392", "0.6994392", "0.6994392", "0.6994392", "0.6994392", "0.6994392", "0.6994392", "0.6994392", "0.6994392", "0.6994392", "0.6994392" ]
0.7902523
9
Load multiple datasets (simultaneously)
def processing_handler( datasets: list, load: Callable[[dict], None], cores: int, threads: int ) -> None: # Data output output = [] # Multi-core processing if cores > 1 and len(datasets) > 1: # Create process pool with Pool(cores) as pool: # Process datasets in pool output = pool.starmap(load, datasets) # Wait for Pool to finish pool.close() pool.join() # Multi-thread processing elif threads > 1 and len(datasets) > 1: # Create process pool with ThreadPool(threads) as pool: # Process datasets in pool output = pool.starmap(load, datasets) # Wait for Pool to finish pool.close() pool.join() # Single-thread processing else: for dataset in datasets: output.append(load(*dataset)) # Remove empty DataFrames filtered = list(filter(lambda df: df.index.size > 0, output)) return pd.concat(filtered) if len(filtered) > 0 else output[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_datasets():\n idx, data_paths, data_names, desc_paths, descrips, sql_paths, \\\n sql_names, loaded, table_size, \\\n loaded_names = mgr.build_datasets_table()\n return render_template('load_datasets.html',\n zip=zip(idx, data_paths, data_names, desc_paths,\n descrips, sql_paths, sql_names, loaded,\n table_size),\n data_names=loaded_names)", "def load_all_data() -> Tuple[pd.DataFrame, ...]:\n return tuple(\n pd.read_csv(path, sep='\\t') for path in (TARGETS_PATH, USER_INFO_PATH, INTERACTIONS_PATH, TRACK_INFO_PATH))", "def importAllDatasets(directory):\n head_index = findIndex(temp_list, \"Gaze\")\n point_index = findIndex(temp_list, \"Point\")\n grab_index = findIndex(temp_list, \"Grab\")\n pos_index = findIndex(temp_list, \"Position\")\n\n head_data = pd.read_csv(temp_list[head_index]) if head_index != None else None\n point_data = pd.read_csv(temp_list[point_index]) if point_index != None else None\n grab_data = pd.read_csv(temp_list[grab_index]) if grab_index != None else None\n pos_data = pd.read_csv(temp_list[pos_index]) if pos_index != None else None\n\n\n return head_data, point_data, grab_data, pos_data", "def _load_dataset(self, path):\n\t\twhile True:\n\t\t\t\n\t\t\ttry:\n\t\t\t\tX_train = np.load(\"data/X_train.npy\")\n\t\t\t\tX_val = np.load(\"data/X_val.npy\")\n\t\t\t\tY_train = np.load(\"data/Y_train.npy\")\n\t\t\t\tY_val = np.load(\"data/Y_val.npy\")\n\t\t\t\tbreak\n\n\t\t\texcept FileNotFoundError:\n\n\t\t\t\tdata_temp = np.zeros((50000,64,64,3))\n\t\t\t\tlabel_temp = []\n\n\t\t\t\tfor i in range(5):\n\n\t\t\t\t\tfile = path + str(i+1)\n\t\t\t\t\twith open(file, 'rb') as fo:\n\t\t\t\t\t\ttemp_element = pickle.load(fo, encoding='bytes')\n\n\t\t\t\t\ttemp_data = temp_element[b'data']\n\t\t\t\t\tlabel_temp.extend(temp_element[b'labels'])\n\n\t\t\t\t\tfor j in range(10000):\n\t\t\t\t\t\tdata_temp[j+(i*10000)] = self._reshape(temp_data[j])\n\n\t\t\t\tlabel_temp = np.eye(10)[np.array(label_temp)]\n\n\t\t\t\tnp.random.seed(123)\n\t\t\t\tpermutations = list(np.random.permutation(50000))\n\t\t\t\tX = data_temp[permutations, :, : , :] \n\t\t\t\tY = label_temp[permutations, :]\n\t\t\t\tX_train = X[0:40000, :, :, :] \n\t\t\t\tY_train = Y[0:40000, :]\n\t\t\t\tX_val = X[40000:50000, :, :, :] \n\t\t\t\tY_val = Y[40000:50000, :]\n\n\t\t\t\tnp.save(\"./data/X_train\", X_train)\n\t\t\t\tnp.save(\"./data/X_val\", X_val)\n\t\t\t\tnp.save(\"./data/Y_train\", Y_train)\n\t\t\t\tnp.save(\"./data/Y_val\", Y_val)\n\t\t\t\tbreak\n\n\t\treturn X_train, X_val, Y_train, Y_val", "def load_all_data(files):\n return pd.concat([load_file(file) for file in files], ignore_index=True)", "def data_loaders(dataset_path):\n dataset_path = dataset_path\n news_stock_dataset = NewsStockDataLoader(dataset_path)\n \n dataset_size = len(news_stock_dataset)\n indices = list(range(dataset_size))\n training_split = int(0.8 * dataset_size)\n validation_split = int(0.9 * dataset_size)\n\n np.random.seed(96)\n np.random.shuffle(indices)\n\n train_indices = indices[:training_split]\n valid_indices = indices[training_split:validation_split]\n test_indices = indices[validation_split:]\n\n train_sampler = SubsetRandomSampler(train_indices)\n valid_sampler = SubsetRandomSampler(valid_indices)\n test_sampler = SubsetRandomSampler(test_indices)\n \n collate = PadSequence()\n\n training_loader = DataLoader(news_stock_dataset,\n num_workers = 1,\n batch_size = Config.get(\"training_batch_size\"),\n sampler = train_sampler,\n collate_fn = collate)\n\n validation_loader = DataLoader(news_stock_dataset,\n num_workers = 1,\n 
batch_size = Config.get(\"validation_batch_size\"),\n sampler = valid_sampler,\n collate_fn = collate)\n\n testing_loader = DataLoader(news_stock_dataset,\n num_workers = 1,\n batch_size = Config.get(\"testing_batch_size\"),\n sampler= test_sampler,\n collate_fn = collate)\n \n return training_loader, validation_loader, testing_loader", "def load_data(self):\n for set_name in self.image_dir_path:\n if self.verbose:\n print('\\n> Loading data files for the set: ' + set_name)\n\n # image dir\n image_dir = os.path.join(self.data_path, self.image_dir_path[set_name])\n\n # annotation file path\n annot_filepath = os.path.join(self.data_path, self.annotation_path[set_name])\n\n if 'test' in set_name:\n yield load_data_test(set_name, image_dir, annot_filepath, self.verbose)\n else:\n yield self.load_data_trainval(set_name, image_dir, annot_filepath)", "def load_all(self, dir_path):\n # each file name corresponds to another date\n input_paths = [os.path.join(dir_path, f) for f in os.listdir(dir_path)\n if os.path.isfile(os.path.join(dir_path, f)) and f.endswith('.csv')]\n\n if self.n_proc > 1:\n # Load in parallel\n _n_proc = min(self.n_proc, len(input_paths)) # no more than file_names needed here\n logger.info(\"Loading {} datasets files using {} parallel processes ...\".format(len(input_paths), _n_proc))\n with Pool(processes=_n_proc) as pool:\n all_df = pd.concat(pool.map(HDD_data.load_single, input_paths))\n else: # read 1 file at a time\n all_df = pd.concat(HDD_data.load_single(path) for path in input_paths)\n\n return all_df", "def __loadDataset(self, parameters):\n # self.localConfigured = Settings.instance().readValue( key = 'Common/local-repo' )\n for pr in parameters:\n if pr['type'] == 'dataset':\n if pr['value'].startswith('undefined:/'):\n fileName = pr['value'].split('undefined:/')[1]\n if not os.path.exists( fileName ):\n raise Exception(\"the following test data file is missing: %s \" % fileName)\n\n doc = FileModelTestData.DataModel()\n res = doc.load( absPath = fileName )\n pr['value'] = \"undefined:/%s\" % doc.getRaw()\n elif pr['value'].startswith('local-tests:/'):\n fileName = pr['value'].split('local-tests:/')[1]\n\n if not os.path.exists( fileName ):\n raise Exception(\"the following test data file is missing: %s \" % fileName)\n \n doc = FileModelTestData.DataModel()\n res = doc.load( absPath = fileName )\n pr['value'] = \"local-tests:/%s\" % doc.getRaw()\n else:\n pass", "def dataloader(self):\n\n # load / split data\n train_data = self.data.get_train_data()\n if self.args.use_dev:\n train_data, dev_data = self.data.split_data(train_data)\n test_data = self.data.get_test_data()\n\n #print(train_data[0])\n #print(dev_data[0])\n #print(test_data[0])\n\n # build dataset\n train_dataset = self.loader.build_dataset(\n train_data, \n self.args.train_max_seq_len)\n train_loader = self.loader.build_dataloader(\n train_dataset, 'train')\n\n test_dataset = self.loader.build_dataset(\n test_data,\n self.args.eval_max_seq_len)\n test_loader = self.loader.build_dataloader(\n test_dataset, 'test')\n\n if self.args.use_dev:\n dev_dataset = self.loader.build_dataset(\n dev_data,\n self.args.eval_max_seq_len)\n dev_loader = self.loader.build_dataloader(\n dev_dataset, 'dev')\n return train_loader, dev_loader, test_loader\n else:\n return train_loader, test_loader", "def load_datasets(self):\n if self.processed_extension == '.npz':\n logger.info(f'Loading sets from npz:')\n \n logger.info(f'train: {self.train_path}')\n self.train_data = sparse.load_npz(self.train_path)\n\n logger.info(f'val: 
{self.val_path}')\n self.val_data = sparse.load_npz(self.val_path)\n\n logger.info(f'test: {self.test_path}')\n self.test_data = sparse.load_npz(self.test_path)\n \n # Split x and y\n self.train_data = [sparse.lil_matrix(sparse.csr_matrix(self.train_data)[:,:-1]),\n sparse.lil_matrix(sparse.csr_matrix(self.train_data)[:,-1])]\n \n self.val_data = [sparse.lil_matrix(sparse.csr_matrix(self.val_data)[:,:-1]),\n sparse.lil_matrix(sparse.csr_matrix(self.val_data)[:,-1])]\n \n self.test_data = [sparse.lil_matrix(sparse.csr_matrix(self.test_data)[:,:-1]),\n sparse.lil_matrix(sparse.csr_matrix(self.test_data)[:,-1])]\n \n elif self.processed_extension == '.csv':\n logger.info(f'Loading sets from csv:')\n \n logger.info(f'train: {self.train_path}')\n self.train_data = pd.read_csv(self.train_path)\n train_cols = self.train_data.columns\n self.train_data = [self.train_data[train_cols.difference(['TARGET'])],\n self.train_data['TARGET']]\n \n logger.info(f'val: {self.val_path}')\n self.val_data = pd.read_csv(self.val_path)\n self.val_data = [self.val_data[train_cols.difference(['TARGET'])],\n self.val_data['TARGET']]\n \n logger.info(f'test: {self.test_path}')\n self.test_data = pd.read_csv(self.test_path)\n self.test_data = [self.test_data[train_cols.difference(['TARGET'])],\n self.test_data['TARGET']]\n else:\n raise AttributeError(f'Wrong extension: {self.processed_extension}')\n self.n_train = self.train_data[0].shape[0]\n self.n_val = self.val_data[0].shape[0]\n self.n_test = self.test_data[0].shape[0]\n self.input_size = self.train_data[0].shape[1]\n self.n_examples = self.n_train + self.n_val + self.n_test\n \n logger.info(f'Set sizes:')\n logger.info(f'train: {self.n_train}')\n logger.info(f'val: {self.n_val}')\n logger.info(f'test: {self.n_test}')", "def load_dataset(self):\n # Get all the files in the directory\n file_list = self.get_file_list()\n\n # Concatenate the data corresponding to a list of files\n data = self.concatenate_file_data(file_list)\n\n # Shuffle the data and create the training and the validation datasets\n data = self.shuffle_data_dictionary(data)\n self.training_dataset, self.validation_dataset = self.split_data_into_training_and_validation(data)", "def data_loaders(args):\n\n transform = transforms.Compose([\n transforms.Resize(64),\n transforms.ToTensor(),\n lambda image: (image - 0.5) * 2\n ])\n\n train_mnist = datasets.MNIST(\n root=args.database_root,\n train=True,\n download=True,\n transform=transform\n )\n train_loader = DataLoader(\n dataset=train_mnist,\n batch_size=args.train_batch_size,\n shuffle=True,\n num_workers=1,\n pin_memory=True\n )\n\n test_mnist = datasets.MNIST(\n root=args.database_root,\n train=False,\n download=True,\n transform=transform\n )\n test_loader = DataLoader(\n dataset=test_mnist,\n batch_size=args.test_batch_size,\n shuffle=True,\n num_workers=1,\n pin_memory=True\n )\n\n return train_loader, test_loader", "def load_all(): \n training_data = dict() \n for i in range(7):\n training_data[i+1] = load_data(i+1) \n\n return training_data", "def datasets(self):\n pass", "def load_data(root, num_seen, batch_size, num_workers):\n CIFAR10.init(root, num_seen)\n query_dataset = CIFAR10('query', transform=query_transform())\n seen_dataset = CIFAR10('seen', transform=train_transform())\n unseen_dataset = CIFAR10('unseen', transform=train_transform())\n retrieval_dataset = CIFAR10('retrieval', transform=train_transform())\n\n query_dataloader = DataLoader(\n query_dataset,\n batch_size=batch_size,\n pin_memory=True,\n num_workers=num_workers,\n 
)\n\n seen_dataloader = DataLoader(\n seen_dataset,\n shuffle=True,\n batch_size=batch_size,\n pin_memory=True,\n num_workers=num_workers,\n )\n\n unseen_dataloader = DataLoader(\n unseen_dataset,\n shuffle=True,\n batch_size=batch_size,\n pin_memory=True,\n num_workers=num_workers,\n )\n\n retrieval_dataloader = DataLoader(\n retrieval_dataset,\n shuffle=True,\n batch_size=batch_size,\n pin_memory=True,\n num_workers=num_workers,\n )\n\n return query_dataloader, seen_dataloader, unseen_dataloader, retrieval_dataloader", "def getDatasets(self, dirname, dataset_list):\r\n \r\n files = self.loadDirectory(dirname)\r\n \r\n result = []\r\n for dataset_name in dataset_list:\r\n arr = np.concatenate([f[dataset_name] for f in files])\r\n result.append(arr)\r\n \r\n return result", "def download_all_datasets():\n print(\"Downloading all datasets ...\")\n for dataset in get_available_datasets():\n download_dataset(dataset)", "def loadData(self):\n batch_size = 256\n \n #if self.conv_sg == True:\n # batch_size = 1 \n \n download = True\n root = self.root + self.dataset\n if self.dataset == \"MNIST\": \n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\n trainset = torchvision.datasets.MNIST(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.MNIST(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR10\":\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465,), (0.2023, 0.1994, 0.2010,))])\n trainset = torchvision.datasets.CIFAR10(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR10(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR100\":\n transform = transforms.Compose([transforms.ToTensor()])\n trainset = torchvision.datasets.CIFAR100(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR100(root, train=False, download=download, transform=transform)\n \n \n trainloader = torch.utils.data.DataLoader(trainset, batch_size = batch_size,\n shuffle=False, num_workers=0, pin_memory = False)\n \n testloader = torch.utils.data.DataLoader(testset, batch_size= batch_size,\n shuffle=False, num_workers=2, pin_memory = False)\n \n return trainloader, testloader", "def creates_data_loader():\n dataset_faces = FaceDataset(\n IMG_DIR, transform=transform_train, face=True)\n\n dataset_no_faces = FaceDataset(\n IMG_DIR, transform=transform_train, face=False)\n\n datasets_faces_split = train_val_test(dataset_faces, 0.2, 0.0)\n datasets_no_faces_split = train_val_test(dataset_no_faces, 0.2, 0.0)\n\n datasets = {}\n datasets[\"train\"] = datasets_faces_split[\"train\"] + \\\n datasets_no_faces_split[\"train\"]\n datasets[\"test\"] = datasets_no_faces_split[\"test\"]\n datasets[\"val\"] = datasets_faces_split[\"val\"] + \\\n datasets_no_faces_split[\"val\"]\n\n train_loader = DataLoader(dataset=datasets[\"train\"], batch_size=BATCH_SIZE,\n num_workers=NUM_WORKERS, pin_memory=PIN_MEMORY, shuffle=True, drop_last=False)\n\n val_loader = DataLoader(dataset=datasets[\"val\"], batch_size=BATCH_SIZE,\n num_workers=NUM_WORKERS, pin_memory=PIN_MEMORY, shuffle=True, drop_last=False)\n return train_loader, val_loader", "def load_data(batch_size=batch_size):\n trainset = LibriSpeechDataset(training_set, int(LIBRISPEECH_SAMPLING_RATE * n_seconds))\n testset = LibriSpeechDataset(validation_set, int(LIBRISPEECH_SAMPLING_RATE * 
n_seconds), stochastic=False)\n\n train_loader = DataLoader(trainset, batch_size=batch_size, num_workers=1, shuffle=True, drop_last=True)\n test_loader = DataLoader(testset, batch_size=1, num_workers=1, drop_last=True)\n\n return train_loader, test_loader", "def load_raw_datasets():\n ds_train_raw = (\n tf.data.TextLineDataset(filenames=[train_data_path])\n .map(split_line, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n .shuffle(buffer_size=1000)\n .batch(BATCH_SIZE)\n .prefetch(tf.data.experimental.AUTOTUNE)\n )\n\n ds_test_raw = (\n tf.data.TextLineDataset(filenames=[test_data_path])\n .map(split_line, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n .batch(BATCH_SIZE)\n .prefetch(tf.data.experimental.AUTOTUNE)\n )\n return ds_train_raw, ds_test_raw", "def load_dataset(self, split, combine=False, **kwargs):\r\n data_json_path = os.path.join(self.args.data, \"{}.json\".format(split))\r\n self.datasets[split] = get_asr_dataset_from_json(data_json_path, self.tgt_dict)", "def load_datasets(path_sets, path_images):\n dataset_files = tuple(path_set_file.name \n for path_set_file in path_sets.glob('*.csv'))\n\n set_names = [dataset_file[: dataset_file.find('_')]\n for dataset_file in dataset_files]\n \n if len(dataset_files) == 3:\n name_order = ['training', 'validation', 'test']\n set_order = tuple(dataset_files.index(f'{name}_set.csv')\n for name in name_order)\n num_sets = 3\n else:\n training_index = dataset_files.index('training_set.csv')\n set_order = (training_index, 1 - training_index)\n num_sets = 2\n\n images_and_labels = [None] * num_sets * 2\n \n for k in range(num_sets):\n path_dataset_file = path_sets.joinpath(dataset_files[set_order[k]])\n\n with path_dataset_file.open(mode='r', newline='') as f:\n csv_reader = reader(f, delimiter=',')\n dataset = list(csv_reader)\n\n path_dataset_images = [path_images.joinpath(f'label_{row[1]}', row[0])\n for row in dataset]\n\n images_and_labels[k] = np.array([np.fromfile(path_image, np.float64)\n for path_image\n in path_dataset_images])\n\n images_and_labels[k+num_sets] = [row[1] for row in dataset]\n\n return images_and_labels", "def load_dataloaders(args):\n logger.info(\"Loading dataloaders...\")\n p_path = os.path.join(\"./data/\", \"df_unencoded.pkl\")\n train_path = os.path.join(\"./data/\", \"df_encoded.pkl\")\n if (not os.path.isfile(p_path)) and (not os.path.isfile(train_path)):\n df = get_data(args, load_extracted=False)\n elif os.path.isfile(p_path) and (not os.path.isfile(train_path)):\n df = get_data(args, load_extracted=True)\n elif os.path.isfile(train_path):\n df = load_pickle(\"df_encoded.pkl\")\n \n # Train-Test split\n msk = np.random.rand(len(df)) < args.train_test_ratio\n trainset = df[msk]\n testset = df[~msk]\n \n trainset = text_dataset(trainset, args)\n max_features_length = trainset.max_x_len\n max_seq_len = trainset.max_y_len\n train_length = len(trainset)\n train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True,\\\n num_workers=0, collate_fn=Pad_Sequence(), pin_memory=False)\n \n testset = text_dataset(testset, args)\n test_length = len(testset)\n test_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=True,\\\n num_workers=0, collate_fn=Pad_Sequence(), pin_memory=False)\n return train_loader, train_length, max_features_length, max_seq_len, test_loader, test_length", "def load_all_datasets(series_dir: Path, downsampling: int) -> Tuple[Dict[str, tf.data.Dataset], int, int]:\n\n datasets = dict()\n for model_input_type in MODEL_INPUT_TYPES:\n dataset, n_agents, n_options = 
create_dataset(series_dir=series_dir,\n downsampling=downsampling,\n model_input_type=model_input_type)\n datasets[model_input_type] = dataset\n\n return datasets, n_agents, n_options", "def download_data_sets(ad_accounts: [adaccount.AdAccount]):\n download_account_structure(ad_accounts)\n download_ad_performance(ad_accounts)", "def get_data_loaders(cap_files, visual_feats, vocab, bow2vec, batch_size=100, num_workers=2, n_caption=2, video2frames=None, video2frames_target=None, visual_feats_target=None, caption_file_target=None, multi_flag=0):\n if video2frames_target!=None and visual_feats_target!=None:\n if multi_flag == 0:\n dset = {'train': Dataset4DualEncoding(cap_files['train'], visual_feats['train'], bow2vec, vocab, video2frames=video2frames['train'], video2frames_target=video2frames_target['train'], visual_feat_target=visual_feats_target['train'], caption_file_target=caption_file_target),\n 'val': Dataset4DualEncoding(cap_files['val'], visual_feats['val'], bow2vec, vocab, n_caption, video2frames=video2frames['val']),\n 'test': Dataset4DualEncoding(cap_files['test'], visual_feats['test'], bow2vec, vocab, n_caption, video2frames=video2frames['test'])}\n else:\n dset = {'train': Dataset4DualEncoding(cap_files['train'], visual_feats['train'], bow2vec, vocab, video2frames=video2frames['train'], video2frames_target=video2frames_target['train'], visual_feat_target=visual_feats_target['train'], caption_file_target=caption_file_target, visual_feat_source2=visual_feats['train2'], video2frames_source2=video2frames['train2'], caption_file_source2=cap_files['train2']),\n 'val': Dataset4DualEncoding(cap_files['val'], visual_feats['val'], bow2vec, vocab, n_caption, video2frames=video2frames['val']),\n 'test': Dataset4DualEncoding(cap_files['test'], visual_feats['test'], bow2vec, vocab, n_caption, video2frames=video2frames['test'])}\n\n\n\n else:\n dset = {'train': Dataset4DualEncoding(cap_files['train'], visual_feats['train'], bow2vec, vocab, video2frames=video2frames['train']),\n 'val': Dataset4DualEncoding(cap_files['val'], visual_feats['val'], bow2vec, vocab, n_caption, video2frames=video2frames['val']),\n 'test': Dataset4DualEncoding(cap_files['test'], visual_feats['test'], bow2vec, vocab, n_caption, video2frames=video2frames['test'])}\n\n data_loaders = {x: torch.utils.data.DataLoader(dataset=dset[x],\n batch_size=batch_size,\n shuffle=(x=='train'),\n pin_memory=True,\n num_workers=num_workers,\n collate_fn=collate_frame_gru_fn)\n for x in ['train', 'val', 'test']}\n return data_loaders", "def load_dataset():\n\n\n train_dd_loader = DailyDialogLoader(PATH_TO_TRAIN_DATA, load=False)\n train_dataloader = DataLoader(train_dd_loader, batch_size=16, shuffle=True, num_workers=0,\n collate_fn=PadCollate())\n\n test_dd_loader = DailyDialogLoader(PATH_TO_TEST_DATA, load=True)\n test_dataloader = DataLoader(test_dd_loader, batch_size=1, shuffle=False, num_workers=0,\n collate_fn=PadCollate())\n\n assert train_dd_loader.vocabulary.n_words == test_dd_loader.vocabulary.n_words\n\n return train_dd_loader, train_dataloader, test_dataloader", "def repeater(data_loader):\n for loader in itertools.repeat(data_loader):\n for data in loader:\n yield data", "def _load_dataset(self, path):\n\t\twhile True:\n\t\t\t\n\t\t\ttry:\n\t\t\t\tX_test = np.load(\"data/X_test.npy\")\n\t\t\t\tY_test = np.load(\"data/Y_test.npy\")\n\t\t\t\tbreak\n\n\t\t\texcept FileNotFoundError:\n\n\t\t\t\tX_test = np.zeros((10000,64,64,3))\n\t\t\t\tY_test = []\n\n\t\t\t\t\n\t\t\t\twith open(path, 'rb') as fo:\n\t\t\t\t\ttemp_element = 
pickle.load(fo, encoding='bytes')\n\n\t\t\t\ttemp_data = temp_element[b'data']\n\t\t\t\tY_test.extend(temp_element[b'labels'])\n\n\t\t\t\tfor j in range(10000):\n\t\t\t\t\tX_test[j] = self._reshape(temp_data[j])\n\n\t\t\t\tY_test = np.eye(10)[np.array(Y_test)]\n\t\t\t\t\n\t\t\t\tnp.save(\"./data/X_test\", X_test)\n\t\t\t\tnp.save(\"./data/Y_test\", Y_test)\n\n\t\t\t\tbreak\n\n\n\t\treturn X_test, Y_test", "def loadData(catalog):\n\n loadArtwork(catalog)\n loadArtists(catalog)", "def load_datasets():\n from .dataset import num_classes, image_size\n\n train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)\n test_filename = maybe_download('notMNIST_small.tar.gz', 8458043)\n\n train_folders = maybe_extract(train_filename)\n test_folders = maybe_extract(test_filename)\n if not (len(train_folders) == len(test_folders) == num_classes):\n raise Exception('Expected %d folders, one per class. Found %d and %d instead.' % (\n num_classes, len(train_folders), len(test_folders)))\n print(\"Dataset folders: %s, %s\" % (train_folders, test_folders))\n\n # load datasets\n train_datasets = maybe_pickle(train_folders, 45000, image_size)\n test_datasets = maybe_pickle(test_folders, 1800, image_size)\n\n return train_datasets, test_datasets", "def loadData(catalog):\n loadVideos(catalog)\n loadCategories(catalog)", "def LoadTroikaDataset():\n data_dir = \"./datasets/troika/training_data\"\n data_fls = sorted(glob.glob(data_dir + \"/DATA_*.mat\"))\n ref_fls = sorted(glob.glob(data_dir + \"/REF_*.mat\"))\n return data_fls, ref_fls", "def load_all(self, root_dir, file_list=None, pattern=None, mode=None):\n\n # if func is None:\n # func = SemicondTraceData.load_single\n\n # Select paths for training and evaluation\n if file_list is None:\n data_paths = glob.glob(os.path.join(root_dir, '*')) # list of all paths\n else:\n data_paths = [os.path.join(root_dir, p) for p in file_list]\n if len(data_paths) == 0:\n raise Exception('No files found using: {}'.format(os.path.join(root_dir, '*')))\n\n if pattern is None:\n # by default evaluate on\n selected_paths = data_paths\n else:\n selected_paths = list(filter(lambda x: re.search(pattern, x), data_paths))\n\n input_paths = [p for p in selected_paths if os.path.isfile(p) and p.endswith('.csv')]\n if len(input_paths) == 0:\n raise Exception(\"No .csv files found using pattern: '{}'\".format(pattern))\n\n if (mode != 'simple') and (self.n_proc > 1):\n # Load in parallel\n _n_proc = min(self.n_proc, len(input_paths)) # no more than file_names needed here\n logger.info(\"Loading {} datasets files using {} parallel processes ...\".format(len(input_paths), _n_proc))\n with Pool(processes=_n_proc) as pool:\n # done like this because multiprocessing needs the *explicit* function call\n # and not a reference to a function, e.g. 
func = pd.read_csv\n all_df = pd.concat(pool.map(SemicondTraceData.load_single, input_paths))\n else: # read 1 file at a time\n if mode == 'simple':\n all_df = pd.concat(pd.read_csv(path) for path in tqdm(input_paths))\n else:\n all_df = pd.concat(SemicondTraceData.load_single(path) for path in tqdm(input_paths))\n\n return all_df", "def load_dataset(path_dir, filelist,numlist,dtype=None):\n \n actdat=pd.read_csv(os.path.join(path_dir,filelist[0]),dtype=dtype).iloc[0:numlist[0]] \n for i in range(1,len(filelist)):\n if numlist[i]>0:\n actdat=actdat.append(pd.read_csv(path_dir+filelist[i],dtype=dtype).iloc[0:numlist[i]])\n else:\n actdat=actdat.append(pd.read_csv(path_dir+filelist[i],dtype=dtype))\n \n actdat.reset_index(drop=True,inplace=True)\n \n return actdat", "def compute():\n dataset1 = 'project/data/dataset1.csv'\n dataset2 = \"project/data/dataset2.csv\"\n\n reader = CsvReader()\n\n data1 = reader.readCsv(dataset1)\n data2 = reader.readCsv(dataset2)\n\n database1 = DataBase(data1)\n database2 = DataBase(data2)\n\n Thread1 = threading.Thread(target=database1.fill, args= (1, ))\n Thread2 = threading.Thread(target=database2.fill, args= (2, ))\n\n\n Thread1.start()\n Thread2.start()", "def load_openml_data():\n datasets = dict()\n files = os.listdir(_DATA_DIRECTORY.value)\n for file_name in files:\n with open(_DATA_DIRECTORY.value + file_name, \"r\") as ff:\n task = np.loadtxt(ff, delimiter=\",\", skiprows=1)\n np.random.shuffle(task)\n datasets[file_name] = [task]\n return datasets, files", "def get_dataloaders(args):\n if args.dataset == 'heat':\n dataset_class = heat.HeatDiffusionDataset\n else:\n raise ValueError(f'Unknown dataset {args.dataset}')\n train_dataset = dataset_class(\n dataset_class.get_train_path(args.data_path), args, train=True)\n if args.dist:\n train_sampler = torch.utils.data.distributed.DistributedSampler(\n train_dataset)\n else:\n train_sampler = torch.utils.data.RandomSampler(train_dataset)\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, num_workers=args.workers,\n sampler=train_sampler, pin_memory=True, drop_last=args.drop_last)\n if not args.no_eval:\n validation_dataset = dataset_class(\n dataset_class.get_validation_path(args.data_path), args, train=False)\n if args.dist:\n validation_sampler = torch.utils.data.distributed.DistributedSampler(\n validation_dataset, shuffle=False)\n else:\n validation_sampler = torch.utils.data.SequentialSampler(\n validation_dataset)\n validation_loader = torch.utils.data.DataLoader(\n validation_dataset, batch_size=args.batch_size,\n num_workers=args.workers, sampler=validation_sampler,\n pin_memory=True, drop_last=args.drop_last)\n\n test_dataset = dataset_class(\n dataset_class.get_test_path(args.data_path), args, train=False)\n if args.dist:\n test_sampler = torch.utils.data.distributed.DistributedSampler(\n test_dataset, shuffle=False)\n else:\n test_sampler = torch.utils.data.SequentialSampler(\n test_dataset)\n test_loader = torch.utils.data.DataLoader(\n test_dataset, batch_size=args.batch_size,\n num_workers=args.workers, sampler=test_sampler,\n pin_memory=True, drop_last=args.drop_last)\n else:\n validation_loader = None\n test_loader = None\n\n # Update the data shape if needed.\n if args.data_shape is None:\n args.data_shape = train_dataset.get_shape()\n if args.data_target_shape is None:\n args.data_target_shape = train_dataset.get_target_shape()\n\n return train_loader, validation_loader, test_loader", "def load_all_data_from_file(self) -> None:\n 
self.load_gene_data_from_file()\n self.load_ontology_from_file(ontology_type=DataType.GO, ontology_url=self.go_ontology_url,\n ontology_cache_path=self.go_ontology_cache_path,\n config=self.config)\n self.load_associations_from_file(associations_type=DataType.GO, associations_url=self.go_associations_url,\n associations_cache_path=self.go_associations_cache_path, config=self.config)\n self.load_ontology_from_file(ontology_type=DataType.DO, ontology_url=self.do_ontology_url,\n ontology_cache_path=self.do_ontology_cache_path, config=self.config)\n self.load_associations_from_file(associations_type=DataType.DO, associations_url=self.do_associations_url,\n associations_cache_path=self.do_associations_cache_path,\n association_additional_cache_path=self.do_associations_new_cache_path,\n association_additional_url=self.do_associations_new_url, config=self.config)\n self.load_ontology_from_file(ontology_type=DataType.EXPR, ontology_url=self.expression_ontology_url,\n ontology_cache_path=self.expression_ontology_cache_path, config=self.config)\n self.load_associations_from_file(associations_type=DataType.EXPR,\n associations_url=self.expression_associations_url,\n associations_cache_path=self.expression_associations_cache_path,\n config=self.config)\n self.load_orthology_from_file()\n self.load_expression_cluster_data()\n self.load_protein_domain_information()", "def load_examples_data(dataset_name):\n dataset_name = dataset_name.strip().lower()\n if dataset_name.lower() not in ['pokemon', 'hanzi', 'animals', 'nsfw', 'simpsons', 'horse2zebra', 'people',\n 'autodrive', 'superresolution', 'anpr', 'beauty','antisproofing','facelandmarks','dogs-vs-cats','chinese']:\n raise ValueError('Not a valid dataset_name.')\n dataset_name = 'examples_' + dataset_name\n dirname = os.path.join(_trident_dir, dataset_name)\n if not os.path.exists(dirname):\n try:\n os.makedirs(dirname)\n except OSError:\n # Except permission denied and potential race conditions\n # in multi-threaded environments.\n pass\n is_internet_ok = is_connected()\n if dataset_name == 'examples_pokemon':\n is_download=download_file_from_google_drive('1U-xc54fX9j9BcidvRa0ow6qjssMlSF2A', dirname, 'pokemon.tar')\n tar_file_path = os.path.join(dirname, 'pokemon.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n extract_path = os.path.join(dirname, 'pokemon')\n dataset = load_folder_images(dataset_name, extract_path, folder_as_label=False)\n print('get pokemon images :{0}'.format(len(dataset)))\n return dataset\n\n\n elif dataset_name == 'examples_hanzi':\n download_file_from_google_drive('13UEzSG0az113gpRPKPyKrIE2HDaA2P4H', dirname, 'hanzi.tar')\n tar_file_path = os.path.join(dirname, 'hanzi.tar')\n extract_path = os.path.join(dirname, 'hanzi')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n dataset = load_folder_images(dataset_name, os.path.join(dirname, 'train'), folder_as_label=True,\n object_type=ObjectType.gray)\n\n dataset_test = load_folder_images(dataset_name, os.path.join(dirname, 'test'), folder_as_label=True,\n object_type=ObjectType.gray)\n\n dataset.testdata = dataset_test.traindata\n dataset.class_names['zh-cn'] = dataset.class_names['en-us']\n return dataset\n\n elif dataset_name == 'examples_animals':\n download_file_from_google_drive('19Cjq8OO6qd9k9TMZxlPjDpejDOdiHJoW', dirname, 'animals.tar')\n tar_file_path = os.path.join(dirname, 'animals.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n dataset = load_folder_images(dataset_name, dirname, folder_as_label=True)\n return 
dataset\n elif dataset_name == 'examples_nsfw':\n tar_file_path = os.path.join(dirname, 'nsfw.tar')\n if os.path.exists(tar_file_path) and get_file_create_time(tar_file_path)<datetime.datetime(2021, 2, 20, 0, 0, 0).timestamp():\n os.remove(tar_file_path)\n if os.path.exists(os.path.join(dirname,'porn_detection_data.pkl')):\n os.remove(os.path.join(dirname,'porn_detection_data.pkl'))\n _delete_h(dirname)\n download_file_from_google_drive('1EXpV2QUrSFJ7zJn8NqtqFl1k6HvXsUzp', dirname, 'nsfw.tar')\n\n extract_path = os.path.join(dirname, 'nsfw')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n folders = ['drawings', 'hentai', 'neutral', 'porn', 'sexy']\n data=unpickle(os.path.join(dirname,'porn_detection_data.pkl'))\n\n trainData = []\n testData = []\n trainLabel = []\n testLabel = []\n for n in range(5):\n folder=folders[n]\n trainData.extend(data[folder]['train'])\n trainLabel.extend([n]*len(data[folder]['train']))\n testData.extend(data[folder]['test'])\n testLabel.extend([n] * len(data[folder]['test']))\n\n trainarray = ImageDataset(trainData,object_type=ObjectType.rgb)\n trainlabel = LabelDataset(trainLabel,object_type=ObjectType.classification_label)\n train_iter = Iterator(data=trainarray, label=trainlabel)\n\n testarray = ImageDataset(testData,object_type=ObjectType.rgb)\n testlabel = LabelDataset(testLabel,object_type=ObjectType.classification_label)\n test_iter = Iterator(data=testarray, label=testlabel)\n print('training images: {0} test images:{1}'.format(len(trainarray), len(testarray)))\n\n dataset = DataProvider(dataset_name, traindata=train_iter, testdata=test_iter)\n dataset.binding_class_names(['drawing', 'hentai', 'neutral', 'porn', 'sexy'], 'en-us')\n dataset.binding_class_names(['绘画', '色情漫画', '中性', '色情', '性感'], 'zh-cn')\n dataset.binding_class_names(['繪畫', '色情漫畫', '中性', '色情', '性感'], 'zh-tw')\n dataset.scenario = 'train'\n return dataset\n elif dataset_name == 'examples_simpsons':\n download_file_from_google_drive('1hGNFbfBv3EZ4nx4Qod6PtSYzO8H4QIxC', dirname, 'simpsons.tar')\n tar_file_path = os.path.join(dirname, 'simpsons.tar')\n extract_path = os.path.join(dirname, 'simpsons')\n extract_archive(tar_file_path, extract_path, archive_format='tar')\n data_provider = load_folder_images(dataset_name, extract_path, folder_as_label=False)\n data_provider.traindata.unpair = RandomNoiseDataset(shape=(100), random_mode='normal')\n print('get simpsons images :{0}'.format(len(data_provider.traindata.data.items)))\n return data_provider\n elif dataset_name == 'examples_horse2zebra':\n download_file_from_google_drive('1pqj-T90Vh4wVNBV09kYZWgVPsZUA2f7U', dirname, 'horse2zebra.tar')\n tar_file_path = os.path.join(dirname, 'horse2zebra.tar')\n extract_path = os.path.join(dirname, 'horse2zebra')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n trainA = ImageDataset(list_images(os.path.join(dirname, 'trainA')), object_type=ObjectType.rgb,\n get_image_mode=GetImageMode.processed)\n trainB = ImageDataset(list_images(os.path.join(dirname, 'trainB')), object_type=ObjectType.rgb,\n get_image_mode=GetImageMode.processed)\n testA = ImageDataset(list_images(os.path.join(dirname, 'testA')), object_type=ObjectType.rgb,\n get_image_mode=GetImageMode.processed)\n testB = ImageDataset(list_images(os.path.join(dirname, 'testB')), object_type=ObjectType.rgb,\n get_image_mode=GetImageMode.processed)\n train_iter = Iterator(data=trainA, unpair=trainB)\n test_iter = Iterator(data=testA, unpair=testB)\n dataset = DataProvider(dataset_name, traindata=train_iter, 
testdata=test_iter)\n print('get horse2zebra images :{0}'.format(len(dataset)))\n return dataset\n elif dataset_name == 'examples_people':\n download_file_from_google_drive('1H7mJJfWpmXpRxurMZQqY4N_UXWLbQ2pT', dirname, 'people.tar')\n tar_file_path = os.path.join(dirname, 'people.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n imgs = glob.glob(os.path.join(dirname, 'imgs', '*.*g'))\n masks = glob.glob(os.path.join(dirname, 'masks', '*.png'))\n imgs=list(sorted(imgs))\n masks = list(sorted(masks))\n # make_dir_if_need(os.path.join(dirname, 'trimap'))\n # for i in range(len(masks)):\n # mask=mask2array(masks[i])\n # trimap=mask2trimap(mask)\n # save_mask(trimap,masks[i].replace('masks','trimap'))\n # print('trimap',len(masks))\n\n imgdata = ImageDataset(images=imgs, object_type=ObjectType.rgb)\n mskdata = MaskDataset(masks=masks, object_type=ObjectType.binary_mask)\n dataset = DataProvider(dataset_name=dataset_name, traindata=Iterator(data=imgdata, label=mskdata))\n print('get people images :{0}'.format(len(dataset)))\n return dataset\n elif dataset_name == 'examples_autodrive':\n download_file_from_google_drive('1JqPPeHqhWLqnI6bD8nuHcVx-Y56oIZMK', dirname, 'autodrive.tar')\n tar_file_path = os.path.join(dirname, 'autodrive.tar')\n extract_path = os.path.join(dirname, 'autodrive')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n imgs = glob.glob(os.path.join(dirname, 'images', '*.*g'))\n masks = glob.glob(os.path.join(dirname, 'masks', '*.png'))\n imgs = list(sorted(imgs))\n masks = list(sorted(masks))\n imgdata = ImageDataset(images=imgs, object_type=ObjectType.rgb,symbol='image')\n mskdata = MaskDataset(masks=masks, object_type=ObjectType.color_mask,symbol='mask')\n\n def parse_code(l):\n if len(l.strip().split(\"\\t\")) == 2:\n a, b = l.replace('\\t\\t', '\\t').strip().split(\"\\t\")\n return tuple(int(i) for i in b.split(' ')), a\n\n label_codes, label_names = zip(\n *[parse_code(l) for l in open(os.path.join(dirname, \"label_colors.txt\")).readlines()])\n for i in range(len(label_codes)):\n mskdata.palette[label_names[i]] = label_codes[i]\n\n dataset = DataProvider(dataset_name=dataset_name, traindata=Iterator(data=imgdata, label=mskdata))\n print('get autodrive images :{0}'.format(len(dataset)))\n return dataset\n elif dataset_name == 'examples_superresolution':\n download_file_from_google_drive('1v1uoymrWI_MLSiGvSGW7tWJYSnzzXpEQ', dirname, 'superresolution.tar')\n tar_file_path = os.path.join(dirname, 'superresolution.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n imgs = glob.glob(os.path.join(dirname, '*.*g'))\n imgs.extend(glob.glob(os.path.join(dirname, '*.bmp')))\n imgs = list(sorted(imgs))\n\n print('get super resolution images :{0}'.format(len(imgs)))\n\n imgdata = ImageDataset(images=imgs * 2, object_type=ObjectType.rgb, symbol='lr')\n labeldata = ImageDataset(images=imgs * 2, object_type=ObjectType.rgb, symbol='hr')\n dataset = DataProvider(dataset_name=dataset_name, traindata=Iterator(data=imgdata, label=labeldata))\n return dataset\n elif dataset_name == 'examples_beauty':\n download_file_from_google_drive('1aJhxN9IqsxuayhRTm-gmxk6PiLe5wm9X', dirname, 'beauty.tar')\n tar_file_path = os.path.join(dirname, 'beauty.tar')\n\n extract_archive(tar_file_path, dirname, archive_format='tar')\n # 讀取圖片數據\n images_dict = {}\n with open(os.path.join(dirname, 'images_dict.pkl'), 'rb') as fp:\n images_dict = pickle.load(fp)\n\n f = open(os.path.join(dirname, 'All_Ratings.txt'), encoding='utf-8-sig').readlines()\n imgs = []\n 
landmarks = []\n ratings = []\n for row in f:\n data = row.strip().split('\\t')\n if 'images\\\\' + data[0] in images_dict:\n img = images_dict['images\\\\' + data[0]][0]\n img = img.transpose([2, 0, 1])[::-1].transpose([1, 2, 0])\n imgs.append(img)\n landmark = images_dict['images\\\\' + data[0]][1].astype(np.float32)\n landmarks.append(landmark)\n rating = (float(data[1])) / 5.00\n ratings.append(rating)\n print('{0} faces loaded...'.format(len(imgs)))\n imgdata = ImageDataset(images=imgs, object_type=ObjectType.rgb, symbol='faces')\n landmarkdata = LandmarkDataset(landmarks=landmarks, object_type=ObjectType.landmarks, symbol='target_landmarks')\n labeldata = LabelDataset(data=ratings,object_type=ObjectType.classification_label, symbol='target_beauty')\n data_provider = DataProvider(dataset_name=dataset_name, traindata=Iterator(data=imgdata, label=Dataset.zip(landmarkdata,labeldata)))\n return data_provider\n\n elif dataset_name == 'examples_facelandmarks':\n download_file_from_google_drive('1GtswQBAHPa_bXaB4tW2uOOQ8Lxfz2L5B', dirname, 'ibug_300W.tar')\n tar_file_path = os.path.join(dirname, 'ibug_300W.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n root_dir=os.path.join(dirname, 'ibug_300W_large_face_landmark_dataset')\n image_paths = {}\n landmarks = {}\n crops = {}\n\n for mode in ['train','test']:\n make_dir_if_need(os.path.join(dirname, 'crops',mode))\n tree = ElementTree.parse(os.path.join(root_dir, 'labels_ibug_300W_{0}.xml'.format(mode)))\n root = tree.getroot()\n image_paths[mode]=[]\n landmarks[mode] = []\n crops[mode] = []\n\n offset=5\n for j in tqdm(range(len(root[2]))):\n try:\n filename=root[2][j]\n landmark = []\n for num in range(68):\n x_coordinate = int(filename[0][num].attrib['x'])\n y_coordinate = int(filename[0][num].attrib['y'])\n landmark.append([x_coordinate, y_coordinate])\n landmark=np.asarray(landmark)\n\n crop = filename[0].attrib\n for k in crop.keys():\n crop[k] = int(crop[k]) if isinstance(crop[k], str) else crop[k]\n for k in crop.keys():\n if k=='top' and int(landmark[:,1].min())<int(crop[k]):\n crop[k] = int( landmark[:,1].min())\n crop[ 'height']+=crop[k]-int(landmark[:,1].min())\n elif k=='left' and int(landmark[:,0].min())<int(crop[k]):\n crop[k] = int( landmark[:,0].min())\n crop['width']+= crop[k] - int(landmark[:, 0].min())\n elif k == 'width' and int(landmark[:, 0].max()-landmark[:, 0].min()) > int(crop[k]):\n crop[k] = int(landmark[:, 0].max()-landmark[:, 0].min())\n elif k == 'height' and int(landmark[:, 1].max()-landmark[:, 1].min()) > int(crop[k]):\n crop[k] = int(landmark[:, 1].max()-landmark[:, 1].min())\n\n crop['left']-=offset\n crop['top'] -= offset\n crop['width'] += 2*offset\n crop['height'] += 2*offset\n\n\n landmark[:,0]-=crop['left']\n landmark[:, 1] -= crop['top']\n\n\n if not os.path.exists(os.path.join(dirname, 'crops', mode, '{0}.png'.format(j))):\n im=image2array(os.path.join(root_dir, filename.attrib['file']))\n if im.ndim==2:\n im=cv2.cvtColor(im,cv2.COLOR_GRAY2RGB)\n im=im[crop['top']:min(crop['top']+crop['height'],im.shape[0]),crop['left']:min(crop['left']+crop['width'],im.shape[1]),:]\n\n if max(im.shape[:2])/max(min(im.shape[:2]),0)<=5:\n\n array2image(im).save(os.path.join(dirname, 'crops',mode,'{0}.png'.format(j)))\n image_paths[mode].append(os.path.join(dirname, 'crops', mode, '{0}.png'.format(j)))\n crops[mode].append(crop)\n landmarks[mode].append(landmark)\n del im\n else:\n #im = image2array(os.path.join(dirname, 'crops',mode,'{0}.png'.format(j)))\n 
image_paths[mode].append(os.path.join(dirname, 'crops',mode,'{0}.png'.format(j)))\n crops[mode].append(crop)\n landmarks[mode].append(landmark)\n\n if j%100==0:\n gc.collect()\n except Exception as e:\n pass\n\n print('ibug 300w train dataset: images: {0} landmarks:{1} \\n'.format(len(image_paths['train']),len(landmarks['train'])))\n print('ibug 300w test dataset: images: {0} landmarks:{1} \\n'.format(len(image_paths['test']), len(landmarks['test'])))\n imdata=ImageDataset(images=image_paths['train'],symbol='faces',object_type=ObjectType.rgb)\n landmarkdata = LandmarkDataset(landmarks=landmarks['train'], symbol='landmarks',object_type=ObjectType.landmarks)\n imtestdata = ImageDataset(images=image_paths['test'], symbol='faces',object_type=ObjectType.rgb)\n landmarktestdata = LandmarkDataset(landmarks=landmarks['test'], symbol='landmarks',object_type=ObjectType.landmarks)\n data_provider=DataProvider(traindata=Iterator(data=imdata,label=landmarkdata),testdata=Iterator(data=imtestdata,label=landmarktestdata))\n return data_provider\n\n elif dataset_name == 'examples_antisproofing':\n download_file_from_google_drive('1e7Zjn2MHNCvA5gXdJUECzY8NjK4KVpa7', dirname, 'antisproofing.tar')\n tar_file_path = os.path.join(dirname, 'antisproofing.tar')\n make_dir_if_need(os.path.join(dirname, 'antisproofing'))\n extract_archive(tar_file_path, dirname, archive_format='tar')\n data_provider = load_folder_images(dataset_name,os.path.join(dirname, 'antisproofing'))\n return data_provider\n elif dataset_name == 'examples_anpr':\n download_file_from_google_drive('1uGBd8tXlP0TZAXNgrR6H0jl5MXj7VPbN', dirname, 'anpr.tar')\n tar_file_path = os.path.join(dirname, 'anpr.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n imgs = glob.glob(os.path.join(dirname, '*.*g'))\n imgs = list(sorted(imgs))\n\n # CCPD (Chinese City Parking Dataset, ECCV) and PDRC (license Plate Detection and Recognition Challenge)\n # https://github.com/detectRecog/CCPD\n provinces = [\"皖\", \"沪\", \"津\", \"渝\", \"冀\", \"晋\", \"蒙\", \"辽\", \"吉\", \"黑\", \"苏\", \"浙\", \"京\", \"闽\", \"赣\", \"鲁\", \"豫\", \"鄂\", \"湘\", \"粤\",\n \"桂\", \"琼\", \"川\", \"贵\", \"云\", \"藏\", \"陕\", \"甘\", \"青\", \"宁\", \"新\", \"警\", \"学\", \"O\"]\n alphabets = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',\n 'W', 'X', 'Y', 'Z', 'O']\n ads = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W',\n 'X', 'Y', 'Z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'O']\n\n def lp2char(lp):\n cols = lp.split('_')\n charstring = ''\n for i in range(len(cols)):\n if i == 0:\n charstring += provinces[int(cols[i])]\n elif i == 1:\n charstring += alphabets[int(cols[i])]\n else:\n charstring += ads[int(cols[i])]\n return charstring\n\n width = 720\n height = 1160\n for im_path in imgs:\n lbl = im_path.split('/')[-1].rsplit('.', 1)[0].split('-')[-3]\n charstring = lp2char(lbl)\n iname = im_path.rsplit('/', 1)[-1].rsplit('.', 1)[0].split('-')\n [leftUp, rightDown] = [[int(eel) for eel in el.split('&')] for el in iname[2].split('_')]\n box = [leftUp[0], leftUp[1], rightDown[0], rightDown[1]]\n ori_w, ori_h = [float(int(el)) for el in [width, height]]\n new_labels = [(leftUp[0] + rightDown[0]) / (2 * ori_w), (leftUp[1] + rightDown[1]) / (2 * ori_h),\n (rightDown[0] - leftUp[0]) / ori_w, (rightDown[1] - leftUp[1]) / ori_h]\n download_file_from_google_drive('1e7Zjn2MHNCvA5gXdJUECzY8NjK4KVpa7', dirname, 'antisproofing.tar')\n tar_file_path = os.path.join(dirname, 
'antisproofing.tar')\n make_dir_if_need(os.path.join(dirname, 'antisproofing'))\n extract_archive(tar_file_path, dirname, archive_format='tar')\n data_provider = load_folder_images(dataset_name, os.path.join(dirname, 'antisproofing'))\n return data_provider\n\n\n\n elif dataset_name == 'examples_dogs-vs-cats':\n download_file_from_google_drive('10czW0On7eIXkPP-MuQ-IRxMWdTizWjNC', dirname, 'dogs-vs-cats.tar')\n tar_file_path = os.path.join(dirname, 'dogs-vs-cats.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n data_provider = load_folder_images(dataset_name, dirname)\n return data_provider\n elif dataset_name == 'examples_chinese':\n to_half=ToHalfWidth()\n to_sc=ChineseConvert(convert_to='simplified')\n download_file_from_google_drive('1yzRzXpLuhSUxnixqCgpbdTk16ajnTEWF', dirname, 'chinese.tar')\n tar_file_path = os.path.join(dirname, 'chinese.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n\n as_train = remove_nonprintable(to_half(codecs.open(os.path.join(dirname, 'as_training.utf8'), encoding='utf-8-sig').read().strip().replace('\\u3000' ,'|'))).splitlines()\n cityu_train =remove_nonprintable(to_half(codecs.open(os.path.join(dirname, 'cityu_training.utf8'), encoding='utf-8-sig').read().strip().replace(' ','|'))).splitlines()\n\n as_test = remove_nonprintable(to_half(codecs.open(os.path.join(dirname, 'as_testing_gold.utf8'), encoding='utf-8-sig').read().strip().replace('\\u3000', '|'))).splitlines()\n cityu_test = remove_nonprintable(to_half(codecs.open(os.path.join(dirname, 'cityu_test_gold.utf8'), encoding='utf-8-sig').read().strip().replace(' ', '|'))).splitlines()\n\n\n data = as_train + cityu_train # 把兩個語料合併\n test_data=as_test + cityu_test # 把兩個語料合併\n\n\n raw_data_train = [row.strip('\\n').strip('\\r') for row in data] # 移除分行字元\n raw_data_test = [row.strip('\\n').strip('\\r') for row in test_data] # 移除分行字元\n\n process_data_train=[]\n process_seg_label_train = []\n process_simplifided_label_train = []\n process_traditional_label_train = []\n\n tmp_data_train = []\n tmp_seg_label_train = []\n tmp_simplifided_label_train = []\n tmp_pronunce_label_train = []\n for k in tqdm(range(len(raw_data_train))):\n row=raw_data_train[k]\n if row.startswith('∥'):\n row=row[1:]\n words=row.replace('||','|').split('|')\n for k in range(len(words)):\n\n word = words[k]\n\n for i in range(len(word)):\n tmp_data_train.append(word[i])\n #tmp_simplifided_label_train.append(to_half(to_sc(word[i])))\n #轉換為BMES\n\n if i==0 and len(word)>1: #B 是一個詞的開始\n tmp_seg_label_train.append('B')\n elif i==len(word)-1 and len(word)>=2 and tmp_seg_label_train[-1] in ['B','M']: #E 是一個詞的結束\n tmp_seg_label_train.append('E')\n elif len(word)==1 and i==0: #S 自己就是一個單詞\n tmp_seg_label_train.append('S')\n elif len(word)>=3 and tmp_seg_label_train[-1] in ['B','M']: #M 是一個詞的中間\n tmp_seg_label_train.append('M')\n\n if len(tmp_seg_label_train)>0 and tmp_seg_label_train[-1] in ['E','S']:\n if len(word) > 1 and (is_alphabet(word) or is_punctuation(word)) and k+1<len(words):\n if word in [ '。','﹖']:\n pass\n\n elif random.random() < 0.6 or is_alphabet(word):\n tmp_data_train.append(' ')\n tmp_seg_label_train.append('S')\n\n if (k+1<len(raw_data_train) and not raw_data_train[k+1].startswith( '」')) and words[-1] in [ '。','﹖']:\n #process_traditional_label_train.append(tmp_data_train)\n\n tmp_data_train=to_half(''.join(tmp_data_train))\n tmp_seg_label_train = ''.join(tmp_seg_label_train)\n # if len(tmp_data_train)!=len(tmp_seg_label_train):\n # print('')\n tmp_simplifided_label_train 
=to_sc(tmp_data_train)\n\n process_data_train.append(tmp_data_train)\n process_seg_label_train.append(tmp_seg_label_train)\n process_simplifided_label_train.append(tmp_simplifided_label_train)\n tmp_data_train = []\n tmp_seg_label_train = []\n tmp_simplifided_label_train = []\n tmp_pronunce_label_train = []\n # else:\n # tmp_data_train.append('\\n')\n # tmp_simplifided_label_train.append('\\n')\n # tmp_seg_label_train.append('\\n')\n corpus=process_data_train\n seg_corpus=process_seg_label_train\n simplifided_corpus =process_simplifided_label_train\n\n process_data_test = []\n process_seg_label_test = []\n process_simplifided_label_test = []\n process_traditional_label_test = []\n print('generate test labels')\n tmp_data_test = []\n tmp_seg_label_test = []\n tmp_simplifided_label_test = []\n tmp_pronunce_label_test = []\n for k in tqdm(range(len(raw_data_test))):\n row=raw_data_test[k]\n if row.startswith('∥'):\n row=row[1:]\n words = row.replace('||', '|').split('|')\n for k in range(len(words)):\n\n word = words[k]\n\n for i in range(len(word)):\n tmp_data_test.append(word[i])\n # tmp_simplifided_label_test.append(to_half(to_sc(word[i])))\n # 轉換為BMES\n\n if i == 0 and len(word) > 1: # B 是一個詞的開始\n tmp_seg_label_test.append('B')\n elif i == len(word) - 1 and len(word) >= 2 and tmp_seg_label_test[-1] in ['B', 'M']: # E 是一個詞的結束\n tmp_seg_label_test.append('E')\n elif len(word) == 1 and i == 0: # S 自己就是一個單詞\n tmp_seg_label_test.append('S')\n elif len(word) >= 3 and tmp_seg_label_test[-1] in ['B', 'M']: # M 是一個詞的中間\n tmp_seg_label_test.append('M')\n\n if len(tmp_seg_label_test) > 0 and tmp_seg_label_test[-1] in ['E', 'S'] and k+1<len(words):\n if len(word) > 1 and (is_alphabet(word) or is_punctuation(word)):\n if word in ['。', '﹖']:\n pass\n elif random.random() < 0.6 or is_alphabet(word):\n tmp_data_test.append(' ')\n tmp_seg_label_test.append('S')\n\n if (k + 1 < len(raw_data_test) and not raw_data_test[k + 1].startswith('」')) and words[-1] in ['。', '﹖']:\n # process_traditional_label_test.append(tmp_data_test)\n\n tmp_data_test = to_half(''.join(tmp_data_test))\n tmp_seg_label_test = ''.join(tmp_seg_label_test)\n # if len(tmp_data_test)!=len(tmp_seg_label_test):\n # print('')\n tmp_simplifided_label_test = to_sc(tmp_data_test)\n\n process_data_test.append(tmp_data_test)\n process_seg_label_test.append(tmp_seg_label_test)\n process_simplifided_label_test.append(tmp_simplifided_label_test)\n tmp_data_test = []\n tmp_seg_label_test = []\n tmp_simplifided_label_test = []\n tmp_pronunce_label_test = []\n # else:\n # tmp_data_test.append('\\n')\n # tmp_simplifided_label_test.append('\\n')\n # tmp_seg_label_test.append('\\n')\n test_corpus = process_data_test\n test_seg_corpus = process_seg_label_test\n test_simplifided_corpus = process_simplifided_label_test\n\n\n data=TextSequenceDataset(corpus=corpus,sequence_length=64,sequence_start_at='section_start',object_type=ObjectType.corpus,symbol='input')\n seg_label = TextSequenceDataset(corpus=seg_corpus,sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='seg_label')\n simplifided_label = TextSequenceDataset(corpus=simplifided_corpus,sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='simplified_label')\n traditional_label = TextSequenceDataset(corpus= copy.deepcopy(corpus), sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='traditional_label')\n\n 
data_test=TextSequenceDataset(corpus=test_corpus,sequence_length=64,sequence_start_at='section_start',object_type=ObjectType.corpus,symbol='input')\n seg_test_label = TextSequenceDataset(corpus=test_seg_corpus,sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='seg_label')\n simplifided_test_label = TextSequenceDataset(corpus=test_simplifided_corpus,sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='simplified_label')\n traditional_test_label = TextSequenceDataset(corpus= copy.deepcopy(test_corpus), sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='traditional_label')\n\n\n chars = list(sorted(set(list( ''.join(corpus) +bpmf_phonetic+'\\n\\r\\t∥'+ ''.join(simplifided_corpus)+''.join(test_data)))))\n chars.insert(0, '[CLS]')\n chars.insert(1, '[SEP]')\n chars.insert(2, '[UNK]')\n chars.insert(3, '[PAD]')\n chars.insert(4, '[MASK]')\n\n data.vocabs =data_test.vocabs=simplifided_label.vocabs=simplifided_test_label.vocabs = chars\n data.text2index=data_test.text2index =simplifided_label.text2index=simplifided_test_label.text2index = dict((c, i) for i, c in enumerate(chars))\n data.index2text =data_test.index2text =simplifided_label.index2text=simplifided_test_label.index2text= dict((i, c) for i, c in enumerate(chars))\n traditional_label = copy.deepcopy(data)\n traditional_test_label = copy.deepcopy(data_test)\n traditional_label.object_type =traditional_test_label.object_type = ObjectType.sequence_label\n traditional_label.symbol =traditional_test_label.symbol = 'traditional_label'\n\n mask_label = copy.deepcopy(data)\n mask_test_label = copy.deepcopy(data_test)\n #mask_label.object_type =mask_test_label.object_type= ObjectType.corpus\n mask_label.symbol = mask_test_label.symbol = 'mask_label'\n\n\n\n nextword=copy.deepcopy(data)\n nextword_test = copy.deepcopy(data_test)\n nextword.object_type=nextword_test.object_type=ObjectType.sequence_label\n nextword.symbol=nextword_test.symbol='nextword_label'\n nextword.sequence_offset=nextword_test.sequence_offset=1\n\n label=ZipDataset(seg_label,nextword,simplifided_label,traditional_label,mask_label)\n label_test = ZipDataset(seg_test_label, nextword_test, simplifided_test_label, traditional_test_label, mask_test_label)\n provider=TextSequenceDataProvider(\n traindata=Iterator(data=data,label=label),\n testdata=Iterator(data=data_test,label=label_test))\n return provider\n #,sample_filter=lambda x:x[0][-1]==3\n else:\n return None", "def load_datasets(data_dir: str) -> Tuple[List[Annotation], List[Annotation], List[Annotation]]:\n train_data = annotations_from_jsonl(os.path.join(data_dir, 'train.jsonl'))\n val_data = annotations_from_jsonl(os.path.join(data_dir, 'val.jsonl'))\n test_data = annotations_from_jsonl(os.path.join(data_dir, 'test.jsonl'))\n return train_data, val_data, test_data", "def loadData(catalog):\n loadArtworks(catalog)\n loadArtists(catalog)", "def load_dataset(path_dir, filelist,numlist,dtype=None):\n\n actdat=pd.read_csv(os.path.join(path_dir,filelist[0]),dtype=dtype,nrows=0)\n for i in range(0,len(filelist)):\n if numlist[i]>0:\n actdat=actdat.append(pd.read_csv(path_dir+filelist[i],dtype=dtype,nrows=numlist[i]))\n else:\n actdat=actdat.append(pd.read_csv(path_dir+filelist[i],dtype=dtype))\n\n actdat.reset_index(drop=True,inplace=True)\n\n return actdat", "def get_data_loaders(args, tokenizer):\n personachat = get_dataset(tokenizer, args.dataset_path, args.dataset_cache, 
args.train_lang)\n _ = personachat.pop(\"test\", None)\n logger.info(\"Build inputs and labels\")\n datasets = {\"train\": [], \"valid\": []}\n\n if args.train_lang in [\"En\", \"Fr\", \"It\", \"Id\", \"Jp\", \"Ko\", \"Zh\"]: #monolingual data\n for dataset_name, dataset in personachat.items():\n for dial in dataset[args.train_lang]: #dial: {\"persona\":[], \"history\":[], \"response\":str}\n instance = build_input_from_segments(dial[\"persona\"], dial[\"history\"][-args.max_turns:], dial[\"response\"], tokenizer, lm_labels = True)\n datasets[dataset_name].append(instance) \n else: #multilingual data\n for dataset_name, dataset in personachat.items():\n for lang, dials in dataset.items():\n for dial in dials: #dial: {\"persona\":[], \"history\":[], \"response\":str}\n instance = build_input_from_segments(dial[\"persona\"], dial[\"history\"][-args.max_turns:], dial[\"response\"], tokenizer, lang_id=\"<{}>\".format(lang.lower()), lm_labels = True)\n datasets[dataset_name].append(instance) #all langs together\n\n\n logger.info(\"Build train and validation dataloaders\")\n train_dataset = DatasetTrain(datasets[\"train\"])\n valid_dataset = DatasetTrain(datasets[\"valid\"])\n\n #logger.info(\"Build train and validation dataloaders\")\n #train_dataset, valid_dataset = TensorDataset(*tensor_datasets[\"train\"]), TensorDataset(*tensor_datasets[\"valid\"])\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if args.distributed else None\n valid_sampler = torch.utils.data.distributed.DistributedSampler(valid_dataset) if args.distributed else None\n train_loader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, shuffle=(not args.distributed), collate_fn=collate_fn)\n valid_loader = DataLoader(valid_dataset, sampler=valid_sampler, batch_size=args.valid_batch_size, shuffle=False, collate_fn=collate_fn)\n\n # logger.info(\"Train dataset (Batch, Candidates, Seq length): {}\".format(train_dataset.tensors[0].shape))\n # #logger.info(\"Train dataset (Batch, Candidates, Seq length): {}\".format(train_dataset.tensors[1].shape))\n # logger.info(\"Valid dataset (Batch, Candidates, Seq length): {}\".format(valid_dataset.tensors[0].shape))\n logger.info(\"Train dataset length: {}\".format(len(train_dataset)))\n logger.info(\"Valid dataset length: {}\".format(len(valid_dataset)))\n return train_loader, valid_loader, train_sampler, valid_sampler", "def loadData(catalog):\n loadArtists(catalog)\n loadArtworks(catalog)", "def loadData(catalog):\n loadArtists(catalog)\n loadArtworks(catalog)", "def load_dataset_in_memory(self, demo_list, hdf5_file, obs_keys, dataset_keys, load_next_obs):\n all_data = dict()\n print(\"SequenceDataset: loading dataset into memory...\")\n for ep in LogUtils.custom_tqdm(demo_list):\n all_data[ep] = {}\n all_data[ep][\"attrs\"] = {}\n all_data[ep][\"attrs\"][\"num_samples\"] = hdf5_file[\"data/{}\".format(ep)].attrs[\"num_samples\"]\n # get obs\n all_data[ep][\"obs\"] = {k: hdf5_file[\"data/{}/obs/{}\".format(ep, k)][()].astype('float32') for k in obs_keys}\n if load_next_obs:\n all_data[ep][\"next_obs\"] = {k: hdf5_file[\"data/{}/next_obs/{}\".format(ep, k)][()].astype('float32') for k in obs_keys}\n # get other dataset keys\n for k in dataset_keys:\n if k in hdf5_file[\"data/{}\".format(ep)]:\n all_data[ep][k] = hdf5_file[\"data/{}/{}\".format(ep, k)][()].astype('float32')\n else:\n all_data[ep][k] = np.zeros((all_data[ep][\"attrs\"][\"num_samples\"], 1), dtype=np.float32)\n\n if \"model_file\" in 
hdf5_file[\"data/{}\".format(ep)].attrs:\n all_data[ep][\"attrs\"][\"model_file\"] = hdf5_file[\"data/{}\".format(ep)].attrs[\"model_file\"]\n\n return all_data", "def get_loaders(train_dataset, val_dataset, test_dataset, batch_size=128):\n train_loader = DataLoader(train_dataset, batch_size=batch_size, num_workers=8,\n shuffle=True)\n\n val_loader = DataLoader(val_dataset, batch_size=batch_size, num_workers=8,\n shuffle=False)\n\n test_loader = DataLoader(test_dataset, batch_size=batch_size, num_workers=8,\n shuffle=False)\n\n return train_loader, val_loader, test_loader", "def load_dataset():\n # Get the start time\n start_time = time.time()\n\n # Load dataset YAML file\n # This contains all of our image labels, as well as locations of the images themself\n print(\"Reading dataset/dataset.yaml... \", end=\"\")\n with open(\"dataset/dataset.yaml\", \"r\") as file:\n dataset = yaml.safe_load(file)\n\n # Get paths, labels\n paths = []\n labels = []\n for sample in dataset:\n # Assign a \"1\" label if we're looking at the ground\n # 0 for everything else: trees, buildings, cars, etc\n label_semantic = max(sample[\"labels\"].keys(), key=sample[\"labels\"].get)\n if max(sample[\"labels\"].values()) < 0.80:\n # Samples that are not obviously in any one category: unsafe\n label=0\n elif label_semantic == \"GROUND\":\n # Safe if >80% ground\n label = 1\n else:\n # Unsafe otherwise, this is usually water\n label = 0\n\n paths.append(sample[\"path\"])\n labels.append(label)\n print(\"done!\", flush=True)\n\n print(\"Loading images\", end=\"\")\n # Get images\n images = np.zeros((len(paths), 128, 128, 3), dtype=np.float32)\n progress = 0.0\n for i, path in enumerate(paths):\n images[i] = np.array(PIL.Image.open(path).resize((128, 128))) / 255.0\n if i / len(paths) > progress:\n progress += 1.0 / 20.0\n print(\".\", end=\"\", flush=True)\n print(\" done!\")\n labels = np.array(labels, dtype=np.int)\n\n # Return\n print(f\"Loaded {len(images)} images in {time.time() - start_time} seconds!\")\n return images, labels", "def test_cached_dataloader(self):\n\n v = [\"data\", \"target\", \"model_out_sqnet\"]\n\n for data, target in self.train_loader:\n b, c, h, w = data[v[0]].shape\n assert data[v[1]].shape == (b, )\n assert data[v[2]].shape == (b, 100)\n assert data[v[1]].shape == target.shape", "def load_data(self):\n with open('data/fordTrain.csv') as f:\n data = csv.reader(f, delimiter=',')\n train = [x for i, x in enumerate(data) if i > 0] \n # Extract features and target variable separately\n trainx = [x[3:] for x in train]\n trainy = [x[2] for x in train]\n\n with open('data/fordTest.csv') as f:\n data = csv.reader(f, delimiter=',')\n testx = [x[3:] for i, x in enumerate(data) if i > 0] \n\n with open('data/Solution.csv') as f:\n data = csv.reader(f, delimiter=',')\n testy = [x[2] for i, x in enumerate(data) if i > 0] \n\n # Extract features and target variable, convert to numpy array\n trainx = np.asarray(trainx, dtype=np.float32)\n trainy = np.asarray(trainy, dtype=np.int8)\n testx = np.asarray(testx, dtype=np.float32)\n testy = np.asarray(testy, dtype=np.int8)\n\n # Return training and test sets\n trainSet = Dataset(trainx, trainy)\n testSet = Dataset(testx, testy)\n return trainSet, testSet", "def fetch_data():\n for category in CHEATSHEETS.items():\n subprocess.call(f'curl -o {PATH}{category[0] + \".csv\"} {category[1]}', shell=True)\n\n index = -1\n for filename in os.listdir(PATH):\n for idx, row in pd.read_csv(PATH + filename, on_bad_lines='skip').replace(np.nan, '').iterrows():\n name = 
row['Model']\n url = REDIRECT_URL + name.lower()\n category = filename.split('.')[0]\n featurizers = row['Acceptable Featurizers'].split(' ') if row['Acceptable Featurizers'] != '' else []\n backends = ['PyTorch' if item in {\"PTorch\", \"Torch\", \"PyTorch \"} else item for item in row['Backend'].split('/')]\n types = row['Type'] if filename != 'general.csv' else row['Classifier/Regressor']\n types = types.split('/') if filename == 'material.csv' else types.split('/ ')\n index += 1\n\n backend_list.append(backends)\n type_list.append(types)\n featurizer_list.append(featurizers)\n model_list.append(Model(name, url, category, featurizers, backends, types, index))", "def data_creator(config):\n train_dataset, val_dataset = LinearDataset(2, 5), LinearDataset(2, 5)\n train_loader = DataLoader(train_dataset, batch_size=config[\"batch_size\"])\n val_loader = DataLoader(val_dataset, batch_size=config[\"batch_size\"])\n return train_loader, val_loader", "def load_data():\n\n server_node = load_nodes(SERVER_NODE_INFILE)\n road_node = load_nodes(ROAD_NODE_INFILE)\n road_segment_point = load_nodes(ROAD_SEGMENT_POINT_INFILE)\n\n return server_node, road_node, road_segment_point", "def load_data():\r\n global labelNames\r\n print(\"Loading Data...\")\r\n\r\n fnpath = \"rawdata\\\\cifar-10-batches-py\"\r\n fnprefix = 'data_batch_'\r\n fnlblnames = 'batches.meta'\r\n fntstbatch = 'test_batch'\r\n\r\n labelNames = unpickle(path.join(fnpath, fnlblnames))\r\n label_names = []\r\n for label in labelNames['label_names']:\r\n label_names.append(\"\".join(map(chr, label)))\r\n labelNames['label_names'] = label_names\r\n\r\n CIFAR_Data.append(unpickle(path.join(fnpath, fntstbatch)))\r\n for n in range(1, 6):\r\n CIFAR_Data.append(unpickle(path.join(fnpath, fnprefix + str(n))))", "def setup_datasets(self):\r\n\r\n train_transform = transforms.Compose(\r\n [\r\n transforms.Resize(self.crop_size),\r\n transforms.RandomRotation(degrees=self.random_angle, resample=Image.BILINEAR),\r\n transforms.RandomResizedCrop(\r\n size=self.crop_size, scale=(1-self.random_scale, 1+self.random_scale), ratio=(1, 1)),\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n transforms.Normalize(\r\n mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225]\r\n )\r\n ]\r\n )\r\n val_transform = transforms.Compose(\r\n [\r\n transforms.Resize(self.crop_size),\r\n transforms.CenterCrop(self.crop_size),\r\n transforms.ToTensor(),\r\n transforms.Normalize(\r\n mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225]\r\n )\r\n ]\r\n )\r\n\r\n train_dataset = CocoDatasetPairs(\r\n root_dir=self.coco_path,\r\n set_name='train2014',\r\n transform=train_transform,\r\n dataset_size_ratio=self.dataset_size_ratio\r\n )\r\n train_subset_dataset = Subset(train_dataset, range(0, len(train_dataset), 5*self.dataset_size_ratio))\r\n val_dataset = CocoDatasetPairs(\r\n root_dir=self.coco_path,\r\n set_name='val2014',\r\n transform=val_transform,\r\n )\r\n\r\n train_loader = DataLoader(\r\n train_dataset,\r\n batch_size=self.batch_size,\r\n shuffle=True,\r\n num_workers=self.num_workers\r\n )\r\n train_subset_loader = DataLoader(\r\n train_subset_dataset,\r\n batch_size=self.batch_size,\r\n shuffle=False,\r\n num_workers=self.num_workers\r\n )\r\n val_loader = DataLoader(\r\n val_dataset,\r\n batch_size=self.batch_size,\r\n shuffle=False,\r\n num_workers=self.num_workers\r\n )\r\n return train_loader, train_subset_loader, val_loader", "def load_dataset(filenames, batch_size, corruption_func, crop_size):\n cache = dict()\n while True:\n 
source_batch = np.zeros((batch_size, crop_size[0], crop_size[1], 1))\n target_batch = np.zeros((batch_size, crop_size[0], crop_size[1], 1))\n chosen_filenames = _read_images_and_put_in_dict(filenames,\n cache,\n batch_size)\n for filename_idx in range(len(chosen_filenames)):\n filename = chosen_filenames[filename_idx]\n im = cache[filename]\n patch = _get_random_patch_of_the_image(im, crop_size)\n corrupted_im = corruption_func(patch)\n source_batch[filename_idx, :, :, 0] = corrupted_im - 0.5\n target_batch[filename_idx, :, :, 0] = patch - 0.5\n yield source_batch, target_batch", "def _preload_all_samples(self):\n if self.mode in ['train_noval', 'train_with_val']:\n\n self._images_train, self._labels_train = [], []\n desc = \"Loading train image pairs & flows\"\n with tqdm(total=len(self._img_trn_path), desc=desc, ascii=True, ncols=100) as pbar:\n for n, image_path in enumerate(self._img_trn_path):\n pbar.update(1)\n label_path = self._lbl_trn_path[n]\n image, label = self._load_sample(image_path, label_path)\n self._labels_train.append(label)\n self._images_train.append(image)\n\n if self.mode == 'train_with_val':\n self._images_val, self._labels_val = [], []\n desc = \"Loading val image pairs & flows\"\n with tqdm(total=len(self._img_val_path), desc=desc, ascii=True, ncols=100) as pbar:\n for n, image_path in enumerate(self._img_val_path):\n pbar.update(1)\n label_path = self._lbl_val_path[n]\n image, label = self._load_sample(image_path, label_path, preprocess=False)\n self._labels_val.append(label)\n self._images_val.append(image)\n\n if self.opts['tb_test_imgs'] is True:\n self._images_test = []\n desc = \"Loading test samples\"\n with tqdm(total=len(self._img_tst_path), desc=desc, ascii=True, ncols=100) as pbar:\n for image_path in self._img_tst_path:\n pbar.update(1)\n self._images_test.append(self._load_sample(image_path, preprocess=False))\n\n elif self.mode in ['val', 'val_notrain']:\n\n self._images_val, self._labels_val = [], []\n desc = \"Loading val image pairs & flows\"\n with tqdm(total=len(self._img_val_path), desc=desc, ascii=True, ncols=100) as pbar:\n for n, image_path in enumerate(self._img_val_path):\n pbar.update(1)\n label_path = self._lbl_val_path[n]\n image, label = self._load_sample(image_path, label_path, preprocess=False)\n self._labels_val.append(label)\n self._images_val.append(image)\n\n elif self.mode == 'test':\n self._images_test = []\n desc = \"Loading test samples\"\n with tqdm(total=len(self._img_tst_path), desc=desc, ascii=True, ncols=100) as pbar:\n for image_path in self._img_tst_path:\n pbar.update(1)\n self._images_test.append(self._load_sample(image_path, preprocess=False))", "def get_each_loader(data_path, batch_size, trn_negnum, shuffle=True, num_workers=0):\n \n dataset = ML_Dataset(data_path, trn_negnum)\n \n if data_path.endswith('trn') == True:\n collate = dataset.train_collate\n else:\n collate = test_collate\n\n data_loader = data.DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n collate_fn=collate)\n\n return data_loader", "def load_all(cls, data):\n return [cls.load(obj) for obj in data]", "def _load_split_data(self, dataset_path):\n for i, prefix in enumerate(['train', 'dev', 'test']):\n filename = os.path.join(dataset_path, '{}.txt'.format(prefix))\n knowledge, src, tgt = self._load_multi_data(filename)\n self.group_text_data[0].append(knowledge)\n self.group_text_data[1].append(src)\n self.group_text_data[2].append(tgt)", "def DataLoader(data_place):\n # Nd = []\n # Np = []\n # Nz = 
[]\n # channel_num = []\n # images = []\n # id_labels = []\n # pose_labels = []\n\n # mycase\n # Nz = 50\n # channel_num = 3\n # images = np.load('{}/images.npy'.format(data_place))\n # id_labels = np.load('{}/ids.npy'.format(data_place))\n # pose_labels = np.load('{}/yaws.npy'.format(data_place))\n #\n # Np = int(pose_labels.max() + 1)\n # Nd = int(id_labels.max() + 1)\n #\n # return [images, id_labels, pose_labels, Nd, Np, Nz, channel_num]\n\n # mycase MultiPIE\n Nz = 50\n channel_num = 3\n image_attributes_df = pd.read_csv(data_place)\n\n Nd = int(np.max(image_attributes_df['Id'])+1)\n Np = int(np.max(image_attributes_df['pose'])+1)\n Ni = int(np.max(image_attributes_df['illum'])+1)\n\n return [image_attributes_df, Nd, Np, Ni, Nz, channel_num]", "def load_all_dfs(clf_list = ['test_small','rt_small','test2_small']):\n \n start = time.clock()\n print('loading data')\n first_clf = clf_list[0]\n df = pd.read_csv('Pikki'+first_clf+'.csv')\n df['df'] = first_clf\n\n df = df.set_index(['id','df'])\n\n for clf in clf_list[1:]:\n file_name = 'Pikki' + clf + '.csv'\n df_tmp = pd.read_csv(file_name)\n df_tmp['df'] = clf\n\n df_tmp = df_tmp.set_index(['id','df'])\n\n df = pd.concat([df,df_tmp])\n\n \n df['std'] = df.apply(np.std,axis=1,raw = True)\n end = time.clock()\n print(end-start)\n return df#.swaplevel(0,1)", "def get_data_loaders(data, batch_size, ratio=0.8, num_workers=1):\n train_size = int(len(data) * ratio)\n val_size = len(data) - train_size\n train_set, val_set = random_split(data, [train_size, val_size])\n data_train_loader = DataLoader(train_set, batch_size=batch_size, num_workers=num_workers, shuffle=True)\n data_val_loader = DataLoader(val_set, batch_size=batch_size, num_workers=num_workers, shuffle=True)\n return data_train_loader, data_val_loader", "def load_dataset(ipc = 20000):\n files = os.listdir(\"..\\\\data\")\n ind = 0\n xs = []\n ys = []\n classNames = []\n for file in files:\n fileSplit = file.split('.')\n print('--Loading ' + fileSplit[0][18:] + ' data.')\n classNames.append(fileSplit[0][18:])\n x = np.load(\"..\\\\data\\\\\" + file)\n x = x.astype('float32')/255\n xs.append(x[0:ipc, :])\n y = np.array([float(ind) for i in range(ipc)])\n ys.append(y.reshape(ipc, 1))\n ind += 1\n\n xs = np.array(xs)\n ys = np.array(ys)\n xs = xs.reshape(xs.shape[0]*xs.shape[1], xs.shape[2])\n ys = ys.reshape(ys.shape[0]*ys.shape[1], ys.shape[2])\n return xs, ys, classNames", "def load_all(self, root_dir, file_list=None, pattern=None):\n\n # Select paths for training and evaluation\n if file_list is None:\n data_paths = glob.glob(os.path.join(root_dir, '*')) # list of all paths\n else:\n data_paths = [os.path.join(root_dir, p) for p in file_list]\n if len(data_paths) == 0:\n raise Exception('No files found using: {}'.format(os.path.join(root_dir, '*')))\n\n if pattern is None:\n # by default evaluate on\n selected_paths = data_paths\n else:\n selected_paths = list(filter(lambda x: re.search(pattern, x), data_paths))\n\n input_paths = [p for p in selected_paths if os.path.isfile(p) and p.endswith('.csv')]\n if len(input_paths) == 0:\n raise Exception(\"No .csv files found using pattern: '{}'\".format(pattern))\n\n if self.n_proc > 1:\n # Load in parallel\n _n_proc = min(self.n_proc, len(input_paths)) # no more than file_names needed here\n logger.info(\"Loading {} datasets files using {} parallel processes ...\".format(len(input_paths), _n_proc))\n with Pool(processes=_n_proc) as pool:\n all_df = pd.concat(pool.map(PMUData.load_single, input_paths))\n else: # read 1 file at a time\n 
all_df = pd.concat(PMUData.load_single(path) for path in input_paths)\n\n return all_df", "def get_datasets(load_key=None, maven=False):\n ds_names = {}\n if load_key == 'R2349': \n ds_names['batsrus_mf_lr'] = model_dir+'R2349/batsrus_3d_multi_fluid_lowres.h5'\n ds_names['batsrus_multi_species'] = model_dir+'R2349/batsrus_3d_multi_species.h5'\n ds_names['batsrus_electron_pressure'] = model_dir+'R2349/batsrus_3d_pe.h5'\n ds_names['heliosares'] ='/Volumes/triton/Data/ModelChallenge/R2349/heliosares_multi.h5'\n #ds_names['rhybrid'] ='/Volumes/triton/Data/ModelChallenge/R2349/rhybrid.h5'\n \n ds_types = {'batsrus1':[key for key in ds_names.keys() if 'multi_fluid' in key],\n 'batsrus2':[key for key in ds_names.keys() if 'multi_species' in key],\n 'batsrus3':[key for key in ds_names.keys() if 'electron_pressure' in key],\n 'batsrus4':[key for key in ds_names.keys() if 'mf_lr' in key],\n 'heliosares':[key for key in ds_names.keys() if 'helio' in key],\n 'rhybrid_helio':[key for key in ds_names.keys() if 'rhybrid' in key ]}\n if maven or True:\n ds_names['maven']=orbit_dir+'orbit_2349.csv'\n #ds_names['maven'] = orbit_dir+'orbit_plume_2349.csv'\n ds_types['maven']=['maven']\n elif load_key == 'batsrus_mf_lowres':\n ds_names['batsrus_mf_lr'] = model_dir+'R2349/batsrus_3d_multi_fluid_lowres.h5'\n ds_types = {'batsrus_mf_lr' : ['batsrus_mf_lr']}\n\n\n elif load_key == 'helio_multi':\n ds_names['t00550'] = model_dir+'R2349/Heliosares_Multi/t00550.h5'\n ds_names['t00560'] = model_dir+'R2349/Heliosares_Multi/t00560.h5'\n ds_names['t00570'] = model_dir+'R2349/Heliosares_Multi/t00570.h5'\n ds_names['t00580'] = model_dir+'R2349/Heliosares_Multi/t00580.h5'\n ds_names['t00590'] = model_dir+'R2349/Heliosares_Multi/t00590.h5'\n ds_names['t00600'] = model_dir+'R2349/Heliosares_Multi/t00600.h5'\n ds_names['t00610'] = model_dir+'R2349/Heliosares_Multi/t00610.h5'\n ds_names['t00620'] = model_dir+'R2349/Heliosares_Multi/t00620.h5'\n ds_names['t00630'] = model_dir+'R2349/Heliosares_Multi/t00630.h5'\n ds_names['t00640'] = model_dir+'R2349/Heliosares_Multi/t00640.h5'\n ds_names['t00650'] = model_dir+'R2349/Heliosares_Multi/t00650.h5'\n\n ds_types = {'heliosares':[key for key in ds_names.keys()]}\n if maven:\n #ds_names['maven'] = orbit_dir+'orbit_2349.csv'\n ds_types['maven']=['maven']\n elif load_key == 'SDC_BATS':\n ds_names['LS180_SSL000_max'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL000_max.h5'\n ds_names['LS270_SSL000_max'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL000_max.h5'\n ds_names['LS090_SSL000_max'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL000_max.h5'\n ds_names['LS180_SSL270_max'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL270_max.h5'\n ds_names['LS270_SSL270_max'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL270_max.h5'\n ds_names['LS090_SSL270_max'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL270_max.h5'\n ds_names['LS180_SSL180_max'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL180_max.h5'\n ds_names['LS270_SSL180_max'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL180_max.h5'\n ds_names['LS090_SSL180_max'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL180_max.h5'\n ds_names['LS180_SSL000_min'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL000_min.h5'\n ds_names['LS270_SSL000_min'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL000_min.h5'\n ds_names['LS090_SSL000_min'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL000_min.h5'\n ds_names['LS180_SSL270_min'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL270_min.h5'\n ds_names['LS270_SSL270_min'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL270_min.h5'\n ds_names['LS090_SSL270_min'] = 
model_dir+'SDC_Archive/BATSRUS/LS090_SSL270_min.h5'\n ds_names['LS180_SSL180_min'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL180_min.h5'\n ds_names['LS270_SSL180_min'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL180_min.h5'\n ds_names['LS090_SSL180_min'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL180_min.h5'\n\n ds_types = {'batsrus':[key for key in ds_names.keys()]}\n\n elif load_key == 'SDC_G1':\n #BATSRUS\n ds_names['bats_min_LS270_SSL0'] = \\\n model_dir+'SDC_Archive/BATSRUS/'+'3d__ful_4_n00060000_PERmin-SSLONG0.h5'\n ds_names['bats_min_LS270_SSL180'] = \\\n model_dir+'SDC_Archive/BATSRUS/'+'3d__ful_4_n00060000_PERmin-SSLONG180.h5'\n ds_names['bats_min_LS270_SSL270'] = \\\n model_dir+'SDC_Archive/BATSRUS/'+'3d__ful_4_n00060000_PERmin-SSLONG270.h5' \n \n #HELIOSARES\n #ds_names['helio_1'] = \\\n # model_dir+'SDC_Archive/HELIOSARES/Hybrid/'+'helio_1.h5'\n \n #ds_names['helio_2'] = \\\n # model_dir+'SDC_Archive/HELIOSARES/Hybrid/'+'helio_2.h5'\n \n \n ds_types = {'batsrus1':[key for key in ds_names.keys() if 'bats' in key],\n 'heliosares':[key for key in ds_names.keys() if 'helio' in key]}\n if maven:\n pass\n #ds_names['maven'] = orbit_dir+'orbit_2349.csv'\n #ds_types['maven']=['maven']\n\n elif load_key == 'rhybrid_res':\n ds_names = {'rhybrid240':'/Volumes/triton/Data/ModelChallenge/R2349/rhybrid.h5',\n 'rhybrid120':'/Volumes/triton/Data/ModelChallenge/R2349/HYB/state00030000.h5'}\n ds_types = {'rhybrid1':['rhybrid240'], 'rhybrid2':['rhybrid120']}\n elif load_key == 'batsrus_tseries':\n ds_names = {'batsrus_mf':'/Volumes/triton/Data/ModelChallenge/R2349/BATSRUS/10km_mf/3d__ful_4_n00040000.h5',\n 'batsrus_ms':'/Volumes/triton/Data/ModelChallenge/R2349/BATSRUS/10km_ms/3d__mhd_6_n0050000.h5'}\n ds_types = {'batsrus_mf':['batsrus_mf'], 'batsrus_ms':['batsrus_ms']}\n\n elif load_key == 'maven':\n ds_names, ds_types = {},{}\n ds_names['maven'] = orbit_dir+'orbit_2349.csv'\n ds_types['maven']=['maven']\n elif load_key == 'exo_2349':\n keys = ['2349_1RM_225km','2349_1RM_450km', '2349_2RM_450km',\n '2349_2RM_900km','2349_4RM_900km'] \n ds_names = {k:exo_dir+'/'+k+'/'+k+'.h5' for k in keys}\n ds_types = {k:[k] for k in keys}\n elif load_key == 'exo_comparisonA':\n keys = ['2349_1RM_225km', '2349_2RM_450km',\n '2349_1.5RM_338km'] \n ds_names = {k:exo_dir+'/ComparisonA/'+k+'.h5' for k in keys}\n ds_types = {k:[k] for k in keys}\n elif load_key == 'exo_comparisonB':\n keys = ['2349_1RM_225km', 'T0_1RM_225km', 'T1_1RM_225km', \"T2_1RM_225km\"] \n ds_names = {k:exo_dir+'/ComparisonB/'+k+'.h5' for k in keys}\n ds_types = {k:[k] for k in keys}\n\n elif load_key == 'exo_t1':\n keys = ['T1_1RM_112km', 'T1_1RM_225km', #'T1_1RM_450km',\n 'T1_2RM_225km', 'T1_2RM_450km', #'T1_2RM_900km',\n 'T1_4RM_900km']\n\n ds_names = {k:exo_dir+'/'+k+'/'+k+'.h5' for k in keys}\n ds_types = {k:[k] for k in keys}\n\n else:\n print('No datasets selected')\n \n\n return (ds_names, ds_types)", "def load_data(self):\n\n self._load_train_data()\n self._load_test_data()", "def loadData(catalog, size):\n loadCategories(catalog)\n loadVideos(catalog, size)", "def importData():\n #importChallengeDataToDB()\n importTrendingDataToDB()", "def get_test_data_loaders(cap_files, visual_feats, vocab, bow2vec, batch_size=100, num_workers=2, n_caption=2, video2frames = None):\n dset = {'test': Dataset4DualEncoding(cap_files['test'], visual_feats['test'], bow2vec, vocab, n_caption, video2frames = video2frames['test'])}\n\n\n data_loaders = {x: torch.utils.data.DataLoader(dataset=dset[x],\n batch_size=batch_size,\n shuffle=False,\n 
pin_memory=True,\n num_workers=num_workers,\n collate_fn=collate_frame_gru_fn)\n for x in cap_files }\n return data_loaders", "def _load_data_worker(self,img_dir,lbl_dir):\n data = []\n\n for img,lbl in zip(glob(img_dir+\"/*.jpg\"),glob(lbl_dir+\"/*.txt\")):\n im = np.array(Image.open(img))\n im = make_square_image_with_padding(im, self.core_config.num_colors)\n lbl_fh = open(lbl,encoding='utf-8')\n\n objects = self._get_objects(lbl_fh)\n sorted_objects = sort_object_list(objects)\n object_class = self._get_object_classes(sorted_objects)\n \n image_with_objects = {\n 'img':im,\n 'objects':sorted_objects,\n 'object_class': object_class\n }\n\n image_with_mask = convert_to_mask(image_with_objects, self.core_config)\n\n data.append(image_with_mask)\n lbl_fh.close()\n\n return data", "def _retrieve_data(self, log, progressbar, files):\n # ESGF frequently doesn't work. Until I get a document from them\n # that specifies a reliable API, I'm giving up.\n msg = \"ESGF has become too unreliable, so it's temporarily unsupported.\"\n raise NotImplementedError(msg)\n# login_successful = self._authenticator.login()\n# if not login_successful:\n# self._app.logger.warn(\"Failed to login.\")\n# session = self._authenticator.session\n\n temp_ds = []\n url_length = len(files)\n session = None\n\n # Add two to the progress bar. One for just starting, and another\n # for when it's all finished. Without these extra, the user can be\n # looking at a blank progress bar for the whole time, since _clean()\n # takes so long.\n progressbar.start(2*url_length)\n for i, remotefile in files:\n\n # The remotefile is just the filename, which is nicer for display.\n # Need the full url.\n url = self._url_from_file(remotefile)\n if session is None and self._authenticator.login(url):\n session = self._authenticator.session\n \n if session is not None:\n xdataset = xr.open_dataset(url,\n decode_cf=False,\n engine='pydap',\n session=session)\n msg = \"Cleaning: {0}.\".format(remotefile)\n# # Normalize it.\n# # FIX ME: Consider moving this to another place. This\n# # operation is the biggest bottleneck of this searching and\n# # retrieving data.\n self._clean(x)\n\n temp_ds.append(xdataset)\n msg = \"Retained: {0}\".format(filename)\n log.debug(msg) \n progressbar.update(msg)\n \n else:\n msg = \"Login failed.\"\n print msg\n log.debug(msg)\n progressbar.update(msg)\n\n # Don't stay logged on.\n self._authenticator.logout()\n\n # Return the list of xarray Dataset objects. 
The Data_repospecset data\n # structure can't hold the datasets thus far collected because, in\n # general, their coordinates will be defined on different lattices.\n return temp_ds", "def load_data(limit=None, load_test=False):\n data_snli_dev, data_snli_train = None, None\n data_multinli_train, data_multinli_dev = None, None\n data_mli_train, data_mli_dev = None, None\n data_mli_test = None\n\n if SNLI_TRAIN_FILENAME.exists():\n data_snli_train = read_nli_data(SNLI_TRAIN_FILENAME, set_genre='snli', limit=limit)\n data_snli_dev = read_nli_data(SNLI_DEV_FILENAME, set_genre='snli', limit=limit)\n logging.info('SNLI: train - %s, dev - %s', data_snli_train.shape, data_snli_dev.shape)\n\n if MULTINLI_TRAIN_FILENAME.exists():\n data_multinli_train = read_nli_data(MULTINLI_TRAIN_FILENAME, limit=limit)\n data_multinli_dev = read_nli_data(MULTINLI_DEV_FILENAME, limit=limit)\n logging.info('MultiNLI: train - %s, dev - %s', data_multinli_train.shape, data_multinli_dev.shape)\n\n if MLI_TRAIN_FILENAME.exists():\n data_mli_train = read_nli_data(MLI_TRAIN_FILENAME, set_genre='clinical', limit=limit)\n data_mli_dev = read_nli_data(MLI_DEV_FILENAME, set_genre='clinical', limit=limit)\n logging.info('MLI: train - %s, dev - %s', data_mli_train.shape, data_mli_dev.shape)\n\n if load_test:\n data_mli_test = read_nli_data(MLI_TEST_FILENAME, set_genre='clinical', limit=limit)\n\n # Drop columns that are presented not in all datasets\n columns_to_drop = ['captionID', 'promptID', 'annotator_labels']\n for d in [data_snli_dev, data_snli_train, data_multinli_train, data_multinli_dev, data_mli_train, data_mli_dev,\n data_mli_test]:\n if d is not None:\n d.drop(columns_to_drop, axis=1, inplace=True, errors='ignore')\n\n # concatenate all data together\n data_train = pd.concat([data_snli_train, data_multinli_train, data_mli_train], axis=0)\n data_dev = pd.concat([data_snli_dev, data_multinli_dev, data_mli_dev], axis=0)\n\n data_train.set_index('genre', inplace=True)\n data_dev.set_index('genre', inplace=True)\n\n if data_mli_test is not None:\n data_mli_test.set_index('genre', inplace=True)\n\n if not load_test:\n return data_dev, data_train\n else:\n return data_dev, data_train, data_mli_test", "def get_dataloaders(folder: str, dimensions: int, batch_size: int, s: int,\n num_workers: int):\n image_data = get_data(folder, dimensions)\n for train_inds, val_inds in cv_index_partitions(len(image_data), s):\n train_dataloader = DataLoader(\n Subset(image_data, train_inds),\n batch_size=batch_size,\n shuffle=True,\n num_workers=num_workers\n )\n val_dataloader = DataLoader(\n Subset(image_data, val_inds),\n batch_size=batch_size,\n num_workers=num_workers\n )\n yield train_dataloader, val_dataloader", "def load_all(self, root_dir, file_list=None, pattern=None):\n # each file name corresponds to another date. 
Also tools (A, B) and others.\n\n # Select paths for training and evaluation\n if file_list is None:\n data_paths = glob.glob(os.path.join(root_dir, '*')) # list of all paths\n else:\n data_paths = [os.path.join(root_dir, p) for p in file_list]\n if len(data_paths) == 0:\n raise Exception('No files found using: {}'.format(os.path.join(root_dir, '*')))\n\n if pattern is None:\n # by default evaluate on\n selected_paths = data_paths\n else:\n selected_paths = list(filter(lambda x: re.search(pattern, x), data_paths))\n\n input_paths = [p for p in selected_paths if os.path.isfile(p) and p.endswith('.csv')]\n if len(input_paths) == 0:\n raise Exception(\"No .csv files found using pattern: '{}'\".format(pattern))\n\n if self.n_proc > 1:\n # Load in parallel\n _n_proc = min(self.n_proc, len(input_paths)) # no more than file_names needed here\n logger.info(\"Loading {} datasets files using {} parallel processes ...\".format(len(input_paths), _n_proc))\n with Pool(processes=_n_proc) as pool:\n all_df = pd.concat(pool.map(WeldData.load_single, input_paths))\n else: # read 1 file at a time\n all_df = pd.concat(WeldData.load_single(path) for path in input_paths)\n\n return all_df", "def load_data(dataset, root, batch_size, workers):\n # Data transform\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n train_transform = transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ])\n query_transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize,\n ])\n\n # Construct data loader\n index = dataset.index(\"IF\")\n sub = dataset[index:]\n if sub == 'IF100':\n train_dir = os.path.join(root, 'train-IF100')\n elif sub == 'IF50':\n train_dir = os.path.join(root, 'train-IF50')\n elif sub == 'IF20':\n train_dir = os.path.join(root, 'train-IF20')\n elif sub == 'IF10':\n train_dir = os.path.join(root, 'train-IF10')\n elif sub == 'IF1':\n train_dir = os.path.join(root, 'train-IF1')\n else:\n print('train path error')\n return\n # train_dir = os.path.join(root, 'train')\n query_dir = os.path.join(root, 'query')\n database_dir = os.path.join(root, 'database')\n\n train_dataset = ImagenetDataset(\n train_dir,\n transform=train_transform,\n targets_transform=Onehot(100),\n )\n\n train_dataloader = DataLoader(\n train_dataset,\n batch_size=batch_size,\n shuffle=True,\n num_workers=workers,\n pin_memory=True,\n )\n\n query_dataset = ImagenetDataset(\n query_dir,\n transform=query_transform,\n targets_transform=Onehot(100),\n )\n\n query_dataloader = DataLoader(\n query_dataset,\n batch_size=batch_size,\n num_workers=workers,\n pin_memory=True,\n )\n\n database_dataset = ImagenetDataset(\n database_dir,\n transform=query_transform,\n targets_transform=Onehot(100),\n )\n\n database_dataloader = DataLoader(\n database_dataset,\n batch_size=batch_size,\n num_workers=workers,\n pin_memory=True,\n )\n\n return train_dataloader, query_dataloader, database_dataloader", "def fetch_dataset(data_root_dir):\n pattern = \"winemag_dataset_*.csv\"\n\n file_list = glob.glob(os.path.join(data_root_dir, pattern))\n\n df_list = [pd.read_csv(fname) for fname in file_list]\n\n full_df = pd.concat(df_list)\n\n # give unique row names to all\n full_df.index = range(full_df.shape[0])\n\n print(\"Dataset fetched.\")\n return full_df", "def make_dataloaders(params):\r\n transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4),\r\n 
transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])])\r\n\r\n transform_validation = transforms.Compose([transforms.ToTensor(),\r\n transforms.Normalize([0.4914, 0.4822, 0.4465],\r\n [0.2023, 0.1994, 0.2010])])\r\n\r\n transform_validation = transforms.Compose([transforms.ToTensor()])\r\n\r\n trainset = torchvision.datasets.CIFAR10(root=params['path'], train=True, transform=transform_train)\r\n testset = torchvision.datasets.CIFAR10(root=params['path'], train=False, transform=transform_validation)\r\n\r\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, num_workers=4)\r\n testloader = torch.utils.data.DataLoader(testset, batch_size=params['batch_size'], shuffle=False, num_workers=4)\r\n return trainloader, testloader", "def get_dataloaders(datasets, split, args, is_eval=False):\n dataloaders = []\n for task, dataset in datasets.items():\n if is_eval:\n num_rows = dataset.num_rows if args.eval_rows == -1 else args.eval_rows\n else:\n num_rows = dataset.num_rows if args.train_rows == -1 else args.train_rows\n all_input_ids = np.zeros([num_rows, args.max_length])\n all_attention_mask = np.zeros([num_rows, args.max_length])\n all_token_type_ids = np.zeros([num_rows, args.max_length])\n for i in range(num_rows):\n features = dataset[i]\n curr_len = len(features[\"attention_mask\"])\n all_input_ids[i,:curr_len] = features[\"input_ids\"]\n all_attention_mask[i,:curr_len] = features[\"attention_mask\"]\n all_token_type_ids[i,:curr_len] = features[\"token_type_ids\"]\n all_input_ids = torch.tensor(all_input_ids, dtype=torch.long)\n all_attention_mask = torch.tensor(all_attention_mask, dtype=torch.long)\n all_token_type_ids = torch.tensor(all_token_type_ids, dtype=torch.long)\n all_label = torch.tensor(dataset[:num_rows][\"label\"], dtype=torch.long)\n if task == \"stsb\":\n all_label = all_label.float()\n \n data = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label)\n if split in [\"train\", \"support\"]:\n sampler = RandomSampler(data)\n dataloader = DataLoader(data, sampler=sampler, batch_size=args.train_batch_size)\n else:\n sampler = SequentialSampler(data)\n dataloader = DataLoader(data, sampler=sampler, batch_size=args.eval_batch_size)\n dataloaders.append(dataloader)\n return dataloaders", "def prepare_multisubjectdataset(args, load_training=True, load_validation=True,\n load_testing=True,\n log_level=logging.root.level):\n with Timer(\"\\nPreparing datasets\", newline=True, color='blue'):\n dataset = MultiSubjectDataset(\n args.hdf5_file, lazy=args.lazy, cache_size=args.cache_size,\n log_level=log_level)\n dataset.load_data(load_training, load_validation, load_testing)\n\n logging.info(\"Number of subjects loaded: \\n\"\n \" Training: {}\\n\"\n \" Validation: {}\\n\"\n \" Testing: {}\"\n .format(dataset.training_set.nb_subjects,\n dataset.validation_set.nb_subjects,\n dataset.testing_set.nb_subjects))\n\n return dataset", "def load_dataset(data_dir, img_size):\n global input_set\n global test_set\n\n imgs = []\n img_files = os.listdir(data_dir)\n for img in img_files:\n # try:\n tmp = scipy.misc.imread(data_dir + \"/\" + img)\n x, y, z = tmp.shape # shape : width * length * chanel\n coords_x = int(x / img_size) # 坐标\n coords_y = int(y / img_size) #\n coords = [(q, r) for q in range(coords_x) for r in range(coords_y)] # 列表 x * y\n for coord in coords:\n imgs.append((data_dir + \"/\" + img, coord)) # 为列表添加文件目录\n # except 
BaseException:\n # print(\"oops\")\n test_size = min(10, int(len(imgs) * 0.2))\n random.shuffle(imgs)\n test_set = imgs[:test_size]\n train_set_X = imgs[test_size:][:200]\n train_set = imgs[test_size:][200:400]\n return", "def read_datasets(itype, att, nfiles=16):\n # Output array.\n data = []\n # Loop over each file and extract the data.\n for i in range(nfiles):\n f = h5.File(fname+'.%i.hdf5'%i, 'r')\n tmp = f['PartType%i/%s'%(itype, att)][...]\n data.append(tmp)\n\n f.close()\n # Combine to a single array.\n if len(tmp.shape) > 1:\n data = np.vstack(data)\n else:\n data = np.concatenate(data)\n return data", "def LoadBatch(filename):", "def download_all(): #@save\n for name in DATA_HUB:\n download(name)", "def set_batch_data():\r\n if not os.path.exists(filepath):\r\n download_data()\r\n for n in range(0,6):\r\n d = read(filepath + flist[n])\r\n metadata = read(filepath + flist[-1])\r\n ndata = metadata['num_cases_per_batch']\r\n ndim = metadata['num_vis']\r\n\r\n data, trts = {}, {}\r\n data['labels'] = metadata['label_names']\r\n data['ntraindata'] = metadata['num_cases_per_batch'] * (len(flist) - 2)\r\n data['ntestdata'] = metadata['num_cases_per_batch']\r\n data['ndim'] = metadata['num_vis']\r\n trts['x'], trts['y'] = d['data'], d['labels']\r\n trtsflag = ['train', 'train', 'train', 'train', 'train', 'test']\r\n\r\n data['flag'] = trtsflag[n]\r\n data[trtsflag[n]] = trts\r\n save_pkl(data, savename=flist[n]+'.pkl')", "def loadImages(start, stop, csvFile):\n dataset = []\n\n for i in tqdm(range(start, stop)):\n # print(DATASET_PATH + \"/\" + csvLabels.loc[i][\"image_id\"])\n # print(csvFile.loc[i][\"image_id\"])\n img = image.load_img(DATASET_PATH + \"/\" + csvFile.loc[i][\"image_id\"], target_size=IMAGE_DIMS)\n img = image.img_to_array(img)\n img = img / 255\n dataset.append(img)\n\n return dataset", "def get_data_iters(params):\n\n # Loading Data\n train_transform, valid_transform = data_transforms_cifar10(cutout=params[\"cutout\"], cutout_length=16)\n dataset_train = datasets.CIFAR10(\"./datasets\", train=True, download=True, transform=train_transform)\n dataset_valid = datasets.CIFAR10(\"./datasets\", train=True, download=True, transform=valid_transform)\n\n # training set contains 40,000 images, validation and test set contain 10,000 images\n dataset_valid = Subset(dataset_valid, range(4 * len(dataset_train) // 5, len(dataset_train)))\n dataset_train = Subset(dataset_train, range(4 * len(dataset_train) // 5))\n\n # building cyclic iterators over the training and validation sets\n loader_train = torch.utils.data.DataLoader(dataset_train, batch_size=params[\"batch_size\"], shuffle=True)\n loader_valid = torch.utils.data.DataLoader(dataset_valid, batch_size=params[\"batch_size\"], shuffle=True)\n\n print(\"Length of datasets: Train: {}, Valid: {}\".format(len(dataset_train), len(dataset_valid)))\n print(\"Length of loaders: Train: {}, Valid: {}\".format(len(loader_train), len(loader_valid)))\n\n return loader_train, loader_valid", "def import_func(path_):\n\n datasets_dic = {}\n\n for dataset_path in path_:\n # Parse labels from filenames\n dataset_label = os.path.split(dataset_path)[1].split('.')[0]\n\n # Read from csv to Pandas\n dataset = pd.read_csv(dataset_path)\n\n # insert dataset label to the dataframes\n dataset.insert(0, 'trial', dataset_label)\n dataset.insert(0, 'maneuver', dataset_label.split('_')[0])\n\n # Datasets are stored in a dictionary\n datasets_dic.update({dataset_label: dataset})\n\n # list of imported maneuvers\n dataset_names = list(datasets_dic.keys())\n\n 
return datasets_dic, dataset_names", "def fetch_dataloaders(types,n_cpu,folder_name):\n shot_size=2\n #how many images in a shot\n\n test_suffix='_test'\n val_suffix='_val'\n\n dataloaders = {}\n for split in ['train', 'test', 'val']:\n if split in types:\n # use the train_transformer if training data,\n # else use eval_transformer without random flip\n if split == 'train':\n dl = DataLoader(\n RandomShotDataset(work_folder+'/'+folder_name+'/LR',\n work_folder+'/'+folder_name+'/HR',shot_size),\n batch_size=shot_size, # full-batch in episode\n shuffle=True,num_workers=n_cpu,pin_memory=True)\n elif split == 'test':\n dl = DataLoader(\n RandomShotDataset(work_folder+'/'+folder_name+test_suffix+'/LR',\n work_folder+'/'+folder_name+test_suffix+'/HR',shot_size),\n batch_size=shot_size, # full-batch in episode\n shuffle=False,num_workers=n_cpu,pin_memory=True)\n elif split == 'val':\n dl = DataLoader(\n RandomShotDataset(work_folder+'/'+folder_name+val_suffix+'/LR',\n work_folder+'/'+folder_name+val_suffix+'/HR',shot_size),\n batch_size=shot_size, # full-batch in episode\n shuffle=False,num_workers=n_cpu,pin_memory=True)\n else:\n raise NotImplementedError()\n dataloaders[split] = dl\n\n return dataloaders", "def process_datasets(size, counts):\n global FUNCTION_LOGS\n FUNCTION_LOGS.append((f\"-----> Processing size {size}\", counts))\n # process small data sets\n counts = import_data('data',\n f'products_{size}.csv',\n f'customers_{size}.csv',\n f'rentals_{size}.csv')\n logging.info('Imported %d products, %d customers, and %d rentals', *counts)\n\n show_available_products()\n show_rentals('prd0000')\n\n drop_data()", "def get_datasets(config: ModelSettings, df: pd.DataFrame):\n train_filenames = df.loc[df.train_data == 1, \"filename\"].values\n val_filenames = df.loc[df.val_data == 1, \"filename\"].values\n test_filenames = df.loc[df.test_data == 1, \"filename\"].values\n\n train_zspacings = df.loc[df.train_data == 1, \"pixel_spacingz\"].values\n val_zspacings = df.loc[df.val_data == 1, \"pixel_spacingz\"].values\n test_zspacings = df.loc[df.test_data == 1, \"pixel_spacingz\"].values\n\n train_dataset = BPRDataset(\n data_path=config.data_path,\n filenames=train_filenames,\n z_spacings=train_zspacings,\n landmark_path=config.landmark_path,\n landmark_sheet_name=\"landmarks-train\",\n random_seed=config.random_seed,\n custom_transform=config.custom_transform,\n albumentation_transform=config.albumentation_transform,\n equidistance_range=config.equidistance_range,\n num_slices=config.num_slices,\n )\n\n val_dataset = BPRDataset(\n data_path=config.data_path,\n filenames=val_filenames,\n z_spacings=val_zspacings,\n landmark_path=config.landmark_path,\n landmark_sheet_name=\"landmarks-val\",\n random_seed=config.random_seed,\n custom_transform=config.custom_transform,\n albumentation_transform=config.albumentation_transform,\n equidistance_range=config.equidistance_range,\n num_slices=config.num_slices,\n )\n\n test_dataset = BPRDataset(\n data_path=config.data_path,\n filenames=test_filenames,\n z_spacings=test_zspacings,\n landmark_path=config.landmark_path,\n landmark_sheet_name=\"landmarks-test\",\n random_seed=config.random_seed,\n custom_transform=config.custom_transform,\n albumentation_transform=config.albumentation_transform,\n equidistance_range=config.equidistance_range,\n num_slices=config.num_slices,\n )\n\n return train_dataset, val_dataset, test_dataset", "def download_models_and_data():\n\n for file in DATA_FILES:\n download_file(file[\"url\"], file[\"path\"])", "def 
package_datasets(ds_all, dirname=''):\n ds_all = copy.deepcopy(ds_all)\n assert dirname != '', \"dirname required\"\n package_dataset(ds_all['ds_train_um'], dirname=join('.', dirname, 'train'))\n package_dataset(ds_all['ds_valid_um'], dirname=join('.', dirname, 'valid'))\n package_dataset(ds_all['ds_test_um'], dirname=join('.', dirname, 'test'))", "def dataload():\n\t\n\tglobal A, B, fnA, fnB, lPcnA, lPcnB\n\t\n\tdwd = os.getcwd() # Data WD\n\t\t\n\t# First sample A is loaded. This is the \"calibrating\" sample.\n\t# In this case it is the OGLE III LMC small amplitude RGs.\n\t\n\tfnA = '/LMC-CalSample-cleaned_2.fits'\n\tA = Table.read(dwd+fnA)\n\n\t# Then sample B is loaded. For comparison/testing purposes, this is\n\t# again the OGLE III LMC SARGs.\n\t\n\tfnB = '/LMC-CalSample-cleaned_2.fits'\n\tB = Table.read(dwd+fnB)\n\t\n\t\"\"\" Fix tables so only the stars with all three good periods are \n\tconsidered. \"\"\"\n\t\n\tlPcnA = get_logPcn(A)\n\tlPcnB = get_logPcn(B)\n\t\n\tfor cn in lPcnA:\n\t\tA = A[A[cn]>0]\n\tfor cn in lPcnB:\n\t\tB = B[B[cn]>0]", "def _build_datasets(self):\n self._build_datasets_sis3302()\n self._build_datasets_sis3305()", "def get_loaders(opt):\n train_samples, val_samples = get_train_val_metadata(opt.dataset_dir,\n opt.validation_cities,\n opt.patch_size,\n opt.stride)\n print('train samples : ', len(train_samples))\n print('val samples : ', len(val_samples))\n\n logging.info('STARTING Dataset Creation')\n\n full_load = full_onera_loader(opt.dataset_dir, opt)\n\n train_dataset = OneraPreloader(opt.dataset_dir,\n train_samples,\n full_load,\n opt.patch_size,\n opt.augmentation)\n val_dataset = OneraPreloader(opt.dataset_dir,\n val_samples,\n full_load,\n opt.patch_size,\n False)\n\n logging.info('STARTING Dataloading')\n\n train_loader = torch.utils.data.DataLoader(train_dataset,\n batch_size=opt.batch_size,\n shuffle=True,\n num_workers=opt.num_workers)\n val_loader = torch.utils.data.DataLoader(val_dataset,\n batch_size=opt.batch_size,\n shuffle=False,\n num_workers=opt.num_workers)\n return train_loader, val_loader", "def load_dataset(self, split, epoch=1, combine=False, **kwargs):\n paths = self.args.data.split(':')\n assert len(paths) > 0\n data_path = paths[(epoch - 1) % len(paths)]\n\n # infer langcode\n \n lg_datasets = []\n for lg in self.gt_langs:\n src, tgt = lg, lg \n bos_id = self.tgt_dict.index('[{}]'.format(lg))\n data_path_lg = os.path.join(data_path, lg)\n dataset = load_generation_pair_dataset(\n data_path_lg, split, tgt, self.src_dict, self.tgt_dict,\n combine=combine, dataset_impl=self.args.dataset_impl,\n upsample_primary=self.args.upsample_primary,\n left_pad_source=self.args.left_pad_source,\n left_pad_target=self.args.left_pad_target,\n max_source_positions=getattr(self.args, 'max_source_positions', 1024),\n max_target_positions=getattr(self.args, 'max_target_positions', 1024),\n load_alignments=self.args.load_alignments,\n prepend_bos=getattr(self.args, 'preprend_bos', False),\n append_source_id=True,\n common_eos=self.args.common_eos,\n lg_id=bos_id\n )\n lg_datasets.append(dataset)\n \n dataset_lengths = np.array([len(d) for d in lg_datasets], dtype=float) \n\n sample_probs = self._get_sample_prob(dataset_lengths)\n logger.info(\"| Sample probability by language: \", {\n lang: \"{0:.4f}\".format(sample_probs[id])\n for id, lang in enumerate(self.gt_langs)\n }\n )\n size_ratio = (sample_probs * dataset_lengths.sum()) / dataset_lengths\n logger.info(\"| Up/Down Sampling ratio by language: \", {\n lang: \"{0:.2f}\".format(size_ratio[id])\n 
for id, lang in enumerate(self.gt_langs)\n }\n )\n if split == getattr(self.args, \"train_subset\", \"train\"):\n resampled_lang_datasets = [\n ResamplingDataset(\n lg_datasets[i],\n size_ratio=size_ratio[i],\n seed=self.args.seed,\n epoch=epoch,\n replace=size_ratio[i] >= 1.0,\n )\n for i, d in enumerate(lg_datasets)\n ]\n dataset = ConcatDataset(\n resampled_lang_datasets,\n )\n else:\n dataset = ConcatDataset(lg_datasets)\n lang_splits = [split]\n for lang_id, lang_dataset in enumerate(lg_datasets):\n split_name = split + '_' + self.gt_langs[lang_id]\n lang_splits.append(split_name)\n self.datasets[split_name] = lang_dataset\n \n if hasattr(self.args, \"valid_subset\"):\n if split in self.args.valid_subset:\n self.args.valid_subset = self.args.valid_subset.replace(\n split, ','.join(lang_splits)\n )\n\n with data_utils.numpy_seed(self.args.seed + epoch):\n shuffle = np.random.permutation(len(dataset))\n self.datasets[split] = SortDataset(\n dataset,\n sort_order=[\n shuffle,\n dataset.sizes,\n ],\n )", "def get_data_loaders():\n dataset_path = \"\"\n dataset_cache = None\n personachat = get_dataset(dataset_path, dataset_cache)\n\n tokenizer_selected = OpenAIGPTTokenizer.from_pretrained('openai-gpt')\n logger.info(\"Build inputs and labels\")\n datasets = {\"train\": defaultdict(list), \"valid\": defaultdict(list)}\n personality = []\n history_complete = []\n count_persona = 0\n with open('data_faiss_pegasus_2sentences_finalgenerated.pkl', 'rb') as f:\n persona_selected_list = pickle.load(f)\n for dataset_name, dataset in personachat.items():\n num_candidates = len(dataset[0][\"utterances\"][0][\"candidates\"])\n if num_candidates > 0 and dataset_name == 'train':\n num_candidates = min(1, num_candidates)\n for dialog in dataset:\n persona = dialog[\"persona_info\"].copy()\n #datasets[personality].append(persona)\n count_history = 0\n for utterance in dialog[\"utterances\"]:\n count_history = count_history + 1\n history = utterance[\"history\"][-(2*2+5):]\n \n #history_complete.append(history)\n if len(persona) == 4:\n if len(history) > (len(persona)+3):\n history_chatbot = history[1::2]\n persona_selected = persona_selected_list[count_persona]\n instance = build_input_from_segments_faiss_2(persona, history_chatbot) \n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n count_persona = count_persona + 1\n return datasets" ]
[ "0.68618584", "0.6650741", "0.6626212", "0.66049284", "0.65869296", "0.6572509", "0.6537458", "0.65346485", "0.65204453", "0.65077704", "0.65056384", "0.64877063", "0.64608717", "0.644231", "0.64391696", "0.6436018", "0.64284915", "0.6427725", "0.64143807", "0.63900393", "0.6386112", "0.6377638", "0.6368329", "0.63593245", "0.6356833", "0.63539135", "0.6348181", "0.6338754", "0.63314456", "0.63094896", "0.6261167", "0.625865", "0.62568825", "0.6250395", "0.6231192", "0.62269497", "0.62264293", "0.62246305", "0.6220626", "0.6220102", "0.62145144", "0.62069935", "0.6203575", "0.6198492", "0.6195892", "0.6195669", "0.61903197", "0.61903197", "0.61894184", "0.6188858", "0.618681", "0.6182777", "0.6178185", "0.6175636", "0.6173242", "0.6166592", "0.6164855", "0.6161542", "0.615802", "0.6156818", "0.61393833", "0.6128466", "0.6121269", "0.6112184", "0.6104087", "0.6092058", "0.6090282", "0.6071736", "0.6071671", "0.6071118", "0.60701025", "0.606487", "0.6063259", "0.6060074", "0.6054554", "0.6047024", "0.60444534", "0.60434514", "0.6037014", "0.60289896", "0.6027615", "0.60153496", "0.6015323", "0.6012258", "0.6010527", "0.6010505", "0.5997866", "0.5996176", "0.59872264", "0.5987056", "0.59843385", "0.59797573", "0.59796566", "0.59789217", "0.5977028", "0.5975986", "0.5970635", "0.5966761", "0.5965978", "0.59646165", "0.59643126" ]
0.0
-1
Load a single CSV file into a DataFrame
def load_handler(
    endpoint: str,
    path: str,
    columns: list,
    types: Union[dict, None],
    parse_dates: list,
    coerce_dates: bool = False,
) -> pd.DataFrame:

    try:

        # Read CSV file from Meteostat endpoint
        df = pd.read_csv(
            endpoint + path,
            compression="gzip",
            names=columns,
            dtype=types,
            parse_dates=parse_dates,
        )

        # Force datetime conversion
        if coerce_dates:
            df.iloc[:, parse_dates] = df.iloc[:, parse_dates].apply(
                pd.to_datetime, errors="coerce"
            )

    except (FileNotFoundError, HTTPError):

        # Create empty DataFrame
        df = pd.DataFrame(columns=[*types])

        # Display warning
        warn(f"Cannot load {path} from {endpoint}")

    # Return DataFrame
    return df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data_from_csv(self, filepath):\n self.dataframe = pd.load_csv(filepath, separator='')", "def _parse_csv(csv_file: str) -> pd.DataFrame:\n return pd.read_csv(csv_file, header=0)", "def _load_csv_into_df(csv_file: Any, csv_name: str) -> pd.DataFrame:\n try:\n df = pd.read_csv(csv_file, sep=\"|\", header=0, dtype=str, encoding=\"UTF-8\")\n except ValueError as e:\n print(f\"ERROR! Could not read the file {csv_name}: {e}\")\n raise\n return df", "def load_data(csv_path):\n df = pd.read_csv(csv_path)\n return df", "def read_data_from_csv(filename):\n df = pd.read_csv(filename)\n return df", "def read_csv(csv_path):\n \n df = pd.read_csv(csv_path)\n\n return df", "def load_dataset_from(csv_file: str) -> pd.DataFrame:\n\n print(\">>> LOADING DATASET FROM FILE {filename}\".format(filename=csv_file))\n if not csv_file.endswith(\".csv\"):\n print(\"File has to be CSV type file!\")\n exit(1)\n\n try:\n data = pd.read_csv(csv_file)\n print(\">>> Finished loading data!\")\n return data\n except FileNotFoundError:\n print(\"File couldn't be found. Verify if '{f_path}' is a correct file path!\".format(f_path=csv_file))\n exit(1)", "def import_data(csv_file):\n # skips bad lines\n data = pd.read_csv(csv_file, error_bad_lines=False)\n return data", "def csv_to_df(self, path=None):\n # reads the csv file and puts it to the dataframe\n df = pd.read_csv(path)\n return df", "def read_data_from_csv(filename: str) -> pd.DataFrame:\n try:\n data = pd.read_csv(filename)\n return data\n except(FileNotFoundError):\n print('Error: Could not read the data from csv.')\n return None", "def load_from_csv(path, delimiter=','):\n return pd.read_csv(path,encoding = \"ISO-8859-1\",dtype=object)", "def load_csv():\n df = pd.read_csv(datafolder+filename, decimal=decimal).astype(\n {'min': 'float', 'max': 'float'})\n return df", "def read_csv_data(csv_path):\n\n return pd.read_csv(csv_path, sep=',', engine='python')", "def read_from_csv(path):\n if not os.path.exists(path):\n return None\n if not path.endswith('.csv'):\n return None\n\n with open(path, 'r') as file:\n data = pd.read_csv(file, header=0)\n\n return data", "def read_csv(path):\n return pd.read_csv(path)", "def create_dataframe_from_csv(path_to_csv_file):\r\n df = pd.read_csv(path_to_csv_file)\r\n return df", "def _loadCSVFile(self):\n self._df = pd.read_csv(\n self._pathfile, sep=CSV_SEPARATOR, index_col=CSV_INDEX_COL)", "def _get_df_from_csv(self, filename):\n df = pd.read_csv(filename)\n df.set_index('Date', drop=True, inplace=True)\n df.index = pd.to_datetime(df.index)\n return df", "def read_data(filepath):\n df = pd.read_csv(filepath)\n return df", "def read_data(filepath):\n df = pd.read_csv(filepath)\n return df", "def read_data(filepath):\n df = pd.read_csv(filepath)\n return df", "def load() -> DataFrame:\n return load_file(__file__, \"default.csv.gz\")", "def loadData(path):\n try:\n return pd.read_csv(path)\n except Exception as e:\n raise Exception(\"Could not read df, possbily incorrect path: {}\".format(e))", "def read_csv(\n csv_file: str,\n header: Optional[Union[str, int, Sequence]] = 'infer',\n names: Optional[Sequence] = None) -> pd.DataFrame:\n\n if header is None and not names:\n names = constants.IMAGE_CSV_COLUMNS\n\n with tf.io.gfile.GFile(csv_file) as f:\n return pd.read_csv(f, names=names, header=header)", "def load_data(file_path):\n data = pandas.read_csv(file_path)\n\n return data", "def importData(filename):\r\n data = pd.read_csv(filename)\r\n return data", "def read_csv_ur10(self, csv_file):\r\n df = pd.read_csv(csv_file, 
sep=';', decimal=',', header=0)\r\n return df", "def initialize_from_file(filename):\r\n df = pd.read_csv(filename)\r\n return df", "def _load_stored_csv(path: Union[Path, str]) -> Union[pd.DataFrame, pd.Series]:\n data = pd.read_csv(path, index_col=0, parse_dates=[0]).round(12)\n data.index = data.index.tz_convert(REFERENCE_TZ)\n return data", "def from_csv(cls, filename, pulse_number=None):\n df = pd.read_csv(filename)\n return cls._sort_and_filter_dataframe(df, pulse_number)", "def read_csv_to_dataframe(file_name):\n df = pd.read_csv(file_name)\n df = df.drop(['Unnamed: 0'], axis=1)\n return df", "def read_csv_file_data(file_path):\n if os.path.exists(file_path):\n df = pandas.read_csv(file_path)\n else:\n raise ValueError('ERROR: file_path doesnt exist in read_csv_file_data()')\n return df", "def test_from_file_csv(self):\n with TemporaryDirectory() as tmp:\n fp, df_test = save_simple_dataframe(tmp, 'test.csv')\n df_read = BaseDataClass.from_file(fp).df\n self.assertEqual(\n pd.testing.assert_frame_equal(df_test, df_read),\n None,\n )", "def load_csv_file(file_name):\n return pandas.read_csv(path_dataset + file_name)", "def load_data(path, file, verbose=False, index=0):\n \n df = pd.read_csv(path+file, index_col=index)\n \n if verbose:\n shape = f'{df.shape}'\n dtypes = f'{df.dtypes[:30]}'\n head = f'{df.head()[:10]}'\n name = file.split('.')[0]\n \n print(f'{name} shape'.center(80, '-'))\n print(shape.center(80))\n print(f\"{name}'s column types\".center(80, '-'))\n print(dtypes)\n print(f\"{name} first five rows\".center(80, '-'))\n print(head)\n \n return df", "def read_traffic_sensor_from_csv(path: str) -> pd.DataFrame:\n\n df = pd.read_csv(path)\n df[\"measuredTime\"] = pd.to_datetime(df[\"measuredTime\"])\n df.set_index(\"measuredTime\", inplace=True)\n return df", "def load_csv(csv_path):\n\n try:\n # Tries to read .csv file into a dataframe\n csv = pd.read_csv(csv_path, header=None)\n\n except FileNotFoundError as e:\n # If file is not found, handle the exception and exit\n logger.error(e)\n raise\n\n return csv", "def load(self, path):\n self.df = pd.read_csv(path)\n print(\"Loaded data from {}\".format(path))", "def load(self) -> pd.DataFrame:\n if os.path.exists(self.file_name):\n df = pd.read_csv(self.file_name, index_col=0)\n df = self._clean(df)\n else:\n _LOG.debug(\"No file '%s'\", self.file_name)\n df = pd.DataFrame()\n return df", "def dataset_from_csv(self, filename, time_column='point_in_time'):\n return pd.from_csv(filename, parse_dates=[time_column])", "def get_dataset(filepath):\n return pandas.read_csv(filepath, header='infer')", "def load_csv(*, path, filename, sep=\"\\t\", verbose=True):\n \n os.chdir(path)\n if len(glob.glob(filename))==1: \n df = pd.read_csv(filename, sep=sep, low_memory=False)\n \n # display example,\n if verbose==True:\n display(df.head(3))\n print(df.shape)\n else:\n pass\n \n # return,\n return df\n \n else:\n if verbose==True:\n print(f\"\"\"ERROR :csv file {filename}, was not found in: \\n {path}\"\"\")\n else:\n pass", "def load_label(path_file):\n if '.csv' not in path_file:\n raise FileNotFoundError('Only CSV format is supported currently')\n\n t0 = time()\n df = pd.DataFrame()\n\n with open(path_file, 'r') as f:\n # TODO: Implement the logic once the format is finalised\n pass\n\n logging.info('Loading label data with {} rows from {} takes {} secs'.format(df.shape[0],\n path_file, time() - t0))\n return df", "def load_data(filepath):\n\n file_path_casted = Path(filepath)\n if not file_path_casted.exists():\n raise 
FileNotFoundError(\"File does not exist.\")\n\n data = pd.read_csv(filepath, delimiter=\",\")\n\n return data", "def read_csv(file, header=None, sep=','):\n\n if header is not None and header:\n header = 0 # first row is header\n\n data_df = DataFrame.from_csv(file, header=header, sep=sep, index_col=None)\n\n #datamat = np.ndarray(shape=data_df.shape, dtype=float)\n #datamat[:, :] = data_df.iloc[:, 0:data_df.shape[1]]\n\n return data_df", "def load_file_to_dataframe(self, file_path: str) -> pd.DataFrame:\n return pd.read_csv(file_path, sep=\"\\t\")", "def read_data(path):\n data = pd.read_csv(path)\n return data", "def read_data(path):\n data = pd.read_csv(path)\n return data", "def get_data_from_csv_full_path(filepath, datatypes, date_column_list):\n\n dataframe = pandas.read_csv(filepath, dtype=datatypes, date_parser=pandas.to_datetime, parse_dates=date_column_list)\n\n return dataframe", "def get_data(path):\n df = pd.read_csv(path)\n\n return df", "def _csv_to_df(csv_path, headers):\n\n # Assume all columns are strings\n columns_types = {i: str for i, header in enumerate(headers)}\n\n temp_df = pd.read_csv(csv_path, converters=columns_types, skip_blank_lines=False)\n # TODO: check that there are only two columns of type string, then convert to our format\n temp_df.columns = headers\n # Add the column split, this is all training data\n temp_df['annotation_unit_id'] = None\n return temp_df", "def loadData(path_file):\n data = pd.read_csv(path_file) \n data.head()\n return data", "def read_csv():\n csv_file = \"dow.csv\"\n\n # read the data from the csv file, parsing the Dates to make the x-axis, setting index_col to zero to remove it\n data_frame = pd.read_csv(csv_file, parse_dates=True, index_col=0)\n return data_frame", "def load_csv(path: str, ncols: int, nonames: bool) -> DataFrame:\n cols = range(ncols) if ncols else None\n return pandas.read_csv(path, usecols=cols, skipinitialspace=True, header='infer' if not nonames else None)", "def load_to_dataframe(self) -> DataFrame:\n return read_csv(self._csv_path, converters={\n # Check if embedding size is the empty string,\n # as it would be for Count models\n \"Embedding size\": lambda v: int(float(v)) if len(v) > 0 else nan\n })", "def read_csv(self, csv_file):\n mylog.debug('Reading csv file %s for data' % csv_file)\n csv_data = pandas.read_csv(csv_file)\n mylog.debug('Read of csv file complete.')\n #mylog.debug('%s' % csv_data)\n #sometimes the csv has an empty dataframe #\n if csv_data.empty:\n mylog.debug('Data frame is empty; repopuating data')\n csv_info = []\n for item in csv_data:\n #add the data one cell at a time to the list #\n #for some reason, some csvs have the data #\n #with random decimal points #\n csv_info.append(item.split(\".\")[0])\n df = pandas.DataFrame(columns=csv_info)\n df.loc[0]=csv_info\n #write the data from the list back into the cells#\n #one at a time #\n for column in range(0, len(csv_info)): \n df.iloc[0,column] = csv_info[column]\n csv_data = df \n return csv_data", "def read_data_csv(path):\n df = pd.read_csv(path)\n df.drop([\"Unnamed: 0\"], axis=1, inplace=True)\n return df", "def csv_to_dataframe(file_name: str) -> pd.DataFrame:\n\n # check if csv contains timestamps,\n # find where the data starts if so, raise error otherwise\n has_header, header_lines, columns_list = csv_has_timestamps(file_name, 50)\n\n if not has_header:\n sys.stdout.write(f\"{file_name} does not appear to have timestamps\\n\")\n raise TypeError\n\n file = open(file_name, \"r\")\n\n # based on return from 
__csv_process_header(), move the\n # file pointer forward until we find the start of the data\n for i in range(header_lines):\n file.readline()\n\n # finally, call the pandas library function\n data_frame = pd.read_csv(file,\n header=None,\n names=columns_list,\n index_col=None,\n parse_dates=True)\n\n # convert the relevant rows to timestamps and floats\n data_frame = __convert_timestamps(data_frame)\n\n file.close()\n return data_frame", "def _csv_engine(filename, node):\n sep = node.get(\"sep\", \",\")\n header = node.get(\"header\", 0)\n logger.debug(\n \"Parsing CSV '{}'. sep={}, header={}.\".format(filename, sep, header)\n )\n index = node.get(\"index\")\n encoding = node.get(\"encoding\")\n if not index:\n raise InvalidConfig(\"An 'index' column is required. It should \"\n \"be the sample id column.\")\n\n df = pd.read_csv(filename, sep=sep, header=header, encoding=encoding)\n df.set_index(index, verify_integrity=True, inplace=True, drop=True)\n df.index = df.index.astype(str)\n\n return df", "def read_csv(self) -> None:\n\n self._df = pd.read_csv(self._dataset_file)", "def load(self, path, separator=\",\", header_type=\"infer\", nrows=None,\n skiprows=None, usecols=None):\n\n return pd.read_csv(path, header=header_type, sep=separator,\n usecols=usecols, nrows=nrows, skiprows=skiprows)", "def _read_csv(filename):\n loaded = pd.read_csv(filename, index_col=0)\n if len(loaded.columns) == 1:\n return pd.read_csv(filename, index_col=0, header=None)[1]\n else:\n return loaded", "def read_csv(self, filepath):\n try:\n self.df = pd.read_csv(filepath)\n return self\n except FileNotFoundError as e:\n raise OperationError(f\"File not found - {filepath}\") from e\n except ParserError as e:\n raise OperationError(f\"Fails to parse file - {e}\") from e", "def df_read(path: pathlib.Path, **kwargs) -> pd.DataFrame:\n # Always specify the datatype so pandas does not have to infer it--much\n # faster.\n return pd.read_csv(path, sep=';', float_precision='high', **kwargs)", "def _csv2df(data_file):\n df = pd.read_csv(data_file, encoding=\"ISO-8859-1\", low_memory=False)\n return df", "def from_csv(cls, name, csv, **kwargs):\r\n data = pd.read_csv(csv, **kwargs)\r\n return Dataset(name, data, **kwargs)", "def get_df_from_csv(input_file_name):\n df = pd.read_csv(input_file_name)\n df = df.drop(['Unnamed: 0'], axis=1)\n return df", "def load_file(self):\n\n self.df = self.sqlContext.read.csv(self.source, sep=self.sep, header=True, inferSchema=True)", "def load_data(input_file):\n print('loading file:', input_file)\n df = pd.DataFrame()\n show_progress = make_show_progress()\n chunk_iterator = pd.read_csv(input_file,\n compression='gzip',\n chunksize=100_000,\n index_col=0,\n usecols=cols_to_use,\n dtype=data_types,\n parse_dates=dates_to_parse,\n infer_datetime_format=True\n )\n for chunk in chunk_iterator:\n df = pd.concat([df, chunk])\n show_progress(len(chunk))\n return df", "def ImportCSV(Filepath):\n try:\n PandaFrame = pd.read_csv(Filepath)\n except Exception as E:\n raise E\n SizeOfPandaFrame = PandaFrame.size\n KeysPandaFrame = PandaFrame.keys()\n Data = [PandaFrame[key] for keys in KeysPandaFrame]\n return Data", "def loadCSV(input_file):", "def parse_csv_file(self, csv_file: str):\n try:\n df = pd.read_csv(csv_file)\n\n if not set(['Question', 'Answer']).issubset(df.columns):\n raise BadCSVFile(\n \"CSV file does not contain ['Question', 'Answer'] columns.\")\n\n df.dropna(inplace=True)\n\n except Exception as e:\n raise BadCSVFile(\n \"Error while reading the csv file. 
Please check the path of the file or the file might be curropted.\")\n\n return df", "def read_load_data_from_csv(csv_path):\n # Load the original DataFrame, use easier-to-read column names, and drop unnecessary column\n original_df = pd.read_csv(csv_path).rename(columns={\"OperDay\" : \"Date\"}).drop([\"TOTAL\", \"DSTFlag\"],axis=1)\n\n original_df.name = csv_path.split(\"_\")[1]\n\n # Combine the originally separate date and hour columns into a single DateTime column\n return combine_date_and_hour_columns(original_df)", "def from_csv(self,path):\n self.csv_path = path\n\n try:\n fh = open(self.csv_path, \"r\")\n except IOError:\n print(\"Error: no such file or directory\") \n\n self.csv_dataframe = pd.DataFrame(pd.read_csv(self.csv_path, header=0, keep_default_na=False)).dropna(axis=0, how='any')\n test = pd.DataFrame(pd.read_csv(self.csv_path)).dropna(axis=0, how='any')\n types = [0 for i in range(len(test.dtypes))]\n a = fh.readline()\n a = a[:-1] # remove '\\n'\n x = a.split(',') # x stores the name of each column\n fh.close()\n\n #type transformation\n for i in range(len(test.dtypes)):\n if test.dtypes[i].name[0:3] == 'int' or test.dtypes[i].name[0:5] == 'float':\n if (x[i][0] == \"'\" or x[i][0] == '\"'):\n x[i] = x[i].replace('\\'', '').replace('\"', '')\n for j in test[x[i]]:\n if not (j == 0 or (j > 1000 and j < 2100)):\n types[i] = test.dtypes[i].name[0:5]\n break\n else:\n types[i] = 'year'\n elif test.dtypes[i].name[0:6] == 'object':\n if (x[i][0] == \"'\" or x[i][0] == '\"'):\n x[i] = x[i].replace('\\'', '').replace('\"', '')\n for j in test[x[i]]:\n if j != 0 and not(re.search(r'\\d+[/-]\\d+[/-]\\d+', j)):\n types[i] = 'varchar'\n break\n else:\n types[i] = 'date'\n \n name = path.rsplit('/', 1)[-1][:-4]\n self.table_info(name, x, types)\n self.import_method = methods_of_import[2] # = 'csv'\n\n self.show_csv_info()", "def from_csv(self, path_to_load):\n import pandas as pd\n\n df = pd.read_csv(path_to_load)\n df = df.loc[:, ~df.columns.str.contains('^Unnamed')] # Remove unnnamed\n\n self.results['cids'] = list()\n self.results['differences'] = list()\n self.results['experimental_values'] = list()\n\n pd_dict = df.to_dict()\n length = len(pd_dict['cids'])\n for cid in [pd_dict['cids'][i] for i in range(0, length)]:\n self._results['cids'].append(cid)\n for cid in [pd_dict['differences'][i] for i in range(0, length)]:\n self._results['differences'].append(cid)\n for cid in [pd_dict['experimental_values'][i]\n for i in range(0, length)]:\n self._results['experimental_values'].append(cid)", "def from_file(filename, delimiter):\n df = pd.read_csv(filename, delimiter=delimiter)\n return df", "def open_csv(filename=\"NOTEEVENTS.csv\", index=['SUBJECT_ID', 'HADM_ID']):\n df = pd.read_csv(DATA_DIR / filename,\n index_col=index,\n # nrows=1000,\n infer_datetime_format=True)\n logger.info(f\"opening {filename}\")\n logger.info(f\"Dataframe columns: {df.columns}\")\n # logger.info(f\"Clinical note types: {df['CATEGORY'].unique()}\")\n return df", "def from_file(filename, delimiter):\n return pd.read_csv(filename, sep=delimiter)", "def test_read_csv_to_dataframe(fname):\n df = read_csv_to_dataframe(fname)\n print(df.head())", "def load_data(path):\n try:\n data = pd.read_csv(path, sep='\\t')\n except FileNotFoundError:\n logger.exception(\"Traceback of data file '{}' not found.\".format(path))\n else:\n return data", "def get_data(filename):\r\n return pd.read_csv(filename)", "def get_data_from_csv(filepath, filename, datatypes, date_column_list):\n\n concatenated_file = 
os.path.join(filepath, filename)\n\n dataframe = get_data_from_csv_full_path(concatenated_file, datatypes, date_column_list)\n\n return dataframe", "def extract(filepath: str) -> pd.DataFrame:\n try:\n return pd.read_csv(filepath)\n except Exception as e:\n raise InvalidSourceFile(f\"Can not read file {filepath}: {str(e)}\")", "def load_csv(self):\n self.database = pd.read_csv(\n self.settings['database_path'],\n encoding='utf-8')", "def _read_csv(filename):\n csv_file = open(filename, 'r')\n data = pandas.read_csv(csv_file, comment='!', quotechar=\"'\")\n csv_file.close()\n return data", "def load_file(fname, fpath='./', delimiter=','):\n\n dest = fpath + fname\n print(f\"Loading file {dest} ...\")\n df_file = pd.read_csv(dest, delimiter=delimiter)\n\n return df_file", "def _read_data(filename):\n logger.info('Reading file {}'.format(filename))\n return pd.read_csv(filename)", "def _read_csv(self) -> pd.DataFrame:\n\n return pd.concat(\n [\n pd.read_csv(f, usecols=[1, 2, 3, 4, 5])\n for f in self.temp_path.iterdir()\n if f.name.endswith(\".csv\")\n ]\n )", "def load_data(txt_path: str = RAW_TXT) -> pd.DataFrame:\n df = pd.read_csv(txt_path)[INDICES]\n return df", "def gp_dataframe_import(filename):\n path = os.path.join('..', 'data', filename)\n frame = pd.read_csv(path)\n return frame", "def import_data():\n\tif os.path.exists(\"log.csv\"):\n\t\t#print (\"--training data imported to data frame\\n\")\n\t\tdf = pd.read_csv(\"log.csv\", index_col=0)\n\telse:\n\t\tprint(\"training CSV not found\")\n\t\texit()\n\t\n\treturn df", "def import_dataset(fpath):\r\n data = read_csv(fpath)\r\n print(data.head())\r\n print(data.shape)\r\n return data", "def get_raw_data_from_csv():\n data_df = pd.read_csv(static_constants.RAW_DATA_PATH)\n return data_df", "def from_csv(self, path):\n for model, table in [(self.Dataset, 'dataset'),\n (self.Datarun, 'datarun'),\n (self.Hyperpartition, 'hyperpartition'),\n (self.Classifier, 'classifier')]:\n df = pd.read_csv(os.path.join(path, '%ss.csv' % table))\n\n # parse datetime columns. 
This is necessary because SQLAlchemy can't\n # interpret strings as datetimes on its own.\n # yes, this is the easiest way to do it\n for c in inspect(model).attrs:\n if type(c) != ColumnProperty:\n continue\n col = c.columns[0]\n if type(col.type) == DateTime:\n df[c.key] = pd.to_datetime(df[c.key],\n infer_datetime_format=True)\n\n for _, r in df.iterrows():\n # replace NaN and NaT with None\n for k, v in list(r.iteritems()):\n if pd.isnull(v):\n r[k] = None\n\n # insert the row into the database\n create_func = getattr(self, 'create_%s' % table)\n create_func(**r)", "def reader(self):\n df = pd.read_csv(self.path)\n return df", "def read_csv():", "def load_dataframe(csv_file, column):\n iter_csv = pd.read_csv(csv_file, iterator=True, chunksize=100)\n df1 = pd.concat([chunk[chunk[column] == 'MINF'] for chunk in iter_csv])\n iter_csv = pd.read_csv(csv_file, iterator=True, chunksize=100)\n df2 = pd.concat([chunk[chunk[column] == 'DCM'] for chunk in iter_csv])\n df = pd.concat([df1, df2])\n return df", "def csv_data_loader(path):\n\n logging.info(\"Loading file using SparkSession\")\n csvload = Spark.instance.spark() \\\n .read \\\n .format(\"csv\") \\\n .options(header=True) \\\n .options(mode=\"DROPMALFORMED\")\n\n return csvload.option(\"inferSchema\", \"true\").load(path)", "def read_dataframe_from_csv(filename: str, columns: dict = None,\n nr_rows: int = None, datatype=None) -> pandas.DataFrame:\n try:\n # Some inputfiles have problems reading in utf-8\n csv_data = pandas.read_csv(filename,\n sep=',',\n usecols=columns,\n dtype=datatype,\n engine='python',\n nrows=nr_rows,\n parse_dates=False,\n iterator=False,\n quotechar='\"',\n encoding='utf-8')\n except BaseException:\n print('read_dataframe_from_csv(): error reading in utf-8 format, reading in latin-1 format.')\n csv_data = pandas.read_csv(filename,\n sep=',',\n usecols=columns,\n dtype=datatype,\n engine='python',\n nrows=nr_rows,\n parse_dates=False,\n iterator=False,\n quotechar='\"',\n encoding='latin-1')\n return csv_data", "def load_log(dir_):\n df = pandas.read_csv(os.path.join(dir_, 'log.csv'),\n error_bad_lines=False,\n warn_bad_lines=True)\n if not len(df):\n print(\"empty df at {}\".format(dir_))\n return\n df['model'] = dir_\n return df", "def load_data(filepath):\n\tlogging.info(f\"Load data from {filepath}\")\n\tdf = pd.read_csv(filepath)\n\tdf = set_dtypes(df)\n\tdf = df.sort_values(by='query_date')\n\n\treturn df" ]
[ "0.82277906", "0.8012845", "0.7859813", "0.7839427", "0.7722293", "0.7609823", "0.75901395", "0.7588599", "0.75861067", "0.75579536", "0.7539103", "0.75356525", "0.74848354", "0.7478136", "0.74515945", "0.74494445", "0.7425229", "0.7384577", "0.73813176", "0.73813176", "0.73813176", "0.73240215", "0.7294538", "0.7285156", "0.7240827", "0.72200316", "0.7196671", "0.7194097", "0.7187079", "0.7179595", "0.7174033", "0.71580917", "0.7120801", "0.7116178", "0.7101056", "0.7077614", "0.70606726", "0.7052242", "0.7048504", "0.70430124", "0.70390886", "0.7036723", "0.7035889", "0.7031454", "0.70269775", "0.70096326", "0.7006316", "0.7006316", "0.6998351", "0.6994988", "0.6989779", "0.69791174", "0.6967323", "0.6965458", "0.6964548", "0.6955169", "0.6942815", "0.69427836", "0.69348186", "0.69160414", "0.6905758", "0.6889221", "0.6875336", "0.68730205", "0.6868698", "0.6853094", "0.68511856", "0.6844364", "0.6837717", "0.6831989", "0.6826408", "0.6820773", "0.68202084", "0.68084866", "0.6805801", "0.6804654", "0.67988414", "0.67964923", "0.67869776", "0.6776366", "0.67709404", "0.677085", "0.67642546", "0.67538023", "0.67451614", "0.6744679", "0.6743068", "0.67237264", "0.67157614", "0.6709801", "0.6706122", "0.67030376", "0.6695529", "0.6685834", "0.6684228", "0.6678331", "0.6662421", "0.6656831", "0.6652352", "0.6647543", "0.6646945" ]
0.0
-1
Preprocess graphs by casting into FloatTensor and setting to cuda if available
def preprocess(dataset, cuda):
    for g, _ in dataset:
        for key_g, val_g in g.ndata.items():
            processed = g.ndata.pop(key_g)
            processed = processed.type('torch.FloatTensor')
            if cuda:
                processed = processed.cuda()
            g.ndata[key_g] = processed
        for key_g, val_g in g.edata.items():
            processed = g.edata.pop(key_g)
            processed = processed.type('torch.FloatTensor')
            if cuda:
                processed = processed.cuda()
            g.edata[key_g] = processed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_cuda(network):\n network.cuda()\n\n network._to_cuda_forward_cache = network.forward\n\n def cuda_forward(x):\n return network._to_cuda_forward_cache(x.cuda(non_blocking=True))\n\n network.forward = cuda_forward", "def cuda_if_gpu(T):\n\n return T.cuda() if use_cuda else T", "def __init__(self, model_path, gpu_fraction=1.0,\n input_name = 'input_1:0',\n output_name = 'output_node0:0',\n optimize = True,\n optimizer_args = None):\n\n # Create session first with requested gpu_fraction parameter\n config = tf.compat.v1.ConfigProto()\n config.gpu_options.allow_growth = True\n config.gpu_options.per_process_gpu_memory_fraction = gpu_fraction\n self.tf_session = tf.compat.v1.Session(config=config)\n\n with tf.io.gfile.GFile(model_path, 'rb') as graph_file:\n # Load graph off of disk into a graph definition\n graph_def = tf.compat.v1.GraphDef()\n graph_def.ParseFromString(graph_file.read())\n\n if optimize:\n if type(output_name) == list:\n sensitive_nodes = output_name\n else:\n sensitive_nodes = [output_name]\n graph_def = optimizeGraph(graph_def,\n sensitive_nodes,\n optimizer_args)\n if type(output_name) == list:\n return_elements = [input_name, *output_name]\n tensors = tf.import_graph_def(\n graph_def,\n return_elements=return_elements)\n # The first is an input\n self.input_tensor = tensors[0]\n # The rest are outputs\n self.output_tensor = tensors[1:]\n else:\n return_elements = [input_name, output_name]\n self.input_tensor, self.output_tensor = tf.import_graph_def(\n graph_def,\n return_elements=return_elements)\n\n self.input_shape = self.input_tensor.get_shape().as_list()", "def trace_cpu(self, graph, tensor_fetches, op_fetches=None):\n if isinstance(graph, func_graph.FuncGraph) or isinstance(\n graph, function._FuncGraph): # pylint: disable=protected-access\n logging.warning('Tensor Tracer is not supported for tracing FuncGraphs. 
'\n 'Ignoring tracing.')\n return tensor_fetches\n\n if graph in TensorTracer._traced_graphs:\n logging.warning('Graph is already rewritten with tensor tracer, ignoring '\n 'multiple calls.')\n return tensor_fetches\n else:\n TensorTracer._traced_graphs.add(graph)\n # Reset the parameters in case parameters are changed.\n self._parameters = tensor_tracer_flags.TTParameters()\n\n self._tt_config.device_type = _DEVICE_TYPE_CPU\n self._tt_config.num_replicas = 1\n self._tt_config.num_replicas_per_host = 1\n self._tt_config.num_hosts = 1\n self._replica_id = 0\n if self._parameters.graph_dump_path:\n graph_io.write_graph(graph, self._parameters.graph_dump_path,\n 'graph_before_tt.pbtxt')\n with graph.as_default():\n tensor_fetches = self._trace_execution(graph, tensor_fetches, op_fetches,\n on_tpu=False)\n if self._parameters.graph_dump_path:\n graph_io.write_graph(graph, self._parameters.graph_dump_path,\n 'graph_after_tt.pbtxt')\n return tensor_fetches", "def set_default_tensor_type(device):\r\n if device in [torch.device(\"cpu\"), \"cpu\"]:\r\n torch.set_default_tensor_type(torch.FloatTensor)\r\n else:\r\n torch.set_default_tensor_type(torch.cuda.FloatTensor)", "def _addCastOps(self, user_graph_def):\n # Load user-specified graph into memory\n user_graph = tf.Graph()\n with user_graph.as_default():\n tf.import_graph_def(user_graph_def, name=\"\")\n\n # Build a subgraph containing our injected ops\n # TODO: Cheap optimization: if all input tensors are of type float64, just do nothing here\n injected_op_subgraph = tf.Graph()\n # Maps names of input tensors in our original graph to outputs of the injected-op subgraph\n input_map = {}\n with injected_op_subgraph.as_default():\n with tf.name_scope(self.SPARKDL_OP_SCOPE):\n for _, orig_tensor_name in self.getInputMapping():\n orig_tensor = tfx.get_tensor(orig_tensor_name, user_graph)\n # Create placeholder with same shape as original input tensor, but that accepts\n # float64 input from Spark.\n spark_placeholder = tf.placeholder(tf.float64, shape=orig_tensor.shape,\n name=tfx.op_name(orig_tensor_name))\n # If the original tensor was of type float64, just pass through the Spark input\n if orig_tensor.dtype == tf.float64:\n input_map[orig_tensor_name] = spark_placeholder\n # Otherwise, cast the Spark input to the datatype of the original tensor\n else:\n input_map[orig_tensor_name] = tf.cast(spark_placeholder,\n dtype=orig_tensor.dtype)\n tf.import_graph_def(graph_def=user_graph_def, input_map=input_map, name=\"\")\n return injected_op_subgraph.as_graph_def(add_shapes=True)", "def local_gpu_lazy_ifelse(node):\r\n if isinstance(node.op, theano.ifelse.IfElse) and not node.op.gpu:\r\n gpu_ifelse = theano.ifelse.IfElse(node.op.n_outs, gpu=True)\r\n outs_clients = reduce(list.__add__,\r\n [out.clients for out in node.outputs])\r\n if any([(i.owner and isinstance(i.owner.op, HostFromGpu))\r\n for i in node.inputs]) or any(\r\n [c != 'output' and c.op == gpu_from_host for c, idx\r\n in outs_clients]):\r\n\r\n c = node.inputs[0]\r\n outs = node.inputs[1:]\r\n # Should not happen, but just in case\r\n if isinstance(c.type, CudaNdarrayType):\r\n c = host_from_gpu(c)\r\n\r\n for i in range(len(outs)):\r\n if not isinstance(outs[i], CudaNdarrayType):\r\n outs[i] = gpu_from_host(outs[i])\r\n return [host_from_gpu(out) for out in\r\n gpu_ifelse.make_node(c, *outs).outputs]\r\n\r\n if isinstance(node.op, GpuFromHost):\r\n host_input = node.inputs[0]\r\n if (host_input.owner and\r\n isinstance(host_input.owner.op, theano.ifelse.IfElse) and\r\n not 
host_input.owner.op.gpu and\r\n # If there is more then 1 outputs, we can't replace it\r\n # here with a local optimizer as we replace the\r\n # GpuFromHost node and the other output of the if won't be\r\n # replaced.\r\n host_input.owner.op.n_outs == 1):\r\n gpu_ifelse = theano.ifelse.IfElse(host_input.owner.op.n_outs,\r\n gpu=True)\r\n\r\n c = host_input.owner.inputs[0]\r\n outs = host_input.owner.inputs[1:]\r\n # Should not happen, but just in case\r\n if isinstance(c.type, CudaNdarrayType):\r\n c = host_from_gpu(c)\r\n\r\n for i in range(len(outs)):\r\n if not isinstance(outs[i], CudaNdarrayType):\r\n outs[i] = gpu_from_host(outs[i])\r\n\r\n outs = gpu_ifelse.make_node(c, *outs).outputs\r\n return outs\r\n\r\n return False", "def cuda(self):\n for i in self.modules:\n if torch.cuda.is_available():\n self.modules[i] = self.modules[i].cuda()", "def local_gpu_conv(node):\r\n def GpuConvOp_from_ConvOp(op):\r\n logical_img_hw = None\r\n\r\n if op.kshp_logical is not None and op.kshp_logical != op.kshp:\r\n return None\r\n #print op.kshp, op.imshp[1:3]\r\n #print op.kshp_logical, logical_img_hw\r\n ret = GpuConv(border_mode=op.out_mode,\r\n subsample=(op.dx, op.dy),\r\n logical_img_hw=logical_img_hw,\r\n logical_kern_hw=op.kshp_logical,\r\n logical_kern_align_top=op.kshp_logical_top_aligned,\r\n kshp=op.kshp,\r\n version=op.version,\r\n verbose=op.verbose,\r\n imshp=op.imshp,\r\n )\r\n if op.imshp_logical is not None:\r\n logical_img_hw = op.imshp_logical[1:3]\r\n if logical_img_hw != op.imshp[1:3]:\r\n # this case is not implemented\r\n #return None\r\n rstride = int(numpy.ceil(op.imshp_logical[1] /\r\n float(op.imshp[1])))\r\n cstride = int(numpy.ceil(op.imshp_logical[2] /\r\n float(op.imshp[2])))\r\n\r\n def make_graph(img, kern):\r\n buf = tensor.alloc(numpy.asarray(0, dtype=img.dtype),\r\n img.shape[0], *op.imshp_logical)\r\n img = tensor.set_subtensor(buf[:, :, ::rstride, ::cstride],\r\n img)\r\n img = gpu_from_host(img)\r\n return ret(img, kern)\r\n\r\n return make_graph\r\n return ret\r\n\r\n def values_eq_approx(a, b):\r\n \"\"\"This fct is needed to don't have DebugMode raise useless\r\n error due to ronding error.\r\n\r\n This happen as We reduce on the two last dimensions, so this\r\n can raise the absolute error if the number of element we\r\n reduce on is significant.\r\n\r\n \"\"\"\r\n assert a.ndim == 4\r\n atol = None\r\n if a.shape[-1] * a.shape[-2] > 100:\r\n #For float32 the default atol is 1e-5\r\n atol = 3e-5\r\n return CudaNdarrayType.values_eq_approx(a, b, atol=atol)\r\n\r\n if isinstance(node.op, GpuFromHost):\r\n #gpu_from_host(conv) -> gpu_conv(gpu_from_host)\r\n host_input = node.inputs[0]\r\n if host_input.owner and isinstance(host_input.owner.op, conv.ConvOp):\r\n gpu_conv = GpuConvOp_from_ConvOp(host_input.owner.op)\r\n if gpu_conv is None:\r\n return\r\n img, kern = host_input.owner.inputs\r\n out = gpu_conv(gpu_from_host(img),\r\n gpu_from_host(kern))\r\n out = tensor.patternbroadcast(out,\r\n node.outputs[0].broadcastable)\r\n out.values_eq_approx = values_eq_approx\r\n # in some case the ConvOp broadcast the last 2 dimensions\r\n # differently then the gpu ConvOp\r\n return [out]\r\n\r\n if isinstance(node.op, conv.ConvOp):\r\n #conv(host_from_gpu) -> host_from_gpu(gpu_conv)\r\n img, kern = node.inputs\r\n img_on_gpu = (img.owner and isinstance(img.owner.op, HostFromGpu))\r\n kern_on_gpu = (kern.owner and isinstance(kern.owner.op, HostFromGpu))\r\n if img_on_gpu or kern_on_gpu:\r\n gpu_conv = GpuConvOp_from_ConvOp(node.op)\r\n if gpu_conv is None:\r\n 
return\r\n out = gpu_conv(gpu_from_host(img),\r\n gpu_from_host(kern))\r\n out = tensor.patternbroadcast(\r\n host_from_gpu(out),\r\n node.outputs[0].broadcastable)\r\n out.values_eq_approx = values_eq_approx\r\n # in some case the ConvOp broadcast the last 2 dimensions\r\n # differently then the gpu ConvOp\r\n return [out]", "def local_gpu_conv(node):\r\n def GpuConvOp_from_ConvOp(op):\r\n logical_img_hw = None\r\n\r\n if op.kshp_logical is not None and op.kshp_logical != op.kshp:\r\n return None\r\n #print op.kshp, op.imshp[1:3]\r\n #print op.kshp_logical, logical_img_hw\r\n ret = GpuConv(border_mode=op.out_mode,\r\n subsample=(op.dx, op.dy),\r\n logical_img_hw=logical_img_hw,\r\n logical_kern_hw=op.kshp_logical,\r\n logical_kern_align_top=op.kshp_logical_top_aligned,\r\n kshp=op.kshp,\r\n version=op.version,\r\n verbose=op.verbose,\r\n imshp=op.imshp,\r\n )\r\n if op.imshp_logical is not None:\r\n logical_img_hw = op.imshp_logical[1:3]\r\n if logical_img_hw != op.imshp[1:3]:\r\n # this case is not implemented\r\n #return None\r\n rstride = int(numpy.ceil(op.imshp_logical[1] /\r\n float(op.imshp[1])))\r\n cstride = int(numpy.ceil(op.imshp_logical[2] /\r\n float(op.imshp[2])))\r\n\r\n def make_graph(img, kern):\r\n buf = tensor.alloc(numpy.asarray(0, dtype=img.dtype),\r\n img.shape[0], *op.imshp_logical)\r\n img = tensor.set_subtensor(buf[:, :, ::rstride, ::cstride],\r\n img)\r\n img = gpu_from_host(img)\r\n return ret(img, kern)\r\n\r\n return make_graph\r\n return ret\r\n\r\n def values_eq_approx(a, b):\r\n \"\"\"This fct is needed to don't have DebugMode raise useless\r\n error due to ronding error.\r\n\r\n This happen as We reduce on the two last dimensions, so this\r\n can raise the absolute error if the number of element we\r\n reduce on is significant.\r\n\r\n \"\"\"\r\n assert a.ndim == 4\r\n atol = None\r\n if a.shape[-1] * a.shape[-2] > 100:\r\n #For float32 the default atol is 1e-5\r\n atol = 3e-5\r\n return GpuArrayType.values_eq_approx(a, b, atol=atol)\r\n\r\n img, kern = node.inputs\r\n gpu_conv = GpuConvOp_from_ConvOp(node.op)\r\n if gpu_conv is None:\r\n return\r\n out = gpu_conv(gpu_from_host(img),\r\n gpu_from_host(kern))\r\n # in some case the ConvOp broadcast the last 2 dimensions\r\n # differently then the gpu ConvOp\r\n out = tensor.patternbroadcast(\r\n host_from_gpu(out),\r\n node.outputs[0].broadcastable)\r\n #op_lifter want the output on the GPU.\r\n out = gpu_from_host(out)\r\n out.values_eq_approx = values_eq_approx\r\n return [out]", "def local_to_gpu(node):\r\n if isinstance(node.op, op):\r\n #op(host_from_gpu()) -> host_from_gpu(op)\r\n #If any of the input that go on the GPU are on the GPU,\r\n #move the op to the gpu.\r\n if any(node.inputs[idx].owner and\r\n isinstance(node.inputs[idx].owner.op, cuda.HostFromGpu)\r\n for idx in to_gpu):\r\n new_inp = list(node.inputs)\r\n for idx in to_gpu:\r\n new_inp[idx] = cuda.gpu_from_host(new_inp[idx])\r\n return [cuda.host_from_gpu(op()(*new_inp))]\r\n if node.op == cuda.gpu_from_host:\r\n #gpu_from_host(op) -> op(gpu_from_host)\r\n host_input = node.inputs[0]\r\n if host_input.owner and isinstance(host_input.owner.op,\r\n op):\r\n op_node = host_input.owner\r\n new_inp = list(op_node.inputs)\r\n for idx in to_gpu:\r\n new_inp[idx] = cuda.gpu_from_host(new_inp[idx])\r\n return [op()(*new_inp)]\r\n return False", "def convert_prelu(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n alpha = g.get_node(op.input(\"Alpha\")[0])\n ndims = len(infer_shape(x))\n axis = 0 if ndims <= 1 else 1\n mode = 
op.attr(\"mode\")\n if mode == \"all\":\n if ndims == 1:\n shape = _op.strided_slice(shape_of(x), [0], [1])\n else:\n shape = _op.strided_slice(shape_of(x), [1], [2])\n alpha = _op.broadcast_to(alpha, fold_constant(shape))\n out = _op.nn.prelu(x, alpha, axis)\n g.add_node(op.output(\"Out\")[0], out)", "def move_variable_initialization_to_cpu(graph=None):\n if not graph:\n graph = ops.get_default_graph()\n\n init_ops = []\n dep_ops = list(\n map(lambda x: x.initializer.inputs[1].op,\n graph.get_collection('variables')))\n visited = set()\n\n while len(dep_ops) > 0:\n op = dep_ops.pop()\n if not op in visited:\n visited.add(op)\n init_ops += [op]\n dep_ops += map(lambda x: x.op, op.inputs)\n\n for op in init_ops:\n op._set_device('/device:CPU:0')\n op._set_attr('_class', attr_value_pb2.AttrValue(s=b'loc:@cpu'))\n op._set_attr('_XlaCompile', attr_value_pb2.AttrValue(b=False))\n op._set_attr('_XlaScope', attr_value_pb2.AttrValue(s=b''))\n\n return", "def process_initializer():\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'\n import tensorflow as tf\n physical_devices = tf.config.experimental.list_physical_devices('GPU')\n if len(physical_devices) > 0:\n os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n tf.config.experimental.set_memory_growth(physical_devices[0], True)", "def to_cuda(*args):\n return [None if x is None else x.cuda() for x in args]", "def __init__(self, model_path, img_width, img_height, gpu_fraction=1.0):\n # Create session first with requested gpu_fraction parameter\n config = tf.compat.v1.ConfigProto()\n config.gpu_options.allow_growth = True\n config.gpu_options.per_process_gpu_memory_fraction = gpu_fraction\n self.tf_session = tf.compat.v1.Session(config=config)\n\n with tf.io.gfile.GFile(model_path, 'rb') as graph_file:\n # Load graph off of disk into a graph definition\n graph_def = tf.compat.v1.GraphDef()\n graph_def.ParseFromString(graph_file.read())\n self.input_tensor, self.output_tensor = tf.import_graph_def(\n graph_def,\n return_elements=['input_1:0', 'cumsum_values_1:0'])\n\n self.img_width = img_width\n self.img_height = img_height", "def set_default_tensor_type():\n if torch.cuda.is_available():\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\n else:\n print(\"ERROR: cuda is not available. 
Test will exit.\")", "def inference():\n inf_dataset = dataset\n net.eval()\n frames_gen, frame_cnt, rel_props, prop_ticks, prop_scaling = inf_dataset[index]\n \n num_crop = args.test_crops\n length = 3\n if args.modality == 'Flow':\n length = 10\n elif args.modality == 'RGBDiff':\n length = 18\n \n # First get the base_out outputs\n base_output = torch.autograd.Variable(torch.zeros((num_crop, frame_cnt, base_out_dim)).cuda(),\n volatile=True)\n cnt = 0\n for frames in frames_gen:\n # frames.shape == [frame_batch_size * num_crops * 3, 224, 224]\n # frame_batch_size is 4 by default\n input_var = torch.autograd.Variable(frames.view(-1, length, frames.size(-2), frames.size(-1)).cuda(),\n volatile=True)\n base_out = net(input_var, None, None, None, None)\n bsc = base_out.view(num_crop, -1, base_out_dim)\n base_output[:, cnt:cnt+bsc.size(1), :] = bsc\n cnt += bsc.size(1)\n\n n_frames = base_output.size(1)\n assert frame_cnt == n_frames\n # GLCU\n step_features = base_output.mean(dim=0).mean(dim=0).unsqueeze(0)\n gate, glcu_task_pred = net.glcu(step_features)\n glcu_task_pred = F.softmax(glcu_task_pred.squeeze(), dim=0).data.cpu().numpy()\n gate = gate.repeat(1, num_crop * n_frames).view(num_crop, n_frames, base_out_dim)\n if net.additive_glcu:\n base_output = base_output + gate\n else:\n base_output = base_output * gate\n\n # output.shape == [num_frames, 7791]\n output = torch.zeros((frame_cnt, output_dim)).cuda()\n cnt = 0\n for i in range(0, frame_cnt, 4):\n base_out = base_output[:, i:i+4, :].contiguous().view(-1, base_out_dim)\n rst = net.test_fc(base_out)\n sc = rst.data.view(num_crop, -1, output_dim).mean(dim=0)\n output[cnt: cnt + sc.size(0), :] = sc\n cnt += sc.size(0)\n base_output = base_output.mean(dim=0).data\n\n # act_scores.shape == [num_proposals, K+1]\n # comp_scores.shape == [num_proposals, K]\n act_scores, comp_scores, reg_scores = reorg_stpp.forward(output, prop_ticks, prop_scaling)\n act_scores = torch.autograd.Variable(act_scores, volatile=True)\n comp_scores = torch.autograd.Variable(comp_scores, volatile=True)\n\n # Task Head\n combined_scores = F.softmax(act_scores[:, 1:], dim=1) * torch.exp(comp_scores)\n combined_scores = combined_scores.mean(dim=0).unsqueeze(0)\n task_pred = F.softmax(net.task_head(combined_scores).squeeze(), dim=0).data.cpu().numpy()\n\n act_scores = act_scores.data\n comp_scores = comp_scores.data\n\n if reg_scores is not None:\n reg_scores = reg_scores.view(-1, num_class, 2)\n reg_scores[:, :, 0] = reg_scores[:, :, 0] * stats[1, 0] + stats[0, 0]\n reg_scores[:, :, 1] = reg_scores[:, :, 1] * stats[1, 1] + stats[0, 1]\n\n torch.cuda.empty_cache() # To empty the cache from previous iterations\n\n # perform stpp on scores\n return ((inf_dataset.video_list[index].id,\n (rel_props.numpy(), act_scores.cpu().numpy(), comp_scores.cpu().numpy(), reg_scores.cpu().numpy(), \n glcu_task_pred, task_pred),\n output.cpu().numpy(),\n base_output.cpu().numpy()))", "def test_hostfromgpu_shape_i():\r\n pass\r\n\r\n m = mode_with_gpu.including('local_dot_to_dot22',\r\n 'local_dot22_to_dot22scalar','specialize')\r\n a = T.fmatrix('a')\r\n ca = theano.sandbox.cuda.var.CudaNdarrayType((False, False))()\r\n\r\n av = numpy.asarray(numpy.random.rand(5, 4), dtype='float32')\r\n cv = cuda.CudaNdarray(numpy.asarray(numpy.random.rand(5, 4),\r\n dtype='float32'))\r\n\r\n f = theano.function([a], cuda.basic_ops.gpu_from_host(a), mode=m)\r\n assert cuda.basic_ops.gpu_from_host in [x.op\r\n for x in f.maker.fgraph.toposort()]\r\n f = theano.function([a], 
cuda.basic_ops.gpu_from_host(a).shape, mode=m)\r\n topo = f.maker.fgraph.toposort()\r\n assert isinstance(topo[0].op, T.opt.Shape_i)\r\n assert isinstance(topo[1].op, T.opt.Shape_i)\r\n assert isinstance(topo[2].op, T.opt.MakeVector)\r\n assert tuple(f(av)) == (5, 4)\r\n\r\n\r\n\r\n f = theano.function([ca], cuda.basic_ops.host_from_gpu(ca), mode=m)\r\n assert cuda.basic_ops.host_from_gpu in [x.op\r\n for x in f.maker.fgraph.toposort()]\r\n f = theano.function([ca], cuda.basic_ops.host_from_gpu(ca).shape, mode=m)\r\n topo = f.maker.fgraph.toposort()\r\n assert isinstance(topo[0].op, T.opt.Shape_i)\r\n assert isinstance(topo[1].op, T.opt.Shape_i)\r\n assert isinstance(topo[2].op, T.opt.MakeVector)\r\n assert tuple(f(cv)) == (5, 4)", "def _to_cpu(data: Any) -> Any:\n if isinstance(data, (Tensor, BaseDataElement)):\n return data.to('cpu')\n elif isinstance(data, list):\n return [_to_cpu(d) for d in data]\n elif isinstance(data, tuple):\n return tuple(_to_cpu(d) for d in data)\n elif isinstance(data, dict):\n return {k: _to_cpu(v) for k, v in data.items()}\n else:\n return data", "def cuda(self):\n if torch.cuda.is_available():\n self.automata = self.automata.cuda()\n self.inv_automata = self.inv_automata.cuda()\n self.action = self.action.cuda()\n self.inv_action = self.inv_action.cuda()", "def update(self, batch):\n if self.opt['cuda']:\n inputs = [Variable(torch.LongTensor(b).cuda()) for b in batch[:3]]\n subj_start_binary = Variable(torch.LongTensor(batch[5]).cuda()).float()\n subj_end_binary = Variable(torch.LongTensor(batch[6]).cuda()).float()\n obj_start_relation = Variable(torch.LongTensor(batch[7]).cuda())\n obj_end_relation = Variable(torch.LongTensor(batch[8]).cuda())\n subj_start_type = Variable(torch.LongTensor(batch[9]).cuda())\n subj_end_type = Variable(torch.LongTensor(batch[10]).cuda())\n obj_start_type = Variable(torch.LongTensor(batch[11]).cuda())\n obj_end_type = Variable(torch.LongTensor(batch[12]).cuda())\n nearest_subj_start_position_for_each_token = Variable(torch.LongTensor(batch[13]).cuda())\n distance_to_nearest_subj_start = Variable(torch.LongTensor(batch[14]).cuda())\n distance_to_subj = Variable(torch.LongTensor(batch[15]).cuda())\n nearest_obj_start_position_for_each_token = Variable(torch.LongTensor(batch[3]).cuda())\n distance_to_nearest_obj_start = Variable(torch.LongTensor(batch[4]).cuda())\n else:\n inputs = [Variable(torch.LongTensor(b)) for b in batch[:4]]\n subj_start_label = Variable(torch.LongTensor(batch[4])).float()\n subj_end_label = Variable(torch.LongTensor(batch[5])).float()\n obj_start_label = Variable(torch.LongTensor(batch[6]))\n obj_end_label = Variable(torch.LongTensor(batch[7]))\n subj_type_start_label = Variable(torch.LongTensor(batch[8]))\n subj_type_end_label = Variable(torch.LongTensor(batch[9]))\n obj_type_start_label = Variable(torch.LongTensor(batch[10]))\n obj_type_end_label = Variable(torch.LongTensor(batch[11]))\n subj_nearest_start_for_each = Variable(torch.LongTensor(batch[12]))\n subj_distance_to_start = Variable(torch.LongTensor(batch[13]))\n \n \n mask = (inputs[0].data>0).float()\n # step forward\n self.model.train()\n self.optimizer.zero_grad()\n\n \n subj_start_logits, subj_end_logits, obj_start_logits, obj_end_logits = self.model(inputs, distance_to_subj)\n\n subj_start_loss = self.obj_criterion(subj_start_logits.view(-1, self.opt['num_subj_type']+1), subj_start_type.view(-1).squeeze()).view_as(mask)\n subj_start_loss = torch.sum(subj_start_loss.mul(mask.float()))/torch.sum(mask.float())\n \n subj_end_loss = 
self.obj_criterion(subj_end_logits.view(-1, self.opt['num_subj_type']+1), subj_end_type.view(-1).squeeze()).view_as(mask)\n subj_end_loss = torch.sum(subj_end_loss.mul(mask.float()))/torch.sum(mask.float())\n \n obj_start_loss = self.obj_criterion(obj_start_logits.view(-1, self.opt['num_class']+1), obj_start_relation.view(-1).squeeze()).view_as(mask)\n obj_start_loss = torch.sum(obj_start_loss.mul(mask.float()))/torch.sum(mask.float())\n \n obj_end_loss = self.obj_criterion(obj_end_logits.view(-1, self.opt['num_class']+1), obj_end_relation.view(-1).squeeze()).view_as(mask)\n obj_end_loss = torch.sum(obj_end_loss.mul(mask.float()))/torch.sum(mask.float())\n \n loss = self.opt['subj_loss_weight']*(subj_start_loss + subj_end_loss) + (obj_start_loss + obj_end_loss)\n \n # backward\n loss.backward()\n # torch.nn.utils.clip_grad_norm(self.model.parameters(), self.opt['max_grad_norm'])\n self.optimizer.step()\n loss_val = loss.data.item()\n return loss_val", "def fix_graph(graph, model):\n\n def fix_tensor_metadata(tensors, fix_shape=True):\n for tensor in tensors:\n if not tensor.shape and fix_shape:\n tensor.shape = layerwise(model)[tensor.name].shape\n if not tensor.dtype:\n tensor.dtype = layerwise(model)[tensor.name].dtype\n\n fix_tensor_metadata(graph.inputs)\n fix_tensor_metadata(graph.outputs, fix_shape=False)\n\n # If we're marking inputs, there may be cases where some other inputs are required - for\n # example, if the model is branchy. If, after cleanup(), there are any Variable tensors in\n # the graph without inputs, we'll replace them with constants and fold them away.\n tensor_map = graph.tensors()\n needs_const_fold = False\n for tensor in tensor_map.values():\n if isinstance(tensor, gs.Variable) and not tensor.inputs and tensor not in graph.inputs:\n needs_const_fold = True\n G_LOGGER.info(\"Freezing model input: {:}\".format(tensor))\n tensor.to_constant(layerwise(model, include_data=True)[tensor.name])\n\n if needs_const_fold:\n G_LOGGER.info(\"Folding constants to remove extraneous subgraphs\")\n graph.fold_constants().cleanup()\n\n return graph", "def to_cuda(elements):\n if not torch.cuda.is_available():\n return elements\n if isinstance(elements, tuple) or isinstance(elements, list):\n return [x.cuda() for x in elements]\n return elements.cuda()", "def to_cuda(elements):\n if not torch.cuda.is_available():\n return elements\n if isinstance(elements, tuple) or isinstance(elements, list):\n return [x.cuda() for x in elements]\n return elements.cuda()", "def network_inference(self, points):\r\n\r\n # Ensure no gradient is computed\r\n with torch.no_grad():\r\n\r\n #####################\r\n # Input preparation #\r\n #####################\r\n\r\n # t = [time.time()]\r\n\r\n # Create batch from the frame points\r\n batch = OnlineBatch(points, self.config, self.data_handler)\r\n\r\n # t += [time.time()]\r\n\r\n # Convert batch to a cuda\r\n batch.to(self.device)\r\n # t += [time.time()]\r\n torch.cuda.synchronize(self.device)\r\n\r\n #####################\r\n # Network inference #\r\n #####################\r\n\r\n # Forward pass\r\n outputs = self.net(batch, self.config)\r\n torch.cuda.synchronize(self.device)\r\n # t += [time.time()]\r\n\r\n # Get probs and labels\r\n predicted_probs = self.softmax(outputs).cpu().detach().numpy()\r\n torch.cuda.synchronize(self.device)\r\n # t += [time.time()]\r\n\r\n # Insert false columns for ignored labels\r\n for l_ind, label_value in enumerate(self.data_handler.label_values):\r\n if label_value in self.data_handler.ignored_labels:\r\n 
predicted_probs = np.insert(predicted_probs, l_ind, 0, axis=1)\r\n\r\n # Get predicted labels\r\n predictions = self.data_handler.label_values[np.argmax(predicted_probs, axis=1)].astype(np.int32)\r\n # t += [time.time()]\r\n\r\n # print('\\n************************\\n')\r\n # print('Timings:')\r\n # i = 0\r\n # print('Batch ...... {:7.1f} ms'.format(1000*(t[i+1] - t[i])))\r\n # i += 1\r\n # print('ToGPU ...... {:7.1f} ms'.format(1000*(t[i+1] - t[i])))\r\n # i += 1\r\n # print('Forward .... {:7.1f} ms'.format(1000*(t[i+1] - t[i])))\r\n # i += 1\r\n # print('Softmax .... {:7.1f} ms'.format(1000*(t[i+1] - t[i])))\r\n # i += 1\r\n # print('Preds ...... {:7.1f} ms'.format(1000*(t[i+1] - t[i])))\r\n # print('-----------------------')\r\n # print('TOTAL ..... {:7.1f} ms'.format(1000*(t[-1] - t[0])))\r\n # print('\\n************************\\n')\r\n\r\n return predictions, batch.points[0].cpu().numpy()", "def make_cuda(tensor):\n if torch.cuda.is_available():\n tensor = tensor.cuda()\n return tensor", "def make_cuda(tensor):\n if torch.cuda.is_available():\n tensor = tensor.cuda()\n return tensor", "def variables_on_gpu0():\n old_fn = tf.get_variable\n\n def new_fn(*args, **kwargs):\n with tf.device('/gpu:0'):\n return old_fn(*args, **kwargs)\n\n tf.get_variable = new_fn\n yield\n tf.get_variable = old_fn", "def test_no_shared_var_graph():\r\n a=tensor.fmatrix()\r\n b=tensor.fmatrix()\r\n f = theano.function([a,b],[a+b], mode=mode_with_gpu)\r\n l = f.maker.fgraph.toposort()\r\n assert len(l)==4\r\n assert numpy.any(isinstance(x.op,cuda.GpuElemwise) for x in l)\r\n assert numpy.any(isinstance(x.op,cuda.GpuFromHost) for x in l)\r\n assert numpy.any(isinstance(x.op,cuda.HostFromGpu) for x in l)", "def CUDA(self):\n\n if helpers.CUDA:\n self.generator.cuda()\n self.discriminator.cuda()\n self.adv_loss.cuda()", "def maybe_cuda(x: Union[torch.Tensor, nn.Module]) -> Union[torch.Tensor, nn.Module]:\n if torch.cuda.is_available():\n return x.cuda()\n return x", "def _setup(self):\n\n # caffe-tensorflow/convert.py can only run with Python2. 
Since the default encoding format of Python2 is ASCII\n # but the default encoding format of Python3 is UTF-8, it will raise an error without 'encoding=\"latin1\"'\n weight_dict = np.load(self.vgg16_path, encoding=\"latin1\").item()\n\n scopes = ['conv1_1', 'conv1_2', 'conv2_1', 'conv2_2', 'conv3_1', 'conv3_2', 'conv3_3',\n 'conv4_1', 'conv4_2', 'conv4_3', 'conv5_1', 'conv5_2', 'conv5_3']\n for scope in scopes:\n with tf.variable_scope(scope.split('_')[0] + '/' + scope, reuse=True):\n weights = tf.get_variable('weights')\n biases = tf.get_variable('biases')\n w_init_op = weights.assign(weight_dict[scope]['weights'])\n b_init_op = biases.assign(weight_dict[scope]['biases'])\n tf.add_to_collection(tf.GraphKeys.INIT_OP, w_init_op)\n tf.add_to_collection(tf.GraphKeys.INIT_OP, b_init_op)\n\n with tf.variable_scope('fc6', reuse=True):\n weights = tf.get_variable('weights')\n biases = tf.get_variable('biases')\n w = weight_dict['fc6']['weights']\n b = weight_dict['fc6']['biases']\n w = np.reshape(w, (7, 7, 512, 4096))\n w = w[0:-1:2, 0:-1:2, :, 0:-1:4]\n b = b[0:-1:4]\n w_init_op = weights.assign(w)\n b_init_op = biases.assign(b)\n tf.add_to_collection(tf.GraphKeys.INIT_OP, w_init_op)\n tf.add_to_collection(tf.GraphKeys.INIT_OP, b_init_op)\n\n with tf.variable_scope('fc7', reuse=True):\n weights = tf.get_variable('weights')\n biases = tf.get_variable('biases')\n w = weight_dict['fc7']['weights']\n b = weight_dict['fc7']['biases']\n w = np.reshape(w, (1, 1, 4096, 4096))\n w = w[:, :, 0:-1:4, 0:-1:4]\n b = b[0:-1:4]\n w_init_op = weights.assign(w)\n b_init_op = biases.assign(b)\n tf.add_to_collection(tf.GraphKeys.INIT_OP, w_init_op)\n tf.add_to_collection(tf.GraphKeys.INIT_OP, b_init_op)", "def setup_device_and_gradient(f: hessQuik.networks.NN, network_wrapper: str = 'hessQuik', device: str = 'cpu') \\\n -> torch.nn.Module:\n # map to device\n f = f.to(device)\n\n if network_wrapper == 'PytorchAD':\n f = net.NNPytorchAD(f)\n\n if network_wrapper == 'PytorchHessian':\n f = net.NNPytorchHessian(f)\n\n return f", "def as_tensorflow(self, cuda_threads_per_block=_default_cuda_threads_per_block):\n tf.logging.log(tf.logging.DEBUG, 'Compiling generic C++ for Op ' + self.__class__.__name__)\n cpu_op_lib = Operator._make_generic_c(self.op_c_generic, self.op_name)\n if cuda_enabled:\n tf.logging.log(tf.logging.DEBUG, 'Compiling generic CUDA for Op ' + self.__class__.__name__)\n cuda_op_lib = Operator._make_generic_cuda(self.op_cuda_generic, self.op_name)\n else:\n cuda_op_lib = ''\n\n if self.grad_name is None:\n gpu_grad_name = ''\n gpu_grad_lib = ''\n cpu_grad_name = ''\n cpu_grad_lib = ''\n else:\n tf.logging.log(tf.logging.DEBUG, 'Compiling generic C++ for gradient of Op ' + self.__class__.__name__)\n cpu_grad_lib = Operator._make_generic_c(self.grad_c_generic, self.grad_name)\n cpu_grad_name = self.grad_name + '_generic_cpp'\n if cuda_enabled:\n tf.logging.log(tf.logging.DEBUG, 'Compiling generic CUDA for gradient of Op ' + self.__class__.__name__)\n gpu_grad_lib = Operator._make_generic_cuda(self.grad_cuda_generic, self.grad_name)\n gpu_grad_name = self.grad_name + '_generic_cuda'\n else:\n gpu_grad_name = ''\n gpu_grad_lib = ''\n\n out_shapes = []\n out_types = []\n for cur_type in self.output_types:\n if cur_type.dtype == float32:\n tf_type = 'float'\n elif cur_type.dtype == float64:\n tf_type = 'double'\n else:\n raise NotImplementedError('Only floats and doubles currently supported.')\n\n out_types.append(tf_type)\n out_shapes.append(cur_type.shape)\n\n Operator._register_shape_inference()\n 
Operator._load_dynamiclib_module()\n Operator._register_gradient()\n tf_op = Operator._dynamiclibop_module.dynamic_lib(inputs=self._inputs,\n out_shapes=out_shapes,\n out_types=out_types,\n cpu_lib_path=cpu_op_lib,\n cpu_func_name=self.op_name + '_generic_cpp',\n gpu_lib_path=cuda_op_lib,\n gpu_func_name=self.op_name + '_generic_cuda',\n gpu_grad_func_name=gpu_grad_name,\n gpu_grad_lib_path=gpu_grad_lib,\n cpu_grad_func_name=cpu_grad_name,\n cpu_grad_lib_path=cpu_grad_lib,\n cuda_threads_per_block=cuda_threads_per_block)\n if len(out_shapes) == 1:\n return tf_op[0]\n else:\n return tf_op", "def preprocess_frame(self, frame):\n state = torch.Tensor(frame)\n return gpuify(state, self.gpu_id)", "def _recursive_to_cuda(self, tensors: Union[Tuple[torch.Tensor], torch.Tensor]) \\\n -> Union[List[torch.Tensor], torch.Tensor]:\n if self.device is None: # keep on cpu\n return tensors\n\n if type(tensors) != list and type(tensors) != tuple: # not only for torch.Tensor\n return tensors.to(device=self.device)\n\n cuda_tensors = list()\n for i in range(len(tensors)):\n cuda_tensors.append(self._recursive_to_cuda(tensors[i]))\n return cuda_tensors", "def pipeline(input_, preprocess):\n input_ = input_.convert(\"L\")\n input_ = preprocess(input_)\n input_ = input_.reshape(-1, td.get_resize(Config.is_small_resize)[0], td.get_resize(Config.is_small_resize)[1], 1)\n input_ = input_.permute(3, 0, 1, 2)\n\n if cuda.is_available():\n return input_.type('torch.cuda.FloatTensor')\n else:\n return input_.type('torch.FloatTensor')", "def add_forward_pass_and_gradients(self, phase_train, rel_device_num,\n abs_device_num, image_producer_stage,\n gpu_compute_stage_ops, gpu_grad_stage_ops):\n nclass = self.dataset.num_classes + 1\n input_data_type = get_data_type(self.params)\n data_type = get_data_type(self.params)\n \n with tf.device(self.raw_devices[rel_device_num]):\n if 0:\n aa_debug = 1\n else:\n # Minor hack to avoid H2D copy when using synthetic data\n image_size = self.model.get_image_size()\n image_shape = [\n self.batch_size // self.num_gpus, image_size, image_size,\n self.dataset.depth\n ]\n labels_shape = [self.batch_size // self.num_gpus]\n # Synthetic image should be within [0, 255].\n images = tf.truncated_normal(\n image_shape,\n dtype=input_data_type,\n mean=127,\n stddev=60,\n name='synthetic_images')\n images = tf.contrib.framework.local_variable(\n images, name='gpu_cached_images')\n labels = tf.random_uniform(\n labels_shape,\n minval=0,\n maxval=nclass - 1,\n dtype=tf.int32,\n name='synthetic_labels')\n\n with tf.device(self.devices[rel_device_num]):\n # Rescale from [0, 255] to [0, 2]\n images = tf.multiply(images, 1. 
/ 127.5)\n # Rescale to [-1, 1]\n images = tf.subtract(images, 1.0)\n\n if self.data_format == 'NCHW':\n images = tf.transpose(images, [0, 3, 1, 2])\n var_type = tf.float32\n network = convnet_builder.ConvNetBuilder(\n images, self.dataset.depth, phase_train, self.params.use_tf_layers,\n self.data_format, data_type, var_type)\n with tf.variable_scope('cg', custom_getter=network.get_custom_getter()):\n self.model.add_inference(network)\n # Add the final fully-connected class layer\n logits = network.affine(nclass, activation='linear')\n aux_logits = None\n\n results = {} # The return value\n \n loss = loss_function(logits, labels, aux_logits=aux_logits)\n params = self.variable_mgr.trainable_variables_on_device(\n rel_device_num, abs_device_num)\n l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in params])\n weight_decay = self.params.weight_decay\n if weight_decay is not None and weight_decay != 0.:\n loss += weight_decay * l2_loss\n\n aggmeth = tf.AggregationMethod.DEFAULT\n scaled_loss = loss if self.loss_scale is None else loss * self.loss_scale\n grads = tf.gradients(scaled_loss, params, aggregation_method=aggmeth)\n \n param_refs = self.variable_mgr.trainable_variables_on_device(\n rel_device_num, abs_device_num, writable=True)\n gradvars = list(zip(grads, param_refs))\n results['loss'] = loss\n results['gradvars'] = gradvars\n return results", "def _convert_to_numpy_safe(t: Optional[torch.Tensor]) -> torch.Tensor:\n\n if t is not None and t.device.type == \"cpu\":\n return t.numpy()\n return t", "def local_gpu_elemwise_0(node):\r\n if (isinstance(node.op, tensor.Elemwise) and\r\n dtype_in_elemwise_supported(node.op)):\r\n if any([i.owner and\r\n isinstance(i.owner.op, HostFromGpu)\r\n for i in node.inputs]):\r\n if all([o.type.dtype == 'float32' for o in node.outputs]):\r\n # Don't set any inplace pattern.\r\n # gpu_inplace_elemwise_optimizer will do it later\r\n\r\n if isinstance(node.op.scalar_op, Erfinv):\r\n new_op = GpuElemwise(erfinv_gpu)\r\n else:\r\n try:\r\n new_op = GpuElemwise(node.op.scalar_op)\r\n except SupportCodeError:\r\n # This happens when scalar_op requires support code\r\n return False\r\n\r\n # first establish that float32 can store all inputs\r\n upcastable = set(['float32', 'int8', 'int16', 'uint8',\r\n 'uint16'])\r\n # case 1 - all inputs are already float32\r\n if all([i.type.dtype == 'float32' for i in node.inputs]):\r\n #TODO: change this when fusion makes Elemwise with multiple\r\n # outputs\r\n gpu_elemwise = new_op(*(gpu_from_host(i)\r\n for i in node.inputs))\r\n # case 2 - it is still ok if some inputs were upcast to float32\r\n elif all([i.type.dtype in upcastable\r\n for i in node.inputs]):\r\n # second - establish that a new node with upcasted inputs\r\n # has the same outputs types as the original node\r\n upcasted = node.op.make_node(*[tensor.cast(i, 'float32')\r\n for i in node.inputs])\r\n if [o.type for o in upcasted.outputs] ==\\\r\n [o.type for o in node.outputs]:\r\n\r\n new_inputs = [gpu_from_host(tensor.cast(i, 'float32'))\r\n for i in node.inputs]\r\n gpu_elemwise = new_op(*new_inputs)\r\n else:\r\n return False\r\n else:\r\n return False\r\n\r\n gpu_elemwise = split_huge_add_or_mul(gpu_elemwise.owner)\r\n if not gpu_elemwise:\r\n return False\r\n if max_inputs_to_GpuElemwise(node) < len(gpu_elemwise.inputs):\r\n return False\r\n return [host_from_gpu(gpu_elemwise.outputs[0])]", "def local_gpu_dot_to_dot22(node):\r\n\r\n # In case the got do input upcast, we much check that we can\r\n # make it run on the gpu.\r\n if isinstance(node.op, 
GpuFromHost):\r\n if node.outputs[0].type.dtype != 'float32':\r\n return False\r\n host_input = node.inputs[0]\r\n if host_input.owner and host_input.owner.op == tensor.basic.dot:\r\n x, y = host_input.owner.inputs\r\n # case one: vector X matrix\r\n if _is_real_vector(x) and _is_real_matrix(y):\r\n new_op = GpuDimShuffle((False,), ['x', 0])\r\n shape_out = y.shape[1].dimshuffle(['x'])\r\n gpu_x = new_op(gpu_from_host(x))\r\n gpu_y = gpu_from_host(y)\r\n # case two: matrix X vector\r\n elif _is_real_matrix(x) and _is_real_vector(y):\r\n new_op = GpuDimShuffle((False,), [0, 'x'])\r\n shape_out = x.shape[0].dimshuffle(['x'])\r\n gpu_x = gpu_from_host(x)\r\n gpu_y = new_op(gpu_from_host(y))\r\n else:\r\n return False\r\n\r\n return [GpuReshape(1)(gpu_dot22(gpu_x, gpu_y), shape_out)]\r\n if node.op == tensor.basic.dot:\r\n if node.outputs[0].type.dtype != 'float32':\r\n return False\r\n if any([i.owner and isinstance(i.owner.op, HostFromGpu)\r\n for i in node.inputs]):\r\n x, y = node.inputs\r\n if _is_real_vector(x) and _is_real_matrix(y):\r\n new_op = GpuDimShuffle((False,), ['x', 0])\r\n shape_out = y.shape[1].dimshuffle(['x'])\r\n gpu_x = new_op(gpu_from_host(x))\r\n gpu_y = gpu_from_host(y)\r\n\r\n elif _is_real_matrix(x) and _is_real_vector(y):\r\n new_op = GpuDimShuffle((False,), [0, 'x'])\r\n shape_out = x.shape[0].dimshuffle(['x'])\r\n gpu_x = gpu_from_host(x)\r\n gpu_y = new_op(gpu_from_host(y))\r\n else:\r\n return False\r\n\r\n return [host_from_gpu(GpuReshape(1)(gpu_dot22(gpu_x, gpu_y),\r\n shape_out))]\r\n return False", "def batch_to_device(\n batch: Dict[str, torch.Tensor],\n device: str,\n test_features_nan: List[str] = []) -> Dict[str, torch.Tensor]:\n\n if device == 'cuda':\n data = {}\n for k, v in batch.items():\n if (k in test_features_nan):\n if torch.isnan(v).any():\n raise ValueError(\n 'NaN in {}, training stopped.'.format(k))\n data.update({k: v.to(device, non_blocking=True)})\n return data\n elif device == 'cpu':\n return batch\n else:\n raise ValueError(f'Device {device} not understood, should be one of \"cpu\", \"cuda\".')", "def _prepare_batch(self, batch):\n try:\n from torch_geometric.data import Batch\n except:\n raise ValueError(\"This class requires PyTorch Geometric to be installed.\")\n\n inputs, labels, weights = batch\n pyg_graphs = [graph.to_pyg_graph() for graph in inputs[0]]\n inputs = Batch.from_data_list(pyg_graphs)\n _, labels, weights = super(GATModel, self)._prepare_batch(([], labels,\n weights))\n return inputs, labels, weights", "def setupGPUSwept(solver):\n solver.gpuBlock = (slice(0,solver.sharedShape[0],1),)+solver.gpuBlock\n getGPUReadBlockSwept(solver) #Finish creating gpuReadBlock here\n blockShape =[element.stop for element in solver.gpuBlock]\n blockShape[-1] += int(2*solver.blocksize[0]) #Adding 2 blocks in the column direction\n # Creating local GPU array with split\n grid = (int((blockShape[2])/solver.blocksize[0]),int((blockShape[3])/solver.blocksize[1])) #Grid size\n #Creating constants\n bsp = lambda x: int(numpy.prod(blockShape[x:])) #block shape product returned as an integer\n const_dict = ({\"NV\":blockShape[1],'SX':blockShape[2],'SY':blockShape[3],\"VARS\":bsp(2),\"TIMES\":bsp(1),\"MPSS\":solver.maxPyramidSize,\"MOSS\":solver.maxOctSize,\"OPS\":solver.operating,\"ITS\":solver.intermediate})\n solver.GPUArray = mallocGPUArray(blockShape) #Allocated GPU\n solver.localGPUArray = numpy.zeros(blockShape)\n #Building CUDA source code\n solver.gpu = io.buildGPUSource(solver.gpu)\n io.copyConstants(solver.gpu,const_dict) #This copys 
cpu constants not global constants\n solver.cpu.set_globals(*solver.globals,source_mod=solver.gpu)\n # Make GPU geometry\n solver.Up.initializeGPU(solver.gpu.get_function(\"UpPyramid\"),solver.blocksize,(grid[0],grid[1]-1))\n solver.Oct.initializeGPU(solver.gpu.get_function(\"Octahedron\"),solver.blocksize,(grid[0],grid[1]-1))\n solver.Down.initializeGPU(solver.gpu.get_function(\"DownPyramid\"),solver.blocksize,(grid[0],grid[1]-1))\n solver.Yb.initializeGPU(solver.gpu.get_function(\"YBridge\"),solver.blocksize,grid)\n solver.Xb.initializeGPU(solver.gpu.get_function(\"XBridge\"),solver.blocksize,grid)", "def _model_forward(self, node_feats, input_graph):\n bg = input_graph.to(self.device)\n \n bg.requires_grad = True\n node_feats.requires_grad = True\n \n if self.model_name in ['MPNN', 'AttentiveFP', 'Weave']:\n edge_feats = bg.edata.pop('e').to(self.device)\n edge_feats.requires_grad = True\n return self.model_instance(bg, node_feats, edge_feats)\n else:\n bg.edata.pop('e').to('cuda')\n return self.model_instance(bg, node_feats)", "def batch_to_device(batch):\n for key in batch:\n if isinstance(batch[key], torch.Tensor):\n batch[key] = batch[key].to(device)\n return batch", "def forward(model: nn.Module, inputs: torch.Tensor, device: torch.device):\n\n model.eval()\n model.to(device)\n\n with torch.no_grad():\n inputs = inputs.to(device)\n return model(inputs)", "def evaluate(self, input_graph, dataloader, postprocess=None,\n metric=None, measurer=None, iteration=-1, tensorboard=False):\n logger.info(\"start to evaluate model....\")\n import tensorflow as tf\n from .tf_utils.graph_rewriter.generic.pre_optimize import PreOptimization\n\n graph = tf.Graph()\n graph_def = PreOptimization(input_graph, self.inputs, \\\n self.outputs).get_optimized_graphdef()\n assert graph_def\n with graph.as_default():\n tf.import_graph_def(graph_def, name='')\n\n outputs = copy.deepcopy(self.outputs)\n if tensorboard:\n from .tf_utils.graph_rewriter.graph_util import GraphAnalyzer\n from tensorflow.python.framework import tensor_util\n\n output_postfix = \"_fp32.output\"\n inspect_node_types = [\"Conv2D\", \"DepthwiseConv2dNative\", \"MaxPool\", \"AvgPool\",\n \"ConcatV2\", \"MatMul\", \"FusedBatchNormV3\", \"BiasAdd\",\n \"Relu\", \"Relu6\", \"Dequantize\"]\n fp32_inspect_node_name = []\n int8_inspect_node_name = []\n q_node_scale = {}\n if self.dump_times == 0:\n temp_dir = \"./runs/eval/baseline\"\n else:\n temp_dir = \"./runs/eval/tune_\" + str(self.dump_times)\n if os.path.isdir(temp_dir):\n import shutil\n shutil.rmtree(temp_dir, ignore_errors=True)\n writer = tf.compat.v1.summary.FileWriter(temp_dir, graph)\n\n cur_graph = GraphAnalyzer()\n cur_graph.graph = graph_def\n cur_graph.parse_graph()\n graph_info = cur_graph.node_name_details\n for node in graph_def.node:\n if node.op in inspect_node_types:\n fp32_inspect_node_name.append(node.name)\n elif node.op.find(\"Requantize\") != -1:\n out_min = -2\n out_max = -1\n if node.op.find(\"Sum\") != -1:\n out_min = -5\n out_max = -4\n q_out_min = graph_info[node.input[out_min]\n ].node.attr[\"value\"].tensor.float_val[0]\n q_out_max = graph_info[node.input[out_max]\n ].node.attr[\"value\"].tensor.float_val[0]\n q_node_scale[node.name] = (node.op, q_out_min, q_out_max)\n int8_inspect_node_name.append(node.name)\n # Inspect weights, bias. 
Need further optimize\n if node.op == \"Const\" and (graph_info[graph_info[node.name].outputs[0]].node.op in\n [\"Conv2D\", \"DepthwiseConv2dNative\", \"MatMul\", \"FusedBatchNormV3\", \"BiasAdd\"]):\n const_value = tensor_util.MakeNdarray(node.attr.get('value').tensor)\n self.log_histogram(writer, node.name, const_value)\n\n outputs.extend(fp32_inspect_node_name)\n if len(int8_inspect_node_name) > 0:\n output_postfix = \"_int8.output\"\n outputs.extend(int8_inspect_node_name)\n input_tensor = [\n self.get_tensor_by_name_with_import(graph, x + \":0\") for x in self.inputs \\\n ]\n output_tensor = [\n self.get_tensor_by_name_with_import(graph, x + \":0\") for x in outputs\n ]\n\n config = tf.compat.v1.ConfigProto()\n config.use_per_session_threads = 1\n # config.intra_op_parallelism_threads = 28\n config.inter_op_parallelism_threads = 1\n sess_graph = tf.compat.v1.Session(graph=graph, config=config)\n\n logger.info(\"Start to evaluate model via tensorflow...\")\n for idx, (inputs, labels) in enumerate(dataloader):\n # dataloader should keep the order and len of inputs same with input_tensor\n if len(input_tensor) == 1:\n feed_dict = {input_tensor[0]: inputs} # get raw tensor using index [0]\n else:\n assert len(input_tensor) == len(inputs), \\\n 'inputs len must equal with input_tensor'\n feed_dict = dict(zip(input_tensor, inputs))\n\n if measurer is not None:\n measurer.start()\n predictions = sess_graph.run(output_tensor, feed_dict) \n measurer.end()\n else:\n predictions = sess_graph.run(output_tensor, feed_dict)\n # Inspect node output, just get 1st iteration output tensors for now\n if idx == 0 and tensorboard:\n for index, node_name in enumerate(outputs):\n tensor = predictions[index]\n if node_name in int8_inspect_node_name:\n tensor = self._dequantize(predictions[index], q_node_scale[node_name])\n self.log_histogram(writer, node_name + output_postfix, tensor, idx)\n writer.close()\n if postprocess is not None:\n predictions, labels = postprocess((predictions, labels))\n if metric is not None:\n metric.update(predictions[0], labels)\n if idx + 1 == iteration:\n break\n acc = metric.result() if metric is not None else 0\n if tensorboard:\n new_dir = temp_dir + \"_acc_\" + str(acc)\n writer.close()\n if os.path.isdir(new_dir):\n import shutil\n shutil.rmtree(new_dir, ignore_errors=True)\n os.rename(temp_dir, new_dir)\n self.dump_times += 1\n sess_graph.close()\n return acc", "def forward(self, input):\n if isinstance(input.data, torch.cuda.FloatTensor) and \\\n self.num_gpu > 1:\n out = nn.parallel.data_parallel(\n self.layer, input, range(self.num_gpu))\n else:\n out = self.layer(input)\n # flatten output\n return out", "def forward(self, input):\n if isinstance(input.data, torch.cuda.FloatTensor) and \\\n self.num_gpu > 1:\n out = nn.parallel.data_parallel(\n self.layer, input, range(self.num_gpu))\n else:\n out = self.layer(input)\n # flatten output\n return out", "def build_graph():\n os.environ['CUDA_VISIBLE_DEVICES']= '0'\n\n # frozen_model = '/home/kevin/Codes/DeepNet/log/20180419_221132/frozen_model.pb'\n # frozen_model = '/home/kevin/Downloads/deeplabv3_cityscapes_train/frozen_inference_graph.pb'\n # frozen_model = '/home/kevin/Codes/EnvNet/RUNS/used3/frozen_model.pb'\n frozen_model = '/home/kevin/Codes/DeepNet/log/20180716_212035/frozen_model1.pb'\n graph = load_graph(frozen_model)\n\n for op in graph.get_operations():\n print(op.name)\n\n ## model_envnet/frozen_model.pb\n image_pl = graph.get_tensor_by_name('ImagePlaceholder:0')\n pred_seg = 
graph.get_tensor_by_name('SemanticPredictions:0')\n\n ## model_deeplab/frozen_inference_graph.pb\n # image_pl = graph.get_tensor_by_name('ImageTensor:0')\n # pred_seg = graph.get_tensor_by_name('SemanticPredictions:0')\n\n # ## model_deepnet/frozen_model.pb\n # image_pl = graph.get_tensor_by_name('ImagePlaceholder:0')\n # pred_seg = graph.get_tensor_by_name('SemanticPredictions:0')\n\n config = tf.ConfigProto() \n config.gpu_options.per_process_gpu_memory_fraction = 0.5\n sess = tf.Session(graph=graph,config=config)\n\n return image_pl, pred_seg, sess", "def test_hostfromgpu_shape_i():\r\n\r\n m = mode_with_gpu.including('local_dot_to_dot22',\r\n 'local_dot22_to_dot22scalar','specialize')\r\n a = T.fmatrix('a')\r\n ca = theano.sandbox.gpuarray.type.GpuArrayType('float32', (False, False))()\r\n av = numpy.asarray(numpy.random.rand(5, 4), dtype='float32')\r\n cv = gpuarray.asarray(numpy.random.rand(5, 4),\r\n dtype='float32')\r\n\r\n gpu_from_host = theano.sandbox.gpuarray.basic_ops.gpu_from_host\r\n host_from_gpu = theano.sandbox.gpuarray.basic_ops.host_from_gpu\r\n f = theano.function([a], gpu_from_host(a), mode=m)\r\n assert gpu_from_host in [x.op\r\n for x in f.maker.fgraph.toposort()]\r\n f = theano.function([a], gpu_from_host(a).shape, mode=m)\r\n topo = f.maker.fgraph.toposort()\r\n assert isinstance(topo[0].op, T.opt.Shape_i)\r\n assert isinstance(topo[1].op, T.opt.Shape_i)\r\n assert isinstance(topo[2].op, T.opt.MakeVector)\r\n assert tuple(f(av)) == (5, 4)\r\n\r\n f = theano.function([ca], host_from_gpu(ca), mode=m)\r\n assert host_from_gpu in [x.op\r\n for x in f.maker.fgraph.toposort()]\r\n f = theano.function([ca], host_from_gpu(ca).shape, mode=m)\r\n topo = f.maker.fgraph.toposort()\r\n assert isinstance(topo[0].op, theano.compile.Shape_i)\r\n assert isinstance(topo[1].op, theano.compile.Shape_i)\r\n assert isinstance(topo[2].op, theano.tensor.opt.MakeVector)\r\n assert tuple(f(cv)) == (5, 4)", "def inference(images, indication, flags):\r\n # We instantiate all variables using tf.get_variable() instead of\r\n # tf.Variable() in order to share variables across multiple GPU training runs.\r\n # If we only ran this model on a single GPU, we could simplify this function\r\n # by replacing all instances of tf.get_variable() with tf.Variable().\r\n \r\n # conv1\r\n with tf.variable_scope('conv1') as scope:\r\n kernel = _variable_with_weight_decay('weights',\r\n shape=[7, 7, 3, 32],\r\n init_parameter=0.001,\r\n wd=0.0,\r\n use_fp16=flags.use_fp16)\r\n conv = tf.nn.conv2d(input=images, filter=kernel, strides=[1, 2, 2, 1], padding='SAME')\r\n biases = _variable_on_cpu('biases', [32], tf.constant_initializer(0.0), flags.use_fp16)\r\n pre_activation = tf.nn.bias_add(conv, biases)\r\n conv_1 = tf.nn.leaky_relu(pre_activation, alpha=0.5 ,name=scope.name)\r\n _activation_summary(conv_1)\r\n\r\n \r\n # pool1\r\n pool_1 = tf.nn.max_pool(conv_1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],\r\n padding='SAME', name='pool1') \r\n # conv2\r\n with tf.variable_scope('conv2') as scope:\r\n kernel = _variable_with_weight_decay('weights',\r\n shape=[3, 3, 32, 32],\r\n init_parameter=0.001,\r\n wd=0.0,\r\n use_fp16=flags.use_fp16)\r\n conv = tf.nn.conv2d(pool_1, kernel, [1, 1, 1, 1], padding='SAME')\r\n biases = _variable_on_cpu('biases', [32], tf.constant_initializer(0.0), flags.use_fp16)\r\n pre_activation = tf.nn.bias_add(conv, biases) \r\n conv_2 = tf.nn.leaky_relu(pre_activation, alpha=0.5 ,name=scope.name)\r\n _activation_summary(conv_2)\r\n \r\n # conv3\r\n with tf.variable_scope('conv3') as 
scope:\r\n kernel = _variable_with_weight_decay('weights',\r\n shape=[3, 3, 32, 32],\r\n init_parameter=0.001,\r\n wd=0.0,\r\n use_fp16=flags.use_fp16)\r\n conv = tf.nn.conv2d(conv_2, kernel, [1, 1, 1, 1], padding='SAME')\r\n biases = _variable_on_cpu('biases', [32], tf.constant_initializer(0.0), flags.use_fp16)\r\n pre_activation = tf.nn.bias_add(conv, biases)\r\n conv_3 = tf.nn.leaky_relu(pre_activation, alpha=0.5 ,name=scope.name)\r\n _activation_summary(conv_3) \r\n \r\n \r\n # pool2\r\n pool_2 = tf.nn.max_pool(conv_3, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],\r\n padding='SAME', name='pool2')\r\n \r\n # conv4\r\n with tf.variable_scope('conv4') as scope:\r\n kernel = _variable_with_weight_decay('weights',\r\n shape=[3, 3, 32, 64],\r\n init_parameter=0.001,\r\n wd=0.0,\r\n use_fp16=flags.use_fp16)\r\n conv = tf.nn.conv2d(pool_2, kernel, [1, 1, 1, 1], padding='SAME')\r\n biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0), flags.use_fp16)\r\n pre_activation = tf.nn.bias_add(conv, biases) \r\n conv_4 = tf.nn.leaky_relu(pre_activation, alpha=0.5 ,name=scope.name)\r\n _activation_summary(conv_4)\r\n \r\n # conv5\r\n with tf.variable_scope('conv5') as scope:\r\n kernel = _variable_with_weight_decay('weights',\r\n shape=[3, 3, 64, 64],\r\n init_parameter=0.001,\r\n wd=0.0,\r\n use_fp16=flags.use_fp16)\r\n conv = tf.nn.conv2d(conv_4, kernel, [1, 1, 1, 1], padding='SAME')\r\n biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0), flags.use_fp16)\r\n pre_activation = tf.nn.bias_add(conv, biases)\r\n conv_5 = tf.nn.leaky_relu(pre_activation, alpha=0.5 ,name=scope.name)\r\n _activation_summary(conv_5) \r\n\r\n # conv6\r\n with tf.variable_scope('conv6') as scope:\r\n kernel = _variable_with_weight_decay('weights',\r\n shape=[3, 3, 64, 64],\r\n init_parameter=0.001,\r\n wd=0.0,\r\n use_fp16=flags.use_fp16)\r\n conv = tf.nn.conv2d(conv_5, kernel, [1, 1, 1, 1], padding='SAME')\r\n biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0), flags.use_fp16)\r\n pre_activation = tf.nn.bias_add(conv, biases)\r\n conv_6 = tf.nn.leaky_relu(pre_activation, alpha=0.5 ,name=scope.name)\r\n _activation_summary(conv_6) \r\n \r\n # pool3\r\n pool_3 = tf.nn.max_pool(conv_6, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],\r\n padding='SAME', name='pool3') \r\n # conv7\r\n with tf.variable_scope('conv7') as scope:\r\n kernel = _variable_with_weight_decay('weights',\r\n shape=[3, 3, 64, 128],\r\n init_parameter=0.001,\r\n wd=0.0,\r\n use_fp16=flags.use_fp16)\r\n conv = tf.nn.conv2d(pool_3, kernel, [1, 1, 1, 1], padding='SAME')\r\n\r\n biases = _variable_on_cpu('biases', [128], tf.constant_initializer(0.0), flags.use_fp16)\r\n pre_activation = tf.nn.bias_add(conv, biases)\r\n conv_7 = tf.nn.leaky_relu(pre_activation, alpha=0.5 ,name=scope.name) \r\n _activation_summary(conv_7)\r\n\r\n # conv8\r\n with tf.variable_scope('conv8') as scope:\r\n kernel = _variable_with_weight_decay('weights',\r\n shape=[3, 3, 128, 128],\r\n init_parameter=0.001,\r\n wd=0.0,\r\n use_fp16=flags.use_fp16)\r\n conv = tf.nn.conv2d(conv_7, kernel, [1, 1, 1, 1], padding='SAME')\r\n biases = _variable_on_cpu('biases', [128], tf.constant_initializer(0.0), flags.use_fp16)\r\n pre_activation = tf.nn.bias_add(conv, biases) \r\n conv_8 = tf.nn.leaky_relu(pre_activation, alpha=0.5 ,name=scope.name)\r\n _activation_summary(conv_8) \r\n \r\n # conv9\r\n with tf.variable_scope('conv9') as scope:\r\n kernel = _variable_with_weight_decay('weights',\r\n shape=[3, 3, 128, 128],\r\n init_parameter=0.001,\r\n 
wd=0.0,\r\n use_fp16=flags.use_fp16)\r\n conv = tf.nn.conv2d(conv_8, kernel, [1, 1, 1, 1], padding='SAME')\r\n biases = _variable_on_cpu('biases', [128], tf.constant_initializer(0.0), flags.use_fp16)\r\n pre_activation = tf.nn.bias_add(conv, biases)\r\n conv_9 = tf.nn.leaky_relu(pre_activation, alpha=0.5 ,name=scope.name)\r\n _activation_summary(conv_9) \r\n \r\n # conv10\r\n with tf.variable_scope('conv10') as scope:\r\n kernel = _variable_with_weight_decay('weights',\r\n shape=[3, 3, 128, 128],\r\n init_parameter=0.001,\r\n wd=0.0,\r\n use_fp16=flags.use_fp16)\r\n conv = tf.nn.conv2d(conv_9, kernel, [1, 1, 1, 1], padding='SAME')\r\n biases = _variable_on_cpu('biases', [128], tf.constant_initializer(0.0), flags.use_fp16)\r\n pre_activation = tf.nn.bias_add(conv, biases)\r\n conv_10 = tf.nn.leaky_relu(pre_activation, alpha=0.5 ,name=scope.name)\r\n _activation_summary(conv_10) \r\n \r\n # pool4\r\n pool_4 = tf.nn.max_pool(conv_10, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],\r\n padding='SAME', name='pool4') \r\n # conv11\r\n with tf.variable_scope('conv11') as scope:\r\n kernel = _variable_with_weight_decay('weights',\r\n shape=[3, 3, 128, 256],\r\n init_parameter=0.001,\r\n wd=0.0,\r\n use_fp16=flags.use_fp16)\r\n conv = tf.nn.conv2d(pool_4, kernel, [1, 1, 1, 1], padding='SAME')\r\n biases = _variable_on_cpu('biases', [256], tf.constant_initializer(0.0), flags.use_fp16)\r\n pre_activation = tf.nn.bias_add(conv, biases)\r\n conv_11 = tf.nn.leaky_relu(pre_activation, alpha=0.5 ,name=scope.name)\r\n _activation_summary(conv_11)\r\n \r\n # conv12\r\n with tf.variable_scope('conv12') as scope:\r\n kernel = _variable_with_weight_decay('weights',\r\n shape=[3, 3, 256, 256],\r\n init_parameter=0.001,\r\n wd=0.0,\r\n use_fp16=flags.use_fp16)\r\n conv = tf.nn.conv2d(conv_11, kernel, [1, 1, 1, 1], padding='SAME')\r\n biases = _variable_on_cpu('biases', [256], tf.constant_initializer(0.0), flags.use_fp16)\r\n pre_activation = tf.nn.bias_add(conv, biases)\r\n conv_12 = tf.nn.leaky_relu(pre_activation, alpha=0.5 ,name=scope.name) \r\n _activation_summary(conv_12) \r\n \r\n # conv13\r\n with tf.variable_scope('conv13') as scope:\r\n kernel = _variable_with_weight_decay('weights',\r\n shape=[3, 3, 256, 256],\r\n init_parameter=0.001,\r\n wd=0.0,\r\n use_fp16=flags.use_fp16)\r\n conv = tf.nn.conv2d(conv_12, kernel, [1, 1, 1, 1], padding='SAME') \r\n biases = _variable_on_cpu('biases', [256], tf.constant_initializer(0.0), flags.use_fp16)\r\n pre_activation = tf.nn.bias_add(conv, biases)\r\n conv_13 = tf.nn.leaky_relu(pre_activation, alpha=0.5 ,name=scope.name) \r\n _activation_summary(conv_13) \r\n \r\n # conv14\r\n with tf.variable_scope('conv14') as scope:\r\n kernel = _variable_with_weight_decay('weights',\r\n shape=[3, 3, 256, 256],\r\n init_parameter=0.001,\r\n wd=0.0,\r\n use_fp16=flags.use_fp16)\r\n conv = tf.nn.conv2d(conv_13, kernel, [1, 1, 1, 1], padding='SAME')\r\n biases = _variable_on_cpu('biases', [256], tf.constant_initializer(0.0), flags.use_fp16)\r\n pre_activation = tf.nn.bias_add(conv, biases)\r\n conv_14 = tf.nn.leaky_relu(pre_activation, alpha=0.5 ,name=scope.name)\r\n _activation_summary(conv_14) \r\n\r\n # pool5\r\n pool_5 = tf.nn.max_pool(conv_14, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],\r\n padding='SAME', name='pool5') \r\n # dropout1\r\n dropout_1 = tf.nn.dropout(pool_5, keep_prob=0.25, name='dropout1') \r\n \r\n # maxout1\r\n with tf.variable_scope('maxout1') as scope:\r\n # reshape from convolution\r\n reshape = tf.reshape(dropout_1, [-1, 16384])\r\n weights = 
_variable_with_weight_decay('weights', shape=[16384, 512, 2],\r\n init_parameter=0.001, wd=0.0, use_fp16=flags.use_fp16) \r\n biases = _variable_on_cpu('biases', [512, 2], tf.constant_initializer(0.0), flags.use_fp16)\r\n maxout = tf.tensordot(reshape, weights, axes=1) + biases\r\n maxout_1 = tf.reduce_max(maxout, axis=2, name=scope.name)\r\n _activation_summary(maxout_1) \r\n \r\n # merge two eyes\r\n with tf.variable_scope('reshape1') as scope:\r\n # concat left and right lable\r\n concat_1 = tf.concat([maxout_1,indication], axis=-1)\r\n # reshape1(merge eyes)\r\n reshape_1 = tf.reshape(concat_1, [32,-1], name=scope.name)\r\n \r\n # dropout2\r\n dropout_2 = tf.nn.dropout(reshape_1, keep_prob=0.25, name='dropout2') \r\n \r\n # maxout2\r\n with tf.variable_scope('maxout2') as scope:\r\n reshape = tf.reshape(dropout_2, [-1, 1028])\r\n weights = _variable_with_weight_decay('weights', shape=[1028, 512, 2],\r\n init_parameter=0.001, wd=0.0, use_fp16=flags.use_fp16) \r\n biases = _variable_on_cpu('biases', [512, 2], tf.constant_initializer(0.0), flags.use_fp16)\r\n maxout = tf.tensordot(reshape, weights, axes=1) + biases\r\n maxout_2 = tf.reduce_max(maxout, axis=2, name='maxout2')\r\n _activation_summary(maxout_2) \r\n\r\n # dropout3\r\n dropout_3 = tf.nn.dropout(maxout_2, keep_prob=0.25, name='dropout3')\r\n \r\n # linear layer(WX + b),\r\n # We don't apply softmax here because\r\n # tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits\r\n # and performs the softmax internally for efficiency.\r\n with tf.variable_scope('softmax_linear') as scope:\r\n weights = _variable_with_weight_decay('weights', [512, 2*NUM_CLASSES],\r\n init_parameter=0.001, wd=0.0, use_fp16=flags.use_fp16)\r\n biases = _variable_on_cpu('biases', [2*NUM_CLASSES], tf.constant_initializer(0.0), flags.use_fp16)\r\n softmax_linear = tf.add(tf.matmul(dropout_3, weights), biases, name=scope.name)\r\n _activation_summary(softmax_linear)\r\n \r\n # back to one eye\r\n with tf.variable_scope('reshape2') as scope:\r\n reshape_2 = tf.reshape(softmax_linear,[64,5],name=scope.name)\r\n \r\n return reshape_2", "def cpu_tensor_to_gpu(tensor):\n name = '%s-gpu' % tensor.name\n if tensor.ndim == 0:\n result = theano.sandbox.cuda.fscalar(name)\n elif tensor.ndim == 1:\n result = theano.sandbox.cuda.fvector(name)\n elif tensor.ndim == 2:\n result = theano.sandbox.cuda.fmatrix(name)\n elif tensor.ndim == 3:\n result = theano.sandbox.cuda.ftensor3(name)\n elif tensor.ndim == 4:\n result = theano.sandbox.cuda.ftensor4(name)\n elif tensor.ndim == 5:\n _type = theano.sandbox.cuda.type.CudaNdarrayType(\n dtype=theano.config.floatX,\n broadcastable=((False,)*5) \n )\n result = _type(name)\n else:\n raise ValueError('only up to dimension 4')\n\n return result", "def _get_relay_func(self, graph, layout=\"NHWC\", shape=None, outputs=None):\n try:\n from tensorflow.python.framework import tensor_util\n except ImportError as e:\n raise ImportError(f\"Unable to import tensorflow which is required {e}\")\n\n missing_operators = self._parse_import_prerequisites(graph)\n control_flow_nodes = []\n ta_write_nodes = []\n ta_gather_nodes = []\n ta_construct_nodes = []\n self._in_shape = shape\n self._layout = layout\n self._graph = graph\n\n if missing_operators:\n freezed_ops = [op for op in missing_operators if op in _freezed_graph_pruned_op_list]\n if freezed_ops:\n raise Exception(\n f\"Graph is not frozen. Provide a frozen graph. 
\"\n f\"Found operators {freezed_ops}\"\n )\n\n raise NotImplementedError(\n f\"The following operators are not implemented: {missing_operators}\"\n )\n\n for node in graph.node:\n node_name_prefix = node.name.rsplit(\"/\", 1)[0]\n self._control_flow_node_map[node_name_prefix].add(node.op)\n self._tf_node_map[node.name] = node\n\n # Parse output_shapes attribute\n parsed_attr = self._parse_attr(node.attr)\n if \"_output_shapes\" in parsed_attr:\n self._output_shapes[node.name] = [\n tensor_util.TensorShapeProtoToList(tshape)\n for tshape in parsed_attr[\"_output_shapes\"]\n ]\n else:\n self._output_shapes[node.name] = [None]\n\n # Parse placeholder and const here since input shape info is required.\n if node.op == \"Placeholder\" or node.op == \"PlaceholderWithDefault\":\n # Give priority to user argument.\n if shape and node.name in shape:\n self._input_shapes[node.name] = list(shape[node.name])\n else:\n self._input_shapes[node.name] = tensor_util.TensorShapeProtoToList(\n node.attr[\"shape\"].shape\n )\n for idx, dim in enumerate(self._input_shapes[node.name]):\n if dim < 0:\n self._input_shapes[node.name][idx] = Any()\n\n self._output_shapes[node.name] = [self._input_shapes[node.name]]\n attr = self._parse_attr(node.attr)\n self._nodes[node.name] = [\n set_span(\n _expr.var(\n node.name, shape=self._input_shapes[node.name], dtype=attr[\"dtype\"].name\n ),\n node.name,\n )\n ]\n\n # Ignore user's input shape for Non placeholder\n elif node.op == \"Const\":\n tensor_value = node.attr[\"value\"].tensor\n self._input_shapes[node.name] = tensor_util.TensorShapeProtoToList(\n tensor_value.tensor_shape\n )\n self._output_shapes[node.name] = [self._input_shapes[node.name]]\n if shape and node.name in shape:\n warnings.warn(\n f\"Ignore the passed shape. 
Shape in graphdef \"\n f\"will be used for operator {node.name}.\"\n )\n for key, value in node.attr.items():\n self._parse_param(key, value, node.name, self._in_shape)\n elif node.op in _control_flow_nodes:\n # We assume that the direct parent node of Exit is a while loop block\n if node.op == \"Exit\":\n self._while_loop_name_set.add(node_name_prefix)\n control_flow_nodes.append(node)\n elif node.op.startswith(\"TensorArray\"):\n if is_tensor_array_constuctor(node):\n ta_construct_nodes.append(node)\n else:\n for ta_write_name, idx in _tensor_array_write_ops.items():\n if node.op.startswith(ta_write_name):\n ta_write_nodes.append((node, idx))\n break\n if node.op.startswith(\"TensorArrayGather\"):\n ta_gather_nodes.append(node)\n\n # Use tensor array gather to infer static tensor array shape\n for gather_node in ta_gather_nodes:\n input_ta_name = gather_node.input[0]\n input_ta_node = self._tf_node_map[input_ta_name]\n if is_tensor_array_constuctor(input_ta_node):\n gather_attr = self._parse_attr(gather_node.attr)\n if \"element_shape\" not in gather_attr:\n continue\n raw_elem_shape = tensor_util.TensorShapeProtoToList(gather_attr[\"element_shape\"])\n elem_shape = []\n for dim in raw_elem_shape:\n if dim < 0:\n elem_shape.append(Any())\n else:\n elem_shape.append(int(dim))\n self._tensor_array_shapes[input_ta_node.name] = elem_shape\n\n # Fetch node contains static tensor array shape\n for item in ta_write_nodes:\n wnode = item[0]\n ta_idx, inode_idx = item[1]\n\n stack = [self._tf_node_map[wnode.input[ta_idx].split(\":\")[0]]]\n while stack:\n cnode = stack.pop(0)\n if not cnode.op.startswith(\"TensorArray\"):\n for iname in cnode.input:\n stack.append(self._tf_node_map[iname.split(\":\")[0]])\n elif cnode.name != wnode.name:\n if is_tensor_array_constuctor(cnode):\n inode = self._tf_node_map[wnode.input[inode_idx].split(\":\")[0]]\n tn = wnode.input[inode_idx].split(\":\")\n output_index = int(tn[1]) if len(tn) > 1 else 0\n self._tensor_array_shape_nodes[cnode.name] = (inode, wnode.op, output_index)\n break\n\n # First, parse all control flow nodes.\n # Convert tf.cond to Branch and tf.while_loop to Loop.\n sorted_cf_nodes = []\n exit_pos_map = {}\n ordered_prefix = []\n # Sort control flow nodes to move all Exit nodes to the end\n # of corresponding while_loop block.\n for node in control_flow_nodes:\n loop_name = find_parent_loop_name(node.name, self._while_loop_name_set)\n if node.op == \"Exit\":\n if loop_name not in exit_pos_map:\n ordered_prefix.append(loop_name)\n exit_pos_map[loop_name] = len(sorted_cf_nodes)\n sorted_cf_nodes.append(node)\n elif loop_name in self._while_loop_name_set:\n if loop_name not in exit_pos_map:\n sorted_cf_nodes.append(node)\n else:\n sorted_cf_nodes.insert(exit_pos_map[loop_name], node)\n for j in range(ordered_prefix.index(loop_name), len(ordered_prefix)):\n exit_pos_map[ordered_prefix[j]] += 1\n else:\n sorted_cf_nodes.append(node)\n\n for node in sorted_cf_nodes:\n self._sorted_cf_node_names.append(node.name)\n\n for node in sorted_cf_nodes:\n self._backtrack_construct(node.name)\n\n # Second, parse other nodes to re-create TF graph using Relay operators.\n for node in graph.node:\n self._backtrack_construct(node.name)\n\n out = []\n if outputs is None:\n last_node = graph.node[-1]\n op = self._nodes[last_node.name.split(\":\")[0]]\n if last_node.op == \"Exit\":\n out = [op[0].tuple_value]\n else:\n out = op\n else:\n for out_name in outputs:\n if \":\" in out_name:\n out_name, out_num = out_name.split(\":\")\n out_num = int(out_num)\n 
out.append(self._nodes[out_name][out_num])\n else:\n out.append(self._nodes[out_name][0])\n\n if isinstance(out, _expr.TupleWrapper):\n out = out.tuple_value\n else:\n out = out[0] if len(out) == 1 else _expr.Tuple(out)\n fvars = analysis.free_vars(out)\n func = _function.Function(fvars, out)\n final_params = {}\n for fv in fvars:\n if fv.name_hint in self._params:\n final_params[fv.name_hint] = self._params[fv.name_hint]\n self._params = final_params\n return func", "def _batch_to_device(batch, target_device):\n tensor = _getattr(\"torch\", \"Tensor\")\n for key in batch:\n if isinstance(batch[key], tensor):\n batch[key] = batch[key].to(target_device)\n return batch", "def maybe_cuda(t):\n if torch.cuda.is_available():\n return t\n return t", "def forward(self, x):\n sources = list()\n loc = list()\n conf = list()\n\n # apply vgg up to conv4_3 relu\n #print('Reached start of vgg')\n for k in self.vgg._modules.keys():\n if int(k) < 23:\n #print('Reached ' + k + ' ', x.size())\n x = self.vgg._modules[k].cuda()(x)\n #print('Reached L2Norm')\n s = self.L2Norm(x)\n sources.append(s)\n\n #print('Reached after L2Norm')\n # apply vgg up to fc7\n for k in self.vgg._modules.keys():\n if int(k) >= 23:\n #print('Reached ' + k + ' ', x.size())\n x = self.vgg._modules[k].cuda()(x)\n sources.append(x)\n #print('Reached end of VGG')\n\n # apply extra layers and cache source layer outputs\n for k, v in enumerate(self.extras):\n x = F.relu(v(x), inplace=True)\n if k % 2 == 1:\n sources.append(x)\n\n # apply multibox head to source layers\n for (x, l, c) in zip(sources, self.loc, self.conf):\n loc.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf.append(c(x).permute(0, 2, 3, 1).contiguous())\n\n loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)\n conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)\n\n if self.phase == \"test\":\n output = self.detect(\n loc.view(loc.size(0), -1, 4), # loc preds\n self.softmax(conf.view(-1, self.num_classes)), # conf preds\n self.priors # default boxes\n )\n else:\n output = (\n loc.view(loc.size(0), -1, 4),\n conf.view(conf.size(0), -1, self.num_classes),\n self.priors\n )\n return output", "def graph_to_function(graph, target, ctx, shape=None, dtype=None):\n # Infer missing shapes and dtypes\n graph, shape, dtype, output_shapes, output_dtypes = \\\n infer_shapes_dtypes(graph, shape=shape, dtype=dtype)\n\n if None in dtype.values():\n raise ValueError(\"Input variables with no type: {}\".format(dtype))\n\n if not all(shape.values()):\n raise ValueError(\"Input variables with no shape: {}\".format(shape))\n\n compute_graph, lib, params = nnvm.compiler.build(graph, target, shape=shape, dtype=dtype)\n module = graph_runtime.create(compute_graph, lib, ctx)\n\n if params:\n module.set_inputs(**params)\n\n def run(**kwargs):\n module.run(**kwargs)\n res = []\n for i, (o_shape, o_dtype) in enumerate(zip(output_shapes, output_dtypes)):\n res.append(module.get_output(i, tvm.nd.empty(o_shape, o_dtype)).asnumpy())\n return res\n\n return run", "def call(self, inputs, training=False):\n with tf.device('/GPU:0'):\n x1 = self.conv1_1(inputs)\n x1 = tf.nn.local_response_normalization(x1, depth_radius=5, bias=2, alpha=0.001, beta=0.75)\n x1 = self.max_pool(x1)\n x1 = self.conv2_1(x1)\n x1 = tf.nn.local_response_normalization(x1, depth_radius=5, bias=2, alpha=0.001, beta=0.75)\n x1 = self.max_pool(x1)\n\n with tf.device('/GPU:1'):\n x2 = self.conv1_2(inputs)\n x2 = tf.nn.local_response_normalization(x2, depth_radius=5, bias=2, alpha=0.001, beta=0.75)\n x2 = 
self.max_pool(x2)\n x2 = self.conv2_2(x2)\n x2 = tf.nn.local_response_normalization(x2, depth_radius=5, bias=2, alpha=0.001, beta=0.75)\n x2 = self.max_pool(x2)\n\n x = tf.keras.layers.concatenate([x1, x2])\n\n with tf.device('/GPU:0'):\n x1 = self.conv3_1(x)\n x1 = self.conv4_1(x1)\n x1 = self.conv5_1(x1)\n x1 = self.max_pool(x1)\n\n with tf.device('/GPU:1'):\n x2 = self.conv3_2(x)\n x2 = self.conv4_2(x2)\n x2 = self.conv5_2(x2)\n x2 = self.max_pool(x2)\n\n # Fully Connected Layers\n x = tf.keras.layers.concatenate([x1, x2])\n x = self.flatten(x)\n\n with tf.device('/GPU:0'):\n x1 = self.drop(self.fc1_1(x))\n x1 = self.drop(self.fc2_1(x1))\n\n with tf.device('/GPU:1'):\n x2 = self.drop(self.fc1_2(x))\n x2 = self.drop(self.fc2_2(x2))\n\n x = tf.keras.layers.concatenate([x1, x2])\n x = self.fc3(x)\n\n return x", "def _train(args): \n\n #device = 'cuda' if torch.cuda.is_available() else 'cpu'\n device = 'cpu'\n logger.info(\"Device Type: {}\".format(device))\n\n logger.info(\"Loading SUN360 dataset\")\n transform = transforms.Compose(\n [transforms.Resize((224,224)),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n target_transform = transforms.Compose([transforms.Resize((224,224)),\n transforms.ToTensor()]) \n\n trainset = SUN360Dataset(\"imagedata.json\",transform = transform, target_transform = target_transform)\n train_loader = DataLoader(trainset, batch_size=args.batch_size,\n shuffle=True, num_workers=args.workers)\n \"\"\"\n testset = torchvision.datasets.CIFAR10(root=args.data_dir, train=False,\n download=False, transform=transform)\n test_loader = DataLoader(testset, batch_size=args.batch_size,\n shuffle=False, num_workers=args.workers)\n \"\"\" \n\n logger.info(\"Model loaded\")\n model = EfficientNet.from_name('efficientnet-b0',conv_type='Equi')\n\n if torch.cuda.device_count() > 1:\n logger.info(\"Gpu count: {}\".format(torch.cuda.device_count()))\n model = nn.DataParallel(model)\n\n model = model.to(device)\n\n criterion = CELoss().to(device)\n optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)\n\n for epoch in range(0, args.epochs):\n running_loss = 0.0\n for i, data in enumerate(train_loader):\n # get the inputs\n inputs, EM , CM = data\n inputs, EM, CM = inputs.to(device), EM.to(device), CM.to(device)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = model(inputs)\n EMLoss, CMLoss = map_loss(outputs,EM,CM,criterion)\n loss = EMLoss + CMLoss\n loss.backward()\n optimizer.step()\n\n # print statistics\n running_loss += loss.item()\n if i % 2000 == 1999: # print every 2000 mini-batches\n print('[%d, %5d] loss: %.3f' %\n (epoch + 1, i + 1, running_loss / 2000))\n running_loss = 0.0\n print('Finished Training')\n return _save_model(model, args.model_dir)", "def prepare_data(train_x, train_y, dev_x, dev_y, test_x, testy):\n train_x = torch.FloatTensor(train_x).cuda()\n train_y = torch.FloatTensor(train_y).cuda()\n dev_x = torch.FloatTensor(dev_x).cuda()\n dev_y = torch.FloatTensor(dev_y).cuda()\n test_x = torch.FloatTensor(test_x).cuda()\n test_y = torch.FloatTensor(testy).cuda()\n return train_x, train_y, dev_x, dev_y, test_x, test_y", "def update_net(optimizer):\n assert kl_train_dataset.bp_mode\n frames_gen, frame_cnt, rel_props, prop_ticks, prop_scaling = kl_train_dataset[index]\n\n optimizer.zero_grad()\n \n num_crop = 1\n length = 3\n if args.modality == 'Flow':\n length = 10\n elif args.modality == 'RGBDiff':\n length = 18\n \n 
for frames in frames_gen:\n # frames.shape == [frame_batch_size * num_crop * 3, 224, 224]\n assert len(frames) == length * frame_cnt\n input_var = torch.autograd.Variable(frames.view(-1, length, frames.size(-2), frames.size(-1)).cuda())\n base_out = net(input_var, None, None, None, None)\n assert base_out.size(0) == frame_cnt and base_out.size(1) == base_out_dim\n step_features = base_out.mean(dim=0).unsqueeze(0)\n gate, glcu_task_pred = net.glcu(step_features)\n gate = gate.repeat(1, frame_cnt).view(frame_cnt, base_out_dim)\n assert glcu_task_pred.size(0) == 1\n glcu_task_pred = F.softmax(glcu_task_pred.squeeze(), dim=0)\n if net.additive_glcu:\n base_out = base_out + gate\n else:\n base_out = base_out * gate\n\n output = net.test_fc(base_out)\n assert output.size(0) == frame_cnt and output.size(1) == output_dim\n act_scores, comp_scores, reg_scores = reorg_stpp.forward(output, prop_ticks, prop_scaling, bp_mode=True)\n\n # Task Head\n combined_scores = F.softmax(act_scores[:, 1:], dim=1) * torch.exp(comp_scores)\n combined_scores = combined_scores.mean(dim=0).unsqueeze(0)\n task_pred = net.task_head(combined_scores)\n assert task_pred.size(0) == 1\n task_pred = F.softmax(net.task_head(combined_scores).squeeze(), dim=0)\n\n loss = KL(task_pred, glcu_task_pred)\n loss.backward()\n torch.cuda.empty_cache() # To empty the cache from previous iterations\n break\n\n optimizer.step()\n optimizer.zero_grad()\n torch.cuda.empty_cache()\n\n return float(loss.data), frame_cnt", "def process_batch(batch):\n args = get_args()\n\n tokens = batch['text'].long().cuda().contiguous()\n types = batch['types'].long().cuda().contiguous()\n labels = batch['label'].long().cuda().contiguous()\n attention_mask = batch['padding_mask'].float().cuda().contiguous()\n if args.fp16:\n attention_mask = attention_mask.half()\n\n return tokens, types, labels, attention_mask", "def InfeedTFFunc(self, inp_instance):\n inp_instance.DeviceLoopSetupEager()\n inp_instance.CreateTpuEnqueueOps()\n # `CreateTpuEnqueueOps` and `CreateCpuPassthroughEnqueueOps` must be in the\n # same place, because the former enqueues `_per_host_passthrough_batches`,\n # while the latter consumes it.\n inp_instance.CreateCpuPassthroughEnqueueOps()\n # `CreateCpuPassthroughEnqueueOps` and `DequeueCpuPassthrough` must be in\n # the same place, because the former enqueues `_host_queues`,\n # while the latter consumes it.\n cpu_pt = inp_instance.DequeueCpuPassthrough()\n return cpu_pt", "def main(unused_argv):\n # Load data\n (train_adj, full_adj, train_feats, test_feats, y_train, y_val, y_test,\n train_mask, val_mask, test_mask, _, val_data, test_data, num_data,\n visible_data) = load_data(FLAGS.data_prefix, FLAGS.dataset, FLAGS.precalc)\n\n # Partition graph and do preprocessing\n if FLAGS.bsize > 1:\n _, parts = partition_utils.partition_graph(train_adj, visible_data,\n FLAGS.num_clusters)\n parts = [np.array(pt) for pt in parts]\n else:\n (parts, features_batches, support_batches, y_train_batches,\n train_mask_batches) = utils.preprocess(train_adj, train_feats, y_train,\n train_mask, visible_data,\n FLAGS.num_clusters,\n FLAGS.diag_lambda)\n\n (_, val_features_batches, val_support_batches, y_val_batches,\n val_mask_batches) = utils.preprocess(full_adj, test_feats, y_val, val_mask,\n np.arange(num_data),\n FLAGS.num_clusters_val,\n FLAGS.diag_lambda)\n\n (_, test_features_batches, test_support_batches, y_test_batches,\n test_mask_batches) = utils.preprocess(full_adj, test_feats, y_test,\n test_mask, np.arange(num_data),\n FLAGS.num_clusters_test,\n 
FLAGS.diag_lambda)\n idx_parts = list(range(len(parts)))\n\n # Some preprocessing\n model_func = models.GCN\n\n # Define placeholders\n placeholders = {\n 'support':\n tf.sparse_placeholder(tf.float32),\n 'features':\n tf.placeholder(tf.float32),\n 'labels':\n tf.placeholder(tf.float32, shape=(None, y_train.shape[1])),\n 'labels_mask':\n tf.placeholder(tf.int32),\n 'dropout':\n tf.placeholder_with_default(0., shape=()),\n 'num_features_nonzero':\n tf.placeholder(tf.int32) # helper variable for sparse dropout\n }\n\n # Create model\n model = model_func(\n placeholders,\n input_dim=test_feats.shape[1],\n logging=True,\n multilabel=FLAGS.multilabel,\n norm=FLAGS.layernorm,\n precalc=FLAGS.precalc,\n num_layers=FLAGS.num_layers)\n\n # Initialize session\n sess = tf.Session()\n tf.set_random_seed(seed)\n\n # Init variables\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n cost_val = []\n total_training_time = 0.0\n # Train model\n for epoch in range(FLAGS.epochs):\n t = time.time()\n np.random.shuffle(idx_parts)\n if FLAGS.bsize > 1:\n (features_batches, support_batches, y_train_batches,\n train_mask_batches) = utils.preprocess_multicluster(\n train_adj, parts, train_feats, y_train, train_mask,\n FLAGS.num_clusters, FLAGS.bsize, FLAGS.diag_lambda)\n for pid in range(len(features_batches)):\n # Use preprocessed batch data\n features_b = features_batches[pid]\n support_b = support_batches[pid]\n y_train_b = y_train_batches[pid]\n train_mask_b = train_mask_batches[pid]\n # Construct feed dictionary\n feed_dict = utils.construct_feed_dict(features_b, support_b, y_train_b,\n train_mask_b, placeholders)\n feed_dict.update({placeholders['dropout']: FLAGS.dropout})\n # Training step\n outs = sess.run([model.opt_op, model.loss, model.accuracy],\n feed_dict=feed_dict)\n else:\n np.random.shuffle(idx_parts)\n for pid in idx_parts:\n # Use preprocessed batch data\n features_b = features_batches[pid]\n support_b = support_batches[pid]\n y_train_b = y_train_batches[pid]\n train_mask_b = train_mask_batches[pid]\n # Construct feed dictionary\n feed_dict = utils.construct_feed_dict(features_b, support_b, y_train_b,\n train_mask_b, placeholders)\n feed_dict.update({placeholders['dropout']: FLAGS.dropout})\n # Training step\n outs = sess.run([model.opt_op, model.loss, model.accuracy],\n feed_dict=feed_dict)\n\n total_training_time += time.time() - t\n print_str = 'Epoch: %04d ' % (epoch + 1) + 'training time: {:.5f} '.format(\n total_training_time) + 'train_acc= {:.5f} '.format(outs[2])\n\n # Validation\n if FLAGS.validation:\n cost, acc, micro, macro = evaluate(sess, model, val_features_batches,\n val_support_batches, y_val_batches,\n val_mask_batches, val_data,\n placeholders)\n cost_val.append(cost)\n print_str += 'val_acc= {:.5f} '.format(\n acc) + 'mi F1= {:.5f} ma F1= {:.5f} '.format(micro, macro)\n\n tf.logging.info(print_str)\n\n if epoch > FLAGS.early_stopping and cost_val[-1] > np.mean(\n cost_val[-(FLAGS.early_stopping + 1):-1]):\n tf.logging.info('Early stopping...')\n break\n\n tf.logging.info('Optimization Finished!')\n\n # Save model\n saver.save(sess, FLAGS.save_name)\n\n # Load model (using CPU for inference)\n with tf.device('/cpu:0'):\n sess_cpu = tf.Session(config=tf.ConfigProto(device_count={'GPU': 0}))\n sess_cpu.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n saver.restore(sess_cpu, FLAGS.save_name)\n # Testing\n test_cost, test_acc, micro, macro = evaluate(\n sess_cpu, model, test_features_batches, test_support_batches,\n y_test_batches, 
test_mask_batches, test_data, placeholders)\n print_str = 'Test set results: ' + 'cost= {:.5f} '.format(\n test_cost) + 'accuracy= {:.5f} '.format(\n test_acc) + 'mi F1= {:.5f} ma F1= {:.5f}'.format(micro, macro)\n tf.logging.info(print_str)", "def FeedTensor(tensor, array, force_cpu=False, dtype=None):\n name = tensor.name if hasattr(tensor, 'name') else str(tensor)\n if force_cpu is True:\n dev = utils.MakeDeviceOption(0, 0)\n else:\n from dragon.core.scope import _DEVICE_SCOPE\n if _DEVICE_SCOPE != '':\n supports = {'/cpu': 0, '/gpu': 1, '/mlu': 2}\n dev = pb.DeviceOption()\n dev.device_type = supports[_DEVICE_SCOPE.split(':')[0]]\n dev.device_id = int(_DEVICE_SCOPE.split(':')[1])\n else:\n from dragon.config import option\n if option['device'] == 'CUDA':\n dev = utils.MakeDeviceOption(1, option['device_id'])\n else:\n dev = utils.MakeDeviceOption(0, 0)\n\n if not isinstance(array, np.ndarray):\n auto_data_type = np.float32 if dtype is None else dtype\n else:\n auto_data_type = array.dtype if dtype is None else dtype\n\n if hasattr(tensor, 'dtype') and tensor.dtype is not None:\n if tensor.dtype not in _DATA_TYPES:\n raise TypeError('Unsupported data types: {}.'.format(tensor.dtype))\n preset_data_type = _DATA_TYPES[tensor.dtype]\n if dtype is not None:\n if dtype != preset_data_type:\n raise TypeError('The preset data type is {}, but force to {}.'.\n format(preset_data_type, dtype))\n auto_data_type = preset_data_type\n nd_array = np.array(array, dtype=auto_data_type, copy=False)\n FeedTensorCC(name, nd_array, _stringify_proto(dev))", "def train():\r\n with tf.Graph().as_default():\r\n global_step = tf.train.get_or_create_global_step()\r\n # Get images and labels for CIFAR-10.\r\n # Force input pipeline to CPU:0 to avoid operations sometimes ending up on\r\n # GPU and resulting in a slow down.\r\n with tf.device('/cpu:0'):\r\n signals, labels = cnnHAR.distorted_inputs()\r\n print('<<<<<<<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>')\r\n \r\n # Build a Graph that computes the logits predictions from the\r\n # inference model.\r\n #training = tf.placeholder(tf.bool)\r\n \r\n pool11=cnnHAR.inference_cov11(signals,'_01')\r\n reshape1=cnnHAR.inference_cov21(pool11,'_01')\r\n local21=cnnHAR.inference_local21(reshape1,'_01')\r\n local31=cnnHAR.inference_local31(local21,'_01')\r\n local41=cnnHAR.inference_local41(local31,'_01')\r\n logits1=cnnHAR.inference_output1(local41,'_01')\r\n \r\n \r\n pool12=cnnHAR.inference_cov11(signals,'_02')\r\n reshape2=cnnHAR.inference_cov21(pool12,'_02')\r\n local22=cnnHAR.inference_local21(reshape2,'_02')\r\n local32=cnnHAR.inference_local31(local22,'_02')\r\n local42=cnnHAR.inference_local41(local32,'_02')\r\n logits2=cnnHAR.inference_output1(local42,'_02')\r\n \r\n \r\n pool13=cnnHAR.inference_cov11(signals,'_03')\r\n reshape3=cnnHAR.inference_cov21(pool13,'_03')\r\n local23=cnnHAR.inference_local21(reshape3,'_03')\r\n local33=cnnHAR.inference_local31(local23,'_03')\r\n local43=cnnHAR.inference_local41(local33,'_03')\r\n logits3=cnnHAR.inference_output1(local43,'_03')\r\n \r\n \r\n pool14=cnnHAR.inference_cov11(signals,'_04')\r\n reshape4=cnnHAR.inference_cov21(pool14,'_04')\r\n local24=cnnHAR.inference_local21(reshape4,'_04')\r\n local34=cnnHAR.inference_local31(local24,'_04')\r\n local44=cnnHAR.inference_local41(local34,'_04')\r\n logits4=cnnHAR.inference_output1(local44,'_04')\r\n\r\n \r\n pool15=cnnHAR.inference_cov11(signals,'_05')\r\n reshape5=cnnHAR.inference_cov21(pool15,'_05')\r\n local25=cnnHAR.inference_local21(reshape5,'_05')\r\n 
local35=cnnHAR.inference_local31(local25,'_05')\r\n local45=cnnHAR.inference_local41(local35,'_05')\r\n logits5=cnnHAR.inference_output1(local45,'_05')\r\n\r\n pool16=cnnHAR.inference_cov11(signals,'_06')\r\n reshape6=cnnHAR.inference_cov21(pool16,'_06')\r\n local26=cnnHAR.inference_local21(reshape6,'_06')\r\n local36=cnnHAR.inference_local31(local26,'_06')\r\n local46=cnnHAR.inference_local41(local36,'_06')\r\n logits6=cnnHAR.inference_output1(local46,'_06')\r\n \r\n\r\n loss1=cnnHAR.loss(logits1, labels,'_01')\r\n loss2=cnnHAR.loss(logits2, labels,'_02')\r\n loss3=cnnHAR.loss(logits3, labels,'_03')\r\n loss4=cnnHAR.loss(logits4, labels,'_04')\r\n loss5=cnnHAR.loss(logits5, labels,'_05')\r\n loss6=cnnHAR.loss(logits6, labels,'_06')\r\n \r\n train_op1 = cnnHAR.train(loss1, global_step,'_01')\r\n train_op2 = cnnHAR.train(loss2, global_step,'_02')\r\n train_op3 = cnnHAR.train(loss3, global_step,'_03')\r\n train_op4 = cnnHAR.train(loss4, global_step,'_04')\r\n train_op5 = cnnHAR.train(loss5, global_step,'_05')\r\n train_op6 = cnnHAR.train(loss6, global_step,'_06')\r\n \r\n \r\n extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\r\n class _LoggerHook(tf.train.SessionRunHook):\r\n \"\"\"Logs loss and runtime.\"\"\"\r\n\r\n def begin(self):\r\n self._step = -1\r\n self._start_time = time.time()\r\n\r\n def before_run(self, run_context):\r\n self._step += 1\r\n #print('~~~~~~~~~~~~~~~~before run1~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')\r\n #tmp = tf.concat([labels,signals],1)\r\n \r\n index=int(self._step%(num*7)/7)\r\n if index==0:\r\n return tf.train.SessionRunArgs(loss1)\r\n elif index==1:\r\n return tf.train.SessionRunArgs(loss2)\r\n elif index==2:\r\n return tf.train.SessionRunArgs(loss3)\r\n elif index==3:\r\n return tf.train.SessionRunArgs(loss4)\r\n elif index==4:\r\n return tf.train.SessionRunArgs(loss5)\r\n elif index==5:\r\n return tf.train.SessionRunArgs(loss6)\r\n \r\n # Asks for loss value.\r\n\r\n def after_run(self, run_context, run_values):\r\n# if self._step == 1000:\r\n# #tf.Session().run(tf.global_variables_initializer())\r\n# ndar = np.array(run_values.results)\r\n# np.savetxt(\"logits.csv\", ndar.reshape(128,256), delimiter=\",\")\r\n \r\n if int((self._step-1) / log_frequency)%10==0 and (self._step%(num*7)+1)%7==0 and int(self._step%(num*7)/7)==0:#(self._step-1) % (log_frequency)== 0:\r\n #print('~~~~~~~~~~~~~~~~after run1~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')\r\n '''\r\n current_time = time.time()\r\n duration = current_time - self._start_time\r\n self._start_time = current_time\r\n \r\n loss_value = run_values.results\r\n examples_per_sec = log_frequency * batch_size / duration\r\n sec_per_batch = float(duration / log_frequency)\r\n format_str = ('%s: step %d, loss = %.8f (%.1f examples/sec; %.3f '\r\n 'sec/batch)')\r\n '''\r\n format_str = ('%s: step %d loss%d=%0.8f')\r\n print(format_str % (datetime.now(), self._step+1, int(self._step%(num*7)/7)+1,run_values.results))\r\n \r\n class _LoggerHook2(tf.train.SessionRunHook):\r\n \"\"\"Logs signals.\"\"\"\r\n\r\n def begin(self):\r\n self._step = -1\r\n\r\n def before_run(self, run_context):\r\n self._step += 1\r\n #print('~~~~~~~~~~~~~~~~before run2~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')\r\n return tf.train.SessionRunArgs(logits) # Asks for logits.\r\n\r\n def after_run(self, run_context, run_values):\r\n if self._step == max_steps-1:#:\r\n print('~~~~~~~~~~~~~~~~after run2~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')\r\n ndar = np.array(run_values.results)\r\n np.savetxt(\"logits\"+str(self._step)+\".csv\", ndar.reshape(batch_size,NUM_CLASSES), 
delimiter=\",\")\r\n\r\n class _LoggerHook3(tf.train.SessionRunHook):\r\n \"\"\"Logs labels.\"\"\"\r\n\r\n def begin(self):\r\n self._step = -1\r\n\r\n def before_run(self, run_context):\r\n self._step += 1\r\n return tf.train.SessionRunArgs(labels) # Asks for labels.\r\n\r\n def after_run(self, run_context, run_values):\r\n if self._step == max_steps-1:\r\n ndar = np.array(run_values.results)\r\n np.savetxt(\"labels\"+str(self._step)+\".csv\", ndar.reshape(batch_size,NUM_CLASSES), delimiter=\",\")\r\n\r\n class _LoggerHook4(tf.train.SessionRunHook):\r\n \"\"\"Logs signals.\"\"\"\r\n\r\n def begin(self):\r\n self._step = -1\r\n\r\n def before_run(self, run_context):\r\n self._step += 1\r\n #print('~~~~~~~~~~~~~~~~before run4~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')\r\n #return tf.train.SessionRunArgs(signals) # Asks for signals.\r\n\r\n def after_run(self, run_context, run_values):\r\n if (self._step+1)% (50*log_frequency) == 0:\r\n #if self._step == max_steps-1:#:\r\n #print('~~~~~~~~~~~~~~~~after run4~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')\r\n cnnHAR_eval.main()\r\n\r\n with tf.train.MonitoredTrainingSession(\r\n checkpoint_dir=train_dir,\r\n hooks=[tf.train.StopAtStepHook(last_step=max_steps),\r\n #tf.train.NanTensorHook(loss),\r\n _LoggerHook(),\r\n #_LoggerHook2(),\r\n _LoggerHook4()],#,save_checkpoint_steps=5000\r\n config=tf.ConfigProto(\r\n log_device_placement=log_device_placement),save_checkpoint_steps=50*log_frequency) as mon_sess:\r\n ''',save_checkpoint_steps=10*log_frequency'''\r\n i=0\r\n while not mon_sess.should_stop():\r\n# mon_sess = tfdbg.LocalCLIDebugWrapperSession(mon_sess)\r\n #mon_sess.run([train_op1,extra_update_ops])\r\n #print('~~~~~~~~~~~~~~~~%d step:'%i)\r\n \r\n index=int(i%(num*7)/7)\r\n if index==0:\r\n #print('~~~~~~~~~~~~~~~~train_op1')\r\n mon_sess.run([train_op1,extra_update_ops])\r\n elif index==1:\r\n #print('~~~~~~~~~~~~~~~~train_op2')\r\n mon_sess.run([train_op2,extra_update_ops])\r\n elif index==2:\r\n #print('~~~~~~~~~~~~~~~~train_op3')\r\n mon_sess.run([train_op3,extra_update_ops])\r\n elif index==3:\r\n #print('~~~~~~~~~~~~~~~~train_op4')\r\n mon_sess.run([train_op4,extra_update_ops])\r\n elif index==4:\r\n #print('~~~~~~~~~~~~~~~~train_op5')\r\n mon_sess.run([train_op5,extra_update_ops])\r\n elif index==5:\r\n #print('~~~~~~~~~~~~~~~~train_op6')\r\n mon_sess.run([train_op6,extra_update_ops])\r\n '''\r\n elif index==6:\r\n #print('~~~~~~~~~~~~~~~~train_op1')\r\n mon_sess.run([train_op7,extra_update_ops])\r\n elif index==7:\r\n #print('~~~~~~~~~~~~~~~~train_op2')\r\n mon_sess.run([train_op8,extra_update_ops])\r\n elif index==8:\r\n #print('~~~~~~~~~~~~~~~~train_op3')\r\n mon_sess.run([train_op9,extra_update_ops])\r\n elif index==9:\r\n #print('~~~~~~~~~~~~~~~~train_op4')\r\n mon_sess.run([train_op10,extra_update_ops])\r\n elif index==10:\r\n #print('~~~~~~~~~~~~~~~~train_op5')\r\n mon_sess.run([train_op11,extra_update_ops])\r\n elif index==11:\r\n #print('~~~~~~~~~~~~~~~~train_op6')\r\n mon_sess.run([train_op12,extra_update_ops])\r\n '''\r\n i=i+1\r\n \r\n #print('~~~~~~~~~~~~~~~~one session ends~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')\r", "def evaluate(net,\n tokenizer, ner_tagger,\n device, eval_data_filepath, eval_preds_filepath,\n fb_passes = 1, text_length = 250, verbose=False):\n\n \"\"\" PREPADE DATA FOR PREDICTION \"\"\"\n dh = utils.HotPotDataHandler(eval_data_filepath)\n dev_data = dh.data_for_paragraph_selector()\n\n point_ids = [point[0] for point in dev_data] # needed to handle useless datapoints\n queries = [point[2] for point in dev_data]\n contexts = 
[point[3] for point in dev_data]\n\n graphs = [EntityGraph.EntityGraph(c,\n context_length=text_length,\n tagger=ner_tagger)\n for c in contexts]\n\n # if the NER in EntityGraph doesn't find entities, the datapoint is useless.\n useless_datapoint_inds = [i for i, g in enumerate(graphs) if not g.graph]\n queries = [q for i, q in enumerate(queries) if i not in useless_datapoint_inds]\n contexts = [c for i, c in enumerate(contexts) if i not in useless_datapoint_inds]\n graphs = [g for i, g in enumerate(graphs) if i not in useless_datapoint_inds]\n\n # required for prediction in the right format\n s_lens_batch = [utils.sentence_lengths(c, tokenizer) for c in contexts]\n\n # turn the texts into tensors in order to put them on the GPU\n qc_ids = [net.encoder.token_ids(q, c) for q, c in zip(queries, contexts)] # list[ (list[int], list[int]) ]\n q_ids, c_ids = list(zip(*qc_ids)) # tuple(list[int]), tuple(list[int])\n q_ids_list = [torch.tensor(q).to(device) for q in q_ids] # list[Tensor]\n c_ids_list = [torch.tensor(c).to(device) for c in c_ids] # list[Tensor]\n\n for i,g in enumerate(graphs):\n graphs[i].M = g.M.to(device) # work with enumerate to actually mutate the graph objects\n\n \"\"\" FORWARD PASSES \"\"\"\n answers = {} # {question_id: str} (either \"yes\", \"no\" or a string containing the answer)\n sp = {} # {question_id: list[list[paragraph_title, sent_num]]} (supporting sentences)\n\n # return useless datapoints unanswered\n for i in useless_datapoint_inds:\n answers[point_ids[i]] = \"noanswer\"\n sp[point_ids[i]] = []\n\n for i, (query, context, graph, s_lens) in enumerate(zip(q_ids_list, c_ids_list, graphs, s_lens_batch)):\n\n if verbose: print(queries[i])\n\n answer, sup_fact_pairs = predict(net, query, context, graph, tokenizer,\n s_lens, fb_passes=fb_passes) #TODO sort these parameters\n\n answers[dev_data[i][0]] = answer # {question_id: str}\n sp[dev_data[i][0]] = sup_fact_pairs # {question_id: list[list[paragraph_title, sent_num]]}\n\n if verbose: print(answer)\n\n with open(eval_preds_filepath, 'w') as f:\n json.dump( {\"answer\":answers, \"sp\":sp} , f)\n\n\n \"\"\" EVALUATION \"\"\"\n return official_eval_script.eval(eval_preds_filepath, eval_data_filepath) #TODO return aything else than the metrics?", "def IFFT_3D_CUDA( vol_gpu, F_vol_gpu ):\n vol_gpu_out = gpuarray.empty_like(vol_gpu) \n plan_inverse = cu_fft.Plan(vol_gpu_out.shape, np.complex64, np.float32) \n cu_fft.ifft(F_vol_gpu, vol_gpu_out, plan_inverse, True)\n vol_out = vol_gpu_out.get()\n print 'Success status:', np.allclose(vol_out, vol_gpu_out.get(), atol=1e-6)\n return vol_out", "def local_gpu_ger(node):\r\n gers = (tensor.blas_c.CGer,\r\n tensor.blas.Ger,\r\n tensor.blas_scipy.ScipyGer,\r\n )\r\n\r\n if isinstance(node.op, GpuFromHost):\r\n host_input = node.inputs[0]\r\n if host_input.owner and isinstance(host_input.owner.op, gers):\r\n z, a, x, y = host_input.owner.inputs\r\n return [gpu_ger_no_inplace(\r\n gpu_from_host(z),\r\n a,\r\n gpu_from_host(x),\r\n gpu_from_host(y)\r\n )]\r\n if isinstance(node.op, gers):\r\n z, a, x, y = node.inputs\r\n x_on_gpu = (x.owner and isinstance(x.owner.op, HostFromGpu))\r\n y_on_gpu = (y.owner and isinstance(y.owner.op, HostFromGpu))\r\n z_on_gpu = (z.owner and isinstance(z.owner.op, HostFromGpu))\r\n if x_on_gpu or y_on_gpu or z_on_gpu:\r\n return [host_from_gpu(\r\n gpu_ger_no_inplace(\r\n gpu_from_host(z),\r\n a,\r\n gpu_from_host(x),\r\n gpu_from_host(y)\r\n ))]\r\n return False", "def update_preprocessor(self, batch_size):\r\n # Update batch size.\r\n self.batch_size 
= batch_size\r\n\r\n # Set input tensor shape (NHWC).\r\n input_shape = [None] * 4\r\n input_shape[0] = self.batch_size\r\n input_shape[1] = self.height\r\n input_shape[2] = self.width\r\n input_shape[3] = 3\r\n \r\n assert len(input_shape) == 4\r\n for i in range(len(input_shape)):\r\n assert input_shape[i] >= 1\r\n input_format = None\r\n if input_shape[1] == 3:\r\n input_format = \"NCHW\"\r\n if input_shape[3] == 3:\r\n input_format = \"NHWC\"\r\n assert input_format in [\"NCHW\", \"NHWC\"]\r\n\r\n self.graph.inputs[0].shape = input_shape\r\n self.graph.inputs[0].dtype = np.float32\r\n\r\n self.infer()\r\n log.info(\"ONNX graph input shape: {} [NCHW format set]\".format(self.graph.inputs[0].shape))\r\n\r\n # Find the initial nodes of the graph, whatever the input is first connected to, and disconnect them.\r\n for node in [node for node in self.graph.nodes if self.graph.inputs[0] in node.inputs]:\r\n node.inputs.clear()\r\n\r\n # Get input tensor.\r\n # Convert to NCHW format if needed.\r\n input_tensor = self.graph.inputs[0]\r\n if input_format == \"NHWC\":\r\n input_tensor = self.graph.transpose(\"preprocessor/transpose\", input_tensor, [0, 3, 1, 2])\r\n\r\n # Mobilenets' and inception's backbones preprocessor.\r\n if (self.model == 'ssd_mobilenet_v2_keras' or self.model == 'ssd_mobilenet_v1_fpn_keras' \r\n or self.model == 'ssd_mobilenet_v2_fpn_keras' or self.model == \"faster_rcnn_inception_resnet_v2_keras\"):\r\n mul_const = np.expand_dims(np.asarray([0.007843137718737125, 0.007843137718737125, 0.007843137718737125], dtype=np.float32), axis=(0, 2, 3))\r\n sub_const = np.expand_dims(np.asarray([1, 1, 1], dtype=np.float32), axis=(0, 2, 3))\r\n mul_out = self.graph.elt_const(\"Mul\", \"preprocessor/scale\", input_tensor, mul_const)\r\n sub_out = self.graph.elt_const(\"Sub\", \"preprocessor/mean\", mul_out, sub_const)\r\n\r\n # Resnet backbones' preprocessor.\r\n elif (self.model == 'ssd_resnet50_v1_fpn_keras' or self.model == 'ssd_resnet101_v1_fpn_keras' \r\n or self.model == 'ssd_resnet152_v1_fpn_keras' or self.model == 'faster_rcnn_resnet50_keras' \r\n or self.model == 'faster_rcnn_resnet101_keras' or self.model == 'faster_rcnn_resnet152_keras'):\r\n sub_const = np.expand_dims(np.asarray([123.68000030517578, 116.77899932861328, 103.93900299072266], dtype=np.float32), axis=(0, 2, 3))\r\n sub_out = self.graph.elt_const(\"Sub\", \"preprocessor/mean\", input_tensor, sub_const)\r\n \r\n # Model is not supported.\r\n else:\r\n log.info(\"This model: {} is not supported\".format(self.model))\r\n sys.exit(1)\r\n\r\n # Find first Conv node and connect preprocessor directly to it.\r\n stem_name = \"StatefulPartitionedCall/\"\r\n stem = [node for node in self.graph.nodes if node.op == \"Conv\" and stem_name in node.name][0]\r\n log.info(\"Found {} node '{}' as stem entry\".format(stem.op, stem.name))\r\n stem.inputs[0] = sub_out[0]\r\n\r\n # Get rid of the last node in one of the preprocessing branches with first TensorListStack parent node\r\n concat_name = \"StatefulPartitionedCall/\"\r\n concat_node = [node for node in self.graph.nodes if node.op == \"Concat\" and concat_name in node.name][0]\r\n concat_node.outputs = []\r\n\r\n # Get rid of the last node in second preprocessing branch with parent second TensorListStack node:\r\n cast_name = \"StatefulPartitionedCall/\"\r\n cast_node = [node for node in self.graph.nodes if node.op == \"Tile\" and cast_name in node.name][0]\r\n cast_node.outputs = []\r\n\r\n # Reshape nodes tend to update the batch dimension to a fixed value of 1, they 
should use the batch size instead.\r\n for node in [node for node in self.graph.nodes if node.op == \"Reshape\"]:\r\n if type(node.inputs[1]) == gs.Constant and node.inputs[1].values[0] == 1:\r\n node.inputs[1].values[0] = self.batch_size\r\n\r\n self.infer()", "def cpu_fallback(flag=True):\n gpu_phy_devices = tf.config.list_physical_devices(\"GPU\")\n cpu_phy_devices = tf.config.list_physical_devices(\"CPU\")\n\n general_warning_msg = (\n f\"Tensorflow has already been initialized, {inspect.currentframe().f_code.co_name}() needs \"\n f\"to be called before any Tensorflow operation, as a result this function will have no effect\"\n )\n\n if flag is True:\n try:\n tf.config.set_visible_devices([], \"GPU\")\n except RuntimeError:\n warnings.warn(general_warning_msg)\n elif flag is False:\n try:\n tf.config.set_visible_devices(gpu_phy_devices, \"GPU\")\n except RuntimeError:\n warnings.warn(general_warning_msg)\n else:\n raise ValueError(\"Unknown flag, can only be True of False!\")", "def cudaLoader(model, train_set, val_set):\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n # device = ('cpu') # comment/uncomment to use GPU\n print(\"device:\\t\", device)\n\n if torch.cuda.device_count() > 1:\n print(\"Let's use\", torch.cuda.device_count(), \"GPUs!\")\n # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs\n model = nn.DataParallel(model)\n\n model.to(dtype = torch.double) #typecasting\n train_set = train_set.to(device)\n val_set = val_set.to(device)\n model = model.to(device);\n return model, train_set, val_set", "def prepare_processing_graph(self, model_settings):\n desired_samples = model_settings['desired_samples']\n self.wav_filename_placeholder_ = tf.placeholder(tf.string, [])\n wav_loader = io_ops.read_file(self.wav_filename_placeholder_)\n wav_decoder = contrib_audio.decode_wav(\n wav_loader, desired_channels=1, desired_samples=desired_samples)\n # Allow the audio sample's volume to be adjusted.\n self.foreground_volume_placeholder_ = tf.placeholder(tf.float32, [])\n scaled_foreground = tf.multiply(wav_decoder.audio,\n self.foreground_volume_placeholder_)\n # Shift the sample's start position, and pad any gaps with zeros.\n self.time_shift_padding_placeholder_ = tf.placeholder(tf.int32, [2, 2])\n self.time_shift_offset_placeholder_ = tf.placeholder(tf.int32, [2])\n padded_foreground = tf.pad(\n scaled_foreground,\n self.time_shift_padding_placeholder_,\n mode='CONSTANT')\n sliced_foreground = tf.slice(padded_foreground,\n self.time_shift_offset_placeholder_,\n [desired_samples, -1])\n # Mix in background noise.\n self.background_data_placeholder_ = tf.placeholder(tf.float32,\n [desired_samples, 1])\n self.background_volume_placeholder_ = tf.placeholder(tf.float32, [])\n background_mul = tf.multiply(self.background_data_placeholder_,\n self.background_volume_placeholder_)\n background_add = tf.add(background_mul, sliced_foreground)\n background_clamp = tf.clip_by_value(background_add, -1.0, 1.0)\n # Run the spectrogram and MFCC ops to get a 2D 'fingerprint' of the audio.\n spectrogram = contrib_audio.audio_spectrogram(\n background_clamp,\n window_size=model_settings['window_size_samples'],\n stride=model_settings['window_stride_samples'],\n magnitude_squared=True)\n self.mfcc_ = contrib_audio.mfcc(\n spectrogram,\n wav_decoder.sample_rate,\n dct_coefficient_count=model_settings['dct_coefficient_count'])", "def _prepare_batch(self, batch):\n try:\n import dgl\n except:\n raise ImportError('This class requires dgl.')\n\n inputs, labels, 
weights = batch\n dgl_graphs = [\n graph.to_dgl_graph(self_loop=self._self_loop) for graph in inputs[0]\n ]\n inputs = dgl.batch(dgl_graphs).to(self.device)\n _, labels, weights = super(MPNNModel, self)._prepare_batch(\n ([], labels, weights))\n return inputs, labels, weights", "def train_step(self, batch: dict, epoch: int):\n\n with torch.cuda.amp.autocast(self.mixed_precision):\n \n # Update momentum {key, pseudo} networks\n with torch.no_grad():\n self._momentum_update_key_net()\n self._momentum_update_pseudo_net()\n\n # Get data (3 views)\n x_q = batch['x1'].to(self.local_rank)\n x_k = batch['x2'].to(self.local_rank)\n x_ps = batch['x3'].to(self.local_rank)\n \n # Compute strong query features; (B, f)\n z_q = F.normalize(self.net_q(x_q), dim=1)\n\n with torch.no_grad():\n \n # Shuffle across nodes (gpus)\n x_k, idx_unshuffle_k = ForMoCo.batch_shuffle_ddp(x_k)\n x_ps, idx_unshuffle_ps = ForMoCo.batch_shuffle_ddp(x_ps)\n \n # Compute {key, pseudo} features; (B, f)\n z_k = F.normalize(self.net_k(x_k), dim=1)\n z_ps = F.normalize(self.net_ps(x_ps), dim=1)\n \n # Restore {key, pseudo} features to their original nodes\n z_k = ForMoCo.batch_unshuffle_ddp(z_k, idx_unshuffle_k)\n z_ps = ForMoCo.batch_unshuffle_ddp(z_ps, idx_unshuffle_ps)\n\n # Compute loss\n loss, logits, labels, loss_pseudo, probs_pseudo_neg = \\\n self.loss_function(z_q, z_ps, z_k, self.queue.buffer, threshold=self.threshold)\n \n # Backpropagate & update\n if loss_pseudo.isnan() or (epoch <= self.ramp_up):\n self.backprop(loss)\n else:\n alpha = 1.0\n self.backprop(loss + alpha * loss_pseudo)\n \n # Compute metrics\n with torch.no_grad():\n \n # Accuracy of true positives against all negatives\n rank_1 = TopKAccuracy(k=1)(logits, labels)\n \n # Accuracy of pseudo positives with ground truth labels\n above_threshold = probs_pseudo_neg.ge(self.threshold)\n num_pseudo = above_threshold.sum()\n \n # No pseudo positives may have been selected\n if self.queue.is_reliable and (num_pseudo > 0):\n labels_query = batch['y'].to(self.local_rank) # (B, )\n labels_queue = self.queue.labels # (k, )\n is_correct = labels_query.view(-1, 1).eq(labels_queue.view(1, -1)) # (B, 1) @ (1, k) -> (B, k)\n num_correct = is_correct.masked_select(above_threshold).sum()\n precision = torch.true_divide(num_correct, num_pseudo)\n else:\n num_correct = torch.zeros(1, dtype=torch.long, device=num_pseudo.device)\n precision = torch.zeros(1, dtype=torch.float32, device=num_pseudo.device)\n \n # Update memory queue\n self.queue.update(keys=z_k, labels=batch['y'].to(self.local_rank))\n\n return {\n 'loss': loss.detach(),\n 'loss_pseudo': loss_pseudo.detach(), # (1, ) or tensor(nan)\n 'rank@1': rank_1,\n 'num_correct': num_correct,\n 'num_pseudo': num_pseudo,\n 'precision': precision,\n }", "def evaluate(self, input_graph, dataloader, postprocess=None,\n metric=None, measurer=None, iteration=-1,\n tensorboard=False, fp32_baseline=False):\n sess_options = ort.SessionOptions()\n if measurer:\n # https://github.com/microsoft/onnxruntime/issues/7347\n cores_per_instance = int(os.environ.get('CORES_PER_INSTANCE'))\n assert cores_per_instance > 0, \"benchmark cores_per_instance should greater than 0\"\n sess_options.intra_op_num_threads = cores_per_instance\n session = ort.InferenceSession(input_graph.model.SerializeToString(), sess_options)\n if metric:\n metric.reset()\n if hasattr(metric, \"compare_label\") and not metric.compare_label:\n self.fp32_preds_as_label = True\n results = []\n\n ort_inputs = {}\n len_inputs = len(session.get_inputs())\n inputs_names = 
[session.get_inputs()[i].name for i in range(len_inputs)]\n for idx, (inputs, labels) in enumerate(dataloader):\n if not isinstance(labels, list):\n labels = [labels]\n if len_inputs == 1:\n ort_inputs.update({inputs_names[0]: inputs})\n else:\n assert len_inputs == len(inputs), \\\n 'number of input tensors must align with graph inputs' \n \n for i in range(len_inputs):\n # in case dataloader contains non-array input\n if not isinstance(inputs[i], np.ndarray):\n ort_inputs.update({inputs_names[i]: np.array(inputs[i])})\n else:\n ort_inputs.update({inputs_names[i]: inputs[i]}) \n\n if measurer is not None:\n measurer.start()\n predictions = session.run(None, ort_inputs)\n measurer.end()\n else:\n predictions = session.run(None, ort_inputs)\n\n if self.fp32_preds_as_label:\n self.fp32_results.append(predictions) if fp32_baseline else \\\n results.append(predictions)\n\n if postprocess is not None:\n predictions, labels = postprocess((predictions, labels))\n if metric is not None and not self.fp32_preds_as_label:\n metric.update(predictions, labels)\n if idx + 1 == iteration:\n break\n\n if self.fp32_preds_as_label:\n from neural_compressor.adaptor.ox_utils.util import collate_preds\n if fp32_baseline:\n results = collate_preds(self.fp32_results)\n metric.update(results, results)\n else:\n reference = collate_preds(self.fp32_results)\n results = collate_preds(results)\n metric.update(results, reference)\n\n acc = metric.result() if metric is not None else 0\n return acc", "def _preprocess_traced_tensor(self, tensor):\n\n def _detect_nan_inf(tensor):\n \"\"\"Trace function for detecting any NaN/Inf in the tensor.\"\"\"\n\n if tensor.dtype.is_floating:\n mask = math_ops.reduce_any(\n gen_math_ops.logical_or(\n gen_math_ops.is_nan(tensor), gen_math_ops.is_inf(tensor)))\n output_tensor = cond.cond(\n mask,\n lambda: constant_op.constant([1.0]),\n lambda: constant_op.constant([0.0]))\n else:\n output_tensor = constant_op.constant([0.0])\n return output_tensor\n\n def _compute_signature(tensor, tf_op, cast_to_f32=True):\n if cast_to_f32:\n tensor = math_ops.cast(tensor, dtypes.float32)\n output_tensor = tf_op(tensor)\n # Return type should be scalar. 
Set it if it does not have the\n # information.\n if not output_tensor.get_shape().is_fully_defined():\n output_tensor = array_ops.reshape(output_tensor, [])\n return output_tensor\n\n def _show_size(tensor):\n # In order to check the size of a tensor.\n # Not all sizes are known at the compile time, also, different replicas\n # sometimes get different sizes of tensors.\n # Collect it here to be used in merging replica data.\n tsize = _compute_signature(tensor, array_ops.size, cast_to_f32=False)\n # Cast to float32, so that it can be placed into same cache with other\n # signatures.\n return math_ops.cast(tsize, dtypes.float32)\n\n def _show_max(tensor, cast_to_f32=True):\n # returns -inf for empty tensor\n return _compute_signature(tensor, math_ops.reduce_max, cast_to_f32)\n\n def _show_min(tensor, cast_to_f32=True):\n # returns inf for empty tensor\n return _compute_signature(tensor, math_ops.reduce_min, cast_to_f32)\n\n def _show_norm(tensor, cast_to_f32=True):\n # returns 0 for empty tensor\n return _compute_signature(tensor, linalg_ops.norm, cast_to_f32)\n\n def _show_sparsity(tensor, cast_to_f32=True, tolerance=1e-06):\n # returns nan for empty tensor and treats nans as non-zero numbers\n def sparsity_fn(tensor):\n non_zeros = math_ops.greater_equal(math_ops.abs(tensor), tolerance)\n nans = math_ops.is_nan(tensor)\n return nn_impl.zero_fraction(math_ops.logical_or(non_zeros, nans))\n\n return _compute_signature(tensor, sparsity_fn, cast_to_f32)\n\n def _show_mean_and_variance(tensor, cast_to_f32=True):\n \"\"\"Returns the mean and variance of the given tensor.\"\"\"\n if cast_to_f32:\n tensor = math_ops.cast(tensor, dtypes.float32)\n # returns nan for empty tensor\n mean, var = nn_impl.moments(array_ops.reshape(tensor, [-1]), axes=[0])\n # The shape has to be 1. 
Set it if it does not have the information.\n if not mean.get_shape().is_fully_defined():\n mean = array_ops.reshape(mean, [])\n if not var.get_shape().is_fully_defined():\n var = array_ops.reshape(var, [])\n return mean, var\n\n def _show_max_abs(tensor, cast_to_f32=True):\n return _compute_signature(\n tensor, lambda t: math_ops.reduce_max(math_ops.abs(t)), cast_to_f32)\n\n if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_NAN_INF:\n return {self._parameters.trace_mode: _detect_nan_inf(tensor)}\n if (self._parameters.trace_mode ==\n tensor_tracer_flags.TRACE_MODE_PART_TENSOR):\n return {self._parameters.trace_mode: tensor}\n if (self._parameters.trace_mode in (\n tensor_tracer_flags.TRACE_MODE_FULL_TENSOR,\n tensor_tracer_flags.TRACE_MODE_FULL_TENSOR_SUMMARY)):\n return {self._parameters.trace_mode: tensor}\n if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_NORM:\n return {self._parameters.trace_mode: array_ops.reshape(\n _show_norm(tensor), [1])}\n if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_HISTORY:\n return {self._parameters.trace_mode: array_ops.reshape(\n _show_norm(tensor), [1])}\n if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_MAX_ABS:\n return {self._parameters.trace_mode: _show_max_abs(tensor)}\n\n if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_SUMMARY:\n tensor = math_ops.cast(tensor, dtypes.float32)\n result_dict = {}\n # Call mean and variance computation here to avoid adding the same nodes\n # twice.\n if (_TT_SUMMARY_MEAN in self._signature_types() or\n _TT_SUMMARY_VAR in self._signature_types()):\n mean, variance = _show_mean_and_variance(tensor, cast_to_f32=False)\n\n for signature_name, _ in sorted(self._signature_types().items(),\n key=lambda x: x[1]):\n if signature_name == _TT_SUMMARY_NORM:\n signature_result_tensor = _show_norm(tensor, cast_to_f32=False)\n elif signature_name == _TT_SUMMARY_MAX:\n signature_result_tensor = _show_max(tensor, cast_to_f32=False)\n elif signature_name == _TT_SUMMARY_MAX_ABS:\n signature_result_tensor = _show_max_abs(tensor, cast_to_f32=False)\n elif signature_name == _TT_SUMMARY_MIN:\n signature_result_tensor = _show_min(tensor, cast_to_f32=False)\n elif signature_name == _TT_SUMMARY_SPARSITY:\n signature_result_tensor = _show_sparsity(tensor)\n elif signature_name == _TT_SUMMARY_SIZE:\n signature_result_tensor = _show_size(tensor)\n elif signature_name == _TT_SUMMARY_MEAN:\n signature_result_tensor = mean\n elif signature_name == _TT_SUMMARY_VAR:\n signature_result_tensor = variance\n else:\n raise ValueError('Unknown signature type :%s.' 
% signature_name)\n\n result_dict[signature_name] = signature_result_tensor\n return result_dict\n\n raise RuntimeError(\n 'Unsupported signature for trace mode %s.'\n % self._parameters.trace_mode)", "def prepare_config(device='npu'):\n if device == 'npu':\n # config for Ascend processor\n config = tf.ConfigProto()\n custom_op = config.graph_options.rewrite_options.custom_optimizers.add()\n custom_op.name = \"NpuOptimizer\"\n custom_op.parameter_map[\"use_off_line\"].b = True\n custom_op.parameter_map[\"precision_mode\"].s = tf.compat.as_bytes(\"force_fp16\")\n custom_op.parameter_map[\"graph_run_mode\"].i = 0\n config.graph_options.rewrite_options.remapping = RewriterConfig.OFF\n custom_op.parameter_map[\"debug_dir\"].s = tf.compat.as_bytes(str(TMP))\n else:\n config = tf.ConfigProto()\n return config", "def prepare_processing_graph(self):\n desired_samples = self.model_settings['desired_samples']\n self.wav_filename_placeholder_ = tf.placeholder(tf.string, [])\n\n wav_loader = io_ops.read_file(self.wav_filename_placeholder_)\n wav_decoder = contrib_audio.decode_wav(wav_loader, desired_channels=1, desired_samples=desired_samples)\n # Allow the audio sample's volume to be adjusted.\n self.foreground_volume_placeholder_ = tf.placeholder(tf.float32, [])\n scaled_foreground = tf.multiply(wav_decoder.audio, self.foreground_volume_placeholder_)\n\n # Shift the sample's start position, and pad any gaps with zeros.\n self.time_shift_padding_placeholder_ = tf.placeholder(tf.int32, [2, 2])\n self.time_shift_offset_placeholder_ = tf.placeholder(tf.int32, [2])\n padded_foreground = tf.pad(scaled_foreground, self.time_shift_padding_placeholder_, mode='CONSTANT')\n sliced_foreground = tf.slice(padded_foreground, self.time_shift_offset_placeholder_, [desired_samples, -1])\n # Mix in background noise.\n self.background_data_placeholder_ = tf.placeholder(tf.float32, [desired_samples, 1])\n self.background_volume_placeholder_ = tf.placeholder(tf.float32, [])\n background_mul = tf.multiply(self.background_data_placeholder_, self.background_volume_placeholder_)\n background_add = tf.add(background_mul, sliced_foreground)\n background_clamp = tf.clip_by_value(background_add, -1.0, 1.0)\n\n # Run the spectrogram and MFCC ops to get a 2D 'fingerprint' of the audio.\n spectrogram = contrib_audio.audio_spectrogram(background_clamp,\n window_size=self.model_settings['window_size_samples'],\n stride=self.model_settings['window_stride_samples'],\n magnitude_squared=True)\n self.mfcc_ = contrib_audio.mfcc(spectrogram, wav_decoder.sample_rate,\n dct_coefficient_count=self.model_settings['dct_coefficient_count'])\n num_spectrogram_bins = spectrogram.shape[-1].value\n lower_edge_hertz, upper_edge_hertz, num_mel_bins = 80.0, 7600.0, self.model_settings['dct_coefficient_count']\n # linear_to_mel_weight_matrix is just filter-bank\n linear_to_mel_weight_matrix = tf.contrib.signal.linear_to_mel_weight_matrix(\n num_mel_bins, num_spectrogram_bins, self.model_settings['sample_rate'], lower_edge_hertz,\n upper_edge_hertz)\n mel_spectrograms = tf.tensordot(\n spectrogram, linear_to_mel_weight_matrix, 1)\n mel_spectrograms.set_shape(spectrogram.shape[:-1].concatenate(\n linear_to_mel_weight_matrix.shape[-1:]))\n self.mel_ = mel_spectrograms\n self.log_mel_ = tf.log(mel_spectrograms + 1e-6)", "def prepare_processing_graph(self, model_settings):\n desired_samples = model_settings['desired_samples']\n self.wav_filename_placeholder_ = tf.placeholder(tf.string, [])\n wav_loader = io_ops.read_file(self.wav_filename_placeholder_)\n 
wav_decoder = contrib_audio.decode_wav(\n wav_loader, desired_channels=1, desired_samples=desired_samples)\n # Allow the audio sample's volume to be adjusted.\n self.foreground_volume_placeholder_ = tf.placeholder(tf.float32, [])\n scaled_foreground = tf.multiply(wav_decoder.audio,\n self.foreground_volume_placeholder_)\n # Shift the sample's start position, and pad any gaps with zeros.\n self.time_shift_padding_placeholder_ = tf.placeholder(tf.int32, [2, 2])\n self.time_shift_offset_placeholder_ = tf.placeholder(tf.int32, [2])\n padded_foreground = tf.pad(\n scaled_foreground,\n self.time_shift_padding_placeholder_,\n mode='CONSTANT')\n sliced_foreground = tf.slice(padded_foreground,\n self.time_shift_offset_placeholder_,\n [desired_samples, -1])\n # Mix in background noise.\n self.background_data_placeholder_ = tf.placeholder(tf.float32,\n [desired_samples, 1])\n self.background_volume_placeholder_ = tf.placeholder(tf.float32, [])\n background_mul = tf.multiply(self.background_data_placeholder_,\n self.background_volume_placeholder_)\n background_add = tf.add(background_mul, sliced_foreground)\n background_clamp = tf.clip_by_value(background_add, -1.0, 1.0)\n # Run the spectrogram and MFCC ops to get a 2D 'fingerprint' of the audio.\n spectrogram = contrib_audio.audio_spectrogram(\n background_clamp,\n window_size=model_settings['window_size_samples'],\n stride=model_settings['window_stride_samples'],\n magnitude_squared=True)\n if model_settings['use_mfcc'] == True:\n self.mfcc_ = contrib_audio.mfcc(\n spectrogram,\n wav_decoder.sample_rate,\n dct_coefficient_count=model_settings['dct_coefficient_count'])\n else:\n linear_to_mel_weight_matrix = tf.contrib.signal.linear_to_mel_weight_matrix(\n num_mel_bins=model_settings['dct_coefficient_count'], num_spectrogram_bins=spectrogram.shape[-1].value,\n sample_rate=model_settings['sample_rate'], upper_edge_hertz=7600.0, lower_edge_hertz=80.0)\n self.mfcc_ = tf.tensordot(spectrogram, linear_to_mel_weight_matrix, 1)\n self.mfcc_.set_shape(spectrogram.shape[:-1].concatenate(linear_to_mel_weight_matrix.shape[-1:]))", "def train(molecule: Sequence[system.Atom],\n spins: Tuple[int, int],\n batch_size: int,\n network_config: Optional[NetworkConfig] = None,\n pretrain_config: Optional[PretrainConfig] = None,\n optim_config: Optional[OptimConfig] = None,\n kfac_config: Optional[KfacConfig] = None,\n mcmc_config: Optional[MCMCConfig] = None,\n logging_config: Optional[LoggingConfig] = None,\n multi_gpu: bool = False,\n double_precision: bool = False,\n graph_path: Optional[str] = None):\n\n if not mcmc_config:\n mcmc_config = MCMCConfig()\n if not logging_config:\n logging_config = LoggingConfig()\n if not pretrain_config:\n pretrain_config = PretrainConfig()\n if not optim_config:\n optim_config = OptimConfig()\n if not kfac_config:\n kfac_config = KfacConfig()\n if not network_config:\n network_config = NetworkConfig()\n\n nelectrons = sum(spins)\n precision = tf.float64 if double_precision else tf.float32\n\n if multi_gpu:\n strategy = tf.distribute.MirroredStrategy()\n else:\n # Get the default (single-device) strategy.\n strategy = tf.distribute.get_strategy()\n if multi_gpu:\n batch_size = batch_size // strategy.num_replicas_in_sync\n logging.info('Setting per-GPU batch size to %s.', batch_size)\n logging_config.replicas = strategy.num_replicas_in_sync\n logging.info('Running on %s replicas.', strategy.num_replicas_in_sync)\n\n # Create a re-entrant variable scope for network.\n with tf.variable_scope('model') as model:\n pass\n\n with 
strategy.scope():\n with tf.variable_scope(model, auxiliary_name_scope=False) as model1:\n with tf.name_scope(model1.original_name_scope):\n fermi_net = networks.FermiNet(\n atoms=molecule,\n nelectrons=spins,\n slater_dets=network_config.determinants,\n hidden_units=network_config.hidden_units,\n after_det=network_config.after_det,\n architecture=network_config.architecture,\n r12_ee_features=network_config.r12_ee_features,\n r12_en_features=network_config.r12_en_features,\n pos_ee_features=network_config.pos_ee_features,\n build_backflow=network_config.build_backflow,\n use_backflow=network_config.backflow,\n jastrow_en=network_config.jastrow_en,\n jastrow_ee=network_config.jastrow_ee,\n jastrow_een=network_config.jastrow_een,\n logdet=True,\n envelope=network_config.use_envelope,\n residual=network_config.residual,\n pretrain_iterations=pretrain_config.iterations)\n\n scf_approx = scf.Scf(\n molecule,\n nelectrons=spins,\n restricted=False,\n basis=pretrain_config.basis)\n if pretrain_config.iterations > 0:\n scf_approx.run()\n\n hamiltonian_ops = hamiltonian.operators(molecule, nelectrons)\n if mcmc_config.init_means:\n if len(mcmc_config.init_means) != 3 * nelectrons:\n raise RuntimeError('Initial electron positions of incorrect shape. '\n '({} not {})'.format(\n len(mcmc_config.init_means), 3 * nelectrons))\n init_means = [float(x) for x in mcmc_config.init_means]\n else:\n init_means = assign_electrons(molecule, spins)\n\n # Build the MCMC state inside the same variable scope as the network.\n with tf.variable_scope(model, auxiliary_name_scope=False) as model1:\n with tf.name_scope(model1.original_name_scope):\n data_gen = mcmc.MCMC(\n fermi_net,\n batch_size,\n init_mu=init_means,\n init_sigma=mcmc_config.init_width,\n move_sigma=mcmc_config.move_width,\n dtype=precision)\n with tf.variable_scope('HF_data_gen'):\n hf_data_gen = mcmc.MCMC(\n scf_approx.tf_eval_slog_hartree_product,\n batch_size,\n init_mu=init_means,\n init_sigma=mcmc_config.init_width,\n move_sigma=mcmc_config.move_width,\n dtype=precision)\n\n with tf.name_scope('learning_rate_schedule'):\n global_step = tf.train.get_or_create_global_step()\n lr = optim_config.learning_rate * tf.pow(\n (1.0 / (1.0 + (tf.cast(global_step, tf.float32) /\n optim_config.learning_rate_delay))),\n optim_config.learning_rate_decay)\n\n if optim_config.learning_rate < 1.e-10:\n logging.warning('Learning rate less than 10^-10. 
Not using an optimiser.')\n optim_fn = lambda _: None\n update_cached_data = None\n elif optim_config.use_kfac:\n cached_data = tf.get_variable(\n 'MCMC_cache',\n initializer=tf.zeros(shape=data_gen.walkers.shape, dtype=precision),\n use_resource=True,\n trainable=False,\n dtype=precision,\n )\n if kfac_config.adapt_damping:\n update_cached_data = tf.assign(cached_data, data_gen.walkers)\n else:\n update_cached_data = None\n optim_fn = lambda layer_collection: mean_corrected_kfac_opt.MeanCorrectedKfacOpt( # pylint: disable=g-long-lambda\n invert_every=kfac_config.invert_every,\n cov_update_every=kfac_config.cov_update_every,\n learning_rate=lr,\n norm_constraint=kfac_config.norm_constraint,\n damping=kfac_config.damping,\n cov_ema_decay=kfac_config.cov_ema_decay,\n momentum=kfac_config.momentum,\n momentum_type=kfac_config.momentum_type,\n loss_fn=lambda x: tf.nn.l2_loss(fermi_net(x)[0]),\n train_batch=data_gen.walkers,\n prev_train_batch=cached_data,\n layer_collection=layer_collection,\n batch_size=batch_size,\n adapt_damping=kfac_config.adapt_damping,\n is_chief=True,\n damping_adaptation_decay=kfac_config.damping_adaptation_decay,\n damping_adaptation_interval=kfac_config.damping_adaptation_interval,\n min_damping=kfac_config.min_damping,\n use_passed_loss=False,\n estimation_mode='exact',\n )\n else:\n adam = tf.train.AdamOptimizer(lr)\n optim_fn = lambda _: adam\n update_cached_data = None\n\n qmc_net = qmc.QMC(\n hamiltonian_ops,\n fermi_net,\n data_gen,\n hf_data_gen,\n clip_el=optim_config.clip_el,\n check_loss=optim_config.check_loss,\n )\n\n qmc_net.train(\n optim_fn,\n optim_config.iterations,\n logging_config,\n using_kfac=optim_config.use_kfac,\n strategy=strategy,\n scf_approx=scf_approx,\n global_step=global_step,\n determinism_mode=optim_config.deterministic,\n cached_data_op=update_cached_data,\n write_graph=os.path.abspath(graph_path) if graph_path else None,\n burn_in=mcmc_config.burn_in,\n mcmc_steps=mcmc_config.steps,\n )", "def to_var(x, requires_grad=False):\n if isinstance(x, list):\n for k in x:\n if torch.cuda.is_available():\n k = k.cuda()\n k = torch.tensor(k, requires_grad=requires_grad)\n else:\n if torch.cuda.is_available():\n x = x.cuda()\n x = torch.tensor(x, requires_grad=requires_grad)\n\n return x", "def inference(image, keep_prob):\r\n '''\r\n print(\"setting up vgg initialized conv layers ...\")\r\n model_data = utils.get_model_data(FLAGS.model_dir, MODEL_URL)\r\n\r\n mean = model_data['normalization'][0][0][0]\r\n mean_pixel = np.mean(mean, axis=(0, 1))\r\n\r\n weights = np.squeeze(model_data['layers'])\r\n print(\"weights.shape\",weights.shape)\r\n\r\n processed_image = utils.process_image(image, mean_pixel)'''\r\n\r\n with tf.variable_scope(\"inference\"):\r\n pooling_net,conv_final_layer = inference_op(image)\r\n #conv_final_layer = image_net[\"conv5_3\"]\r\n\r\n pool5 = utils.max_pool_2x2(conv_final_layer)\r\n\r\n W6 = utils.weight_variable([7, 7, 512, 4096], name=\"W6\")\r\n b6 = utils.bias_variable([4096], name=\"b6\")\r\n conv6 = utils.conv2d_basic(pool5, W6, b6)\r\n relu6 = tf.nn.relu(conv6, name=\"relu6\")\r\n if FLAGS.debug:\r\n utils.add_activation_summary(relu6)\r\n relu_dropout6 = tf.nn.dropout(relu6, keep_prob=keep_prob)\r\n\r\n W7 = utils.weight_variable([1, 1, 4096, 4096], name=\"W7\")\r\n b7 = utils.bias_variable([4096], name=\"b7\")\r\n conv7 = utils.conv2d_basic(relu_dropout6, W7, b7)\r\n relu7 = tf.nn.relu(conv7, name=\"relu7\")\r\n if FLAGS.debug:\r\n utils.add_activation_summary(relu7)\r\n relu_dropout7 = tf.nn.dropout(relu7, 
keep_prob=keep_prob)\r\n\r\n W8 = utils.weight_variable([1, 1, 4096, NUM_OF_CLASSESS], name=\"W8\")\r\n b8 = utils.bias_variable([NUM_OF_CLASSESS], name=\"b8\")\r\n conv8 = utils.conv2d_basic(relu_dropout7, W8, b8)\r\n # annotation_pred1 = tf.argmax(conv8, dimension=3, name=\"prediction1\")\r\n\r\n # now to upscale to actual image size\r\n deconv_shape1 = pooling_net[\"pool4\"].get_shape()\r\n W_t1 = utils.weight_variable([4, 4, deconv_shape1[3].value, NUM_OF_CLASSESS], name=\"W_t1\")\r\n b_t1 = utils.bias_variable([deconv_shape1[3].value], name=\"b_t1\")\r\n # 对第8层的结果进行反卷积(上采样),通道数也由NUM_OF_CLASSESS变为第4层的通道数\r\n conv_t1 = utils.conv2d_transpose_strided(conv8, W_t1, b_t1, output_shape=tf.shape(pooling_net[\"pool4\"]))\r\n fuse_1 = tf.add(conv_t1, pooling_net[\"pool4\"], name=\"fuse_1\")\r\n\r\n deconv_shape2 = pooling_net[\"pool3\"].get_shape()\r\n W_t2 = utils.weight_variable([4, 4, deconv_shape2[3].value, deconv_shape1[3].value], name=\"W_t2\")\r\n b_t2 = utils.bias_variable([deconv_shape2[3].value], name=\"b_t2\")\r\n conv_t2 = utils.conv2d_transpose_strided(fuse_1, W_t2, b_t2, output_shape=tf.shape(pooling_net[\"pool3\"]))\r\n fuse_2 = tf.add(conv_t2, pooling_net[\"pool3\"], name=\"fuse_2\")\r\n\r\n shape = tf.shape(image)\r\n deconv_shape3 = tf.stack([shape[0], shape[1], shape[2], NUM_OF_CLASSESS])\r\n W_t3 = utils.weight_variable([16, 16, NUM_OF_CLASSESS, deconv_shape2[3].value], name=\"W_t3\")\r\n b_t3 = utils.bias_variable([NUM_OF_CLASSESS], name=\"b_t3\")\r\n conv_t3 = utils.conv2d_transpose_strided(fuse_2, W_t3, b_t3, output_shape=deconv_shape3, stride=8)\r\n\r\n annotation_pred = tf.argmax(conv_t3, dimension=3, name=\"prediction\")\r\n print(\"annotation_pred.shape\",annotation_pred.shape)\r\n print(\"conv_t3\",conv_t3)\r\n print(\"tf.expand_dims(annotation_pred, dim=3)\",tf.expand_dims(annotation_pred, dim=3))\r\n return tf.expand_dims(annotation_pred, dim=3), conv_t3", "def to_device(device, x):\n if device is None:\n return x\n elif device < 0:\n return cuda.to_cpu(x)\n else:\n return cuda.to_gpu(x, device)", "def __init__(self, cfg, task_queue, result_queue, gpu_id=None):\n super().__init__()\n self.cfg = cfg\n self.task_queue = task_queue\n self.result_queue = result_queue\n self.gpu_id = gpu_id\n\n self.device = (\n torch.device(\"cuda:{}\".format(self.gpu_id))\n if self.cfg.NUM_GPUS\n else \"cpu\"\n )", "def local_gpu_specifyShape_0(node):\r\n if isinstance(node.op, tensor.SpecifyShape):\r\n input = node.inputs[0]\r\n if input.owner and isinstance(input.owner.op, HostFromGpu):\r\n return [host_from_gpu(tensor.specify_shape(gpu_from_host(input),\r\n *node.inputs[1:]))]\r\n if isinstance(node.op, GpuFromHost):\r\n host_input = node.inputs[0]\r\n if host_input.owner and isinstance(host_input.owner.op,\r\n tensor.SpecifyShape):\r\n specifyshape_node = host_input.owner\r\n return [tensor.specify_shape(\r\n gpu_from_host(specifyshape_node.inputs[0]),\r\n *specifyshape_node.inputs[1:])]\r\n return False", "def build_tf_graph(self):\n raise NotImplementedError", "def convert_feed(g, op, block):\n\n if block is not None:\n ipt_name = op.output(\"Out\")[0]\n ipt_shape = block.var(ipt_name).shape\n ipt_dtype = block.var(ipt_name).dtype\n ipt_dtype = str(ipt_dtype).strip().split(\".\")[1]\n else:\n ipt_shape = op.shape\n ipt_dtype = str(op.dtype).strip().split(\".\")[1]\n ipt_name = op.name\n if g.shape_dict is not None:\n ipt_shape = g.shape_dict[ipt_name]\n\n if isinstance(ipt_shape, tuple):\n ipt_shape = list(ipt_shape)\n for i, s in enumerate(ipt_shape):\n if s < 0:\n ipt_shape[i] 
= _ty.Any()\n out = new_var(ipt_name, shape=ipt_shape, dtype=ipt_dtype)\n g.add_node(ipt_name, out)", "def local_gpu_dot22scalar(node):\r\n if isinstance(node.op, GpuFromHost):\r\n host_input = node.inputs[0]\r\n if (host_input.owner and\r\n isinstance(host_input.owner.op,\r\n tensor.blas.Dot22Scalar)):\r\n x, y, scalar = host_input.owner.inputs\r\n return [gpu_dot22scalar(gpu_from_host(x), gpu_from_host(y),\r\n tensor.blas._as_scalar(scalar))]\r\n if isinstance(node.op, tensor.blas.Dot22Scalar):\r\n if any([i.owner and isinstance(i.owner.op, HostFromGpu)\r\n for i in node.inputs]):\r\n x, y, scalar = node.inputs\r\n return [host_from_gpu(\r\n gpu_dot22scalar(gpu_from_host(x),\r\n gpu_from_host(y),\r\n tensor.blas._as_scalar(scalar)))]\r\n return False", "def make_gpu_optimizer(op, to_gpu):\r\n @theano.gof.local_optimizer([op, cuda.gpu_from_host])\r\n def local_to_gpu(node):\r\n \"\"\"\r\n op(host_from_gpu()) -> host_from_gpu(op)\r\n gpu_from_host(op) -> op(gpu_from_host)\r\n \"\"\"\r\n if isinstance(node.op, op):\r\n #op(host_from_gpu()) -> host_from_gpu(op)\r\n #If any of the input that go on the GPU are on the GPU,\r\n #move the op to the gpu.\r\n if any(node.inputs[idx].owner and\r\n isinstance(node.inputs[idx].owner.op, cuda.HostFromGpu)\r\n for idx in to_gpu):\r\n new_inp = list(node.inputs)\r\n for idx in to_gpu:\r\n new_inp[idx] = cuda.gpu_from_host(new_inp[idx])\r\n return [cuda.host_from_gpu(op()(*new_inp))]\r\n if node.op == cuda.gpu_from_host:\r\n #gpu_from_host(op) -> op(gpu_from_host)\r\n host_input = node.inputs[0]\r\n if host_input.owner and isinstance(host_input.owner.op,\r\n op):\r\n op_node = host_input.owner\r\n new_inp = list(op_node.inputs)\r\n for idx in to_gpu:\r\n new_inp[idx] = cuda.gpu_from_host(new_inp[idx])\r\n return [op()(*new_inp)]\r\n return False\r\n local_to_gpu.__name__ = \"local_to_gpu_\" + op.__name__\r\n cuda.opt.register_opt()(local_to_gpu)", "def cpu_expr_to_gpu(expr, unsafe=False):\n expr_ = T.cast(expr, 'float32')\n expr_ = theano.Out(theano.sandbox.cuda.basic_ops.gpu_from_host(expr_),\n borrow=unsafe)\n\n expr_.name = expr.name\n return expr_", "def handle_gpu_compatibility():\n try:\n gpus = tf.config.experimental.list_physical_devices(\"GPU\")\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n except Exception as e:\n print(e)", "def apply_fixpatches():\n\n # Fixes from 29c4b893f00aad811a0366622bd93d82ed46d665\n # https://github.com/tensorflow/tensorflow/commit/29c4b893f00aad811a0366622bd93d82ed46d665\n\n def sync_to_numpy_or_python_type(tensors):\n \"\"\"\n Syncs and converts a structure of `Tensor`s to `NumPy` arrays or\n Python scalar types. For each tensor, it calls `tensor.numpy()`.\n If the result is a scalar value, it converts it to a Python type,\n such as a float or int, by calling `result.item()`.\n Numpy scalars are converted, as Python types are often more\n convenient to deal with. This is especially useful for bfloat16\n Numpy scalars, which don't support as many operations as other\n Numpy values. 
Async strategies (such as `TPUStrategy` and\n `ParameterServerStrategy`) are forced to sync during this process.\n\n :param tensors: A structure of tensors.\n :return: `tensors`, but scalar tensors are converted to Python\n types and non-scalar tensors are converted to Numpy arrays.\n \"\"\"\n if isinstance(tensors, coordinator_lib.RemoteValue):\n return tensors.fetch()\n\n def _to_single_numpy_or_python_type(t):\n if isinstance(t, ops.Tensor):\n x = t.numpy()\n return x.item() if np.ndim(x) == 0 else x\n return t # Don't turn ragged or sparse tensors to NumPy.\n\n return nest.map_structure(_to_single_numpy_or_python_type, tensors)\n\n def _process_logs(self, logs):\n \"\"\"Turns tensors into numpy arrays or Python scalars if necessary.\"\"\"\n if logs is None:\n return {}\n return tf_utils.sync_to_numpy_or_python_type(logs)\n\n def _call_batch_hook_helper(self, hook_name, batch, logs):\n \"\"\"Helper function for `on_*_batch_*` methods.\"\"\"\n if self._check_timing:\n start_time = time.time()\n\n logs = self._process_logs(logs)\n for callback in self.callbacks:\n hook = getattr(callback, hook_name)\n hook(batch, logs)\n\n if self._check_timing:\n if hook_name not in self._hook_times:\n self._hook_times[hook_name] = []\n self._hook_times[hook_name].append(time.time() - start_time)\n\n def on_epoch_end(self: typing.Any, epoch: int, logs: dict = None) -> None:\n \"\"\"\n Calls the `on_epoch_end` methods of its callbacks.\n This function should only be called during TRAIN mode.\n\n :param epoch: Integer, index of epoch.\n :param logs: Dict, metric results for this training epoch, and for\n the validation epoch if validation is performed. Validation\n result keys are prefixed with `val_`.\n \"\"\"\n logs = self._process_logs(logs)\n for callback in self.callbacks:\n callback.on_epoch_end(epoch, logs)\n\n tf_utils.sync_to_numpy_or_python_type = sync_to_numpy_or_python_type\n callback_list_class = keras.callbacks.CallbackList\n callback_list_class._process_logs = _process_logs\n callback_list_class._call_batch_hook_helper = _call_batch_hook_helper\n callback_list_class.on_epoch_end = on_epoch_end", "def train_one_epoch(self):\n print('Training......')\n\n # set mode train\n self.network.train()\n\n # prepare data\n train_loss = 0\n transform = transforms.Compose([Rescale(params.rescale_size),\n RandomCrop(params.image_size),\n RandomHorizontalFlip(),\n ToTensor()\n ])\n\n\n\n dataset = Cityscapes(params.dataset_root, mode='train', transforms = transform)\n\n train_loader = DataLoader(dataset,\n batch_size=params.train_batch,\n shuffle=params.shuffle,\n num_workers=params.dataloader_workers)\n \n train_size = 1896\n if train_size % self.params.train_batch != 0:\n total_batch = train_size // self.params.train_batch + 1\n else:\n total_batch = train_size // self.params.train_batch\n recal = 0\n precision = 0\n F_one = 0\n IOU = 0\n accuracy_new = 0 \n # train through dataset\n for batch_idx, batch in enumerate(train_loader):\n self.pb.click(batch_idx, total_batch)\n image, label = batch['image'], batch['label']\n image_cuda, label_cuda = image.cuda(), label.cuda()\n\n # checkpoint split\n if self.params.should_split:\n image_cuda.requires_grad_()\n out = checkpoint_sequential(self.network, self.params.split, image_cuda)\n else:\n out = self.network(image_cuda)\n\n\n loss = self.loss_fn(out, label_cuda)\n \n #display_image(out, label_cuda)\n TP, FP, TN, FN = confusion(out, label_cuda)\n recal = recal+TP\n precision = precision+FP\n F_one = F_one + TN\n IOU = IOU+ FN \n accuracy_final = 
accuracy(out, label_cuda)\n accuracy_new = accuracy_new + accuracy_final\n\n # optimize\n self.opt.zero_grad()\n loss.backward()\n self.opt.step()\n\n # accumulate\n train_loss += loss.item()\n\n # record first loss\n if self.train_loss == []:\n self.train_loss.append(train_loss)\n self.summary_writer.add_scalar('loss/train_loss', train_loss, 0)\n \n print(\"\\t\")\n print(recal/total_batch, precision/ total_batch, F_one/ total_batch, IOU/ total_batch)\n print(accuracy_new/total_batch)\n \n self.pb.close()\n train_loss /= total_batch\n self.train_loss.append(train_loss)\n\n # add to summary\n self.summary_writer.add_scalar('loss/train_loss', train_loss, self.epoch)", "def inference(images):\n # We instantiate all variables using tf.get_variable() instead of\n # tf.Variable() in order to share variables across multiple GPU training runs.\n # If we only ran this model on a single GPU, we could simplify this function\n # by replacing all instances of tf.get_variable() with tf.Variable().\n #\n # conv1\n with tf.variable_scope('conv1') as scope:\n kernel = _variable_with_weight_decay('weights',\n shape=[5, 5, 3, 64],\n stddev=5e-2,\n wd=0.0)\n conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')\n biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))\n pre_activation = tf.nn.bias_add(conv, biases)\n conv1 = tf.nn.relu(pre_activation, name=scope.name)\n _activation_summary(conv1)\n\n # pool1\n pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],\n padding='SAME', name='pool1')\n # norm1\n norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,\n name='norm1')\n\n # conv2\n with tf.variable_scope('conv2') as scope:\n kernel = _variable_with_weight_decay('weights',\n shape=[5, 5, 64, 64],\n stddev=5e-2,\n wd=0.0)\n conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')\n biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))\n pre_activation = tf.nn.bias_add(conv, biases)\n conv2 = tf.nn.relu(pre_activation, name=scope.name)\n _activation_summary(conv2)\n\n # norm2\n norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,\n name='norm2')\n # pool2\n pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],\n strides=[1, 2, 2, 1], padding='SAME', name='pool2')\n\n # local3\n with tf.variable_scope('local3') as scope:\n # Move everything into depth so we can perform a single matrix multiply.\n reshape = tf.reshape(pool2, [FLAGS.batch_size, -1])\n dim = reshape.get_shape()[1].value\n weights = _variable_with_weight_decay('weights', shape=[dim, 384],\n stddev=0.04, wd=0.004)\n biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))\n local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)\n _activation_summary(local3)\n\n # local4\n with tf.variable_scope('local4') as scope:\n weights = _variable_with_weight_decay('weights', shape=[384, 192],\n stddev=0.04, wd=0.004)\n biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))\n local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)\n _activation_summary(local4)\n\n # linear layer(WX + b),\n # We don't apply softmax here because\n # tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits\n # and performs the softmax internally for efficiency.\n with tf.variable_scope('softmax_linear') as scope:\n weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],\n stddev=1/192.0, wd=0.0)\n biases = _variable_on_cpu('biases', [NUM_CLASSES],\n 
tf.constant_initializer(0.0))\n softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)\n _activation_summary(softmax_linear)\n\n return softmax_linear", "def with_cpu(ops, model):\n ...", "def forward(self, input, frame_index):\n losses = []\n offsets= []\n filters = []\n occlusions = []\n\n device = torch.cuda.current_device()\n # print(device)\n # s1 = torch.cuda.Stream(device=device, priority=5)\n # s2 = torch.cuda.Stream(device=device, priority=10) #PWC-Net is slow, need to have higher priority\n s1 = torch.cuda.current_stream()\n s2 = torch.cuda.current_stream()\n\n '''\n STEP 1: sequeeze the input \n '''\n if self.training == True:\n if self.temporal== False:\n assert input.size(0) == 3\n input_0,input_1,input_2 = torch.squeeze(input,dim=0) # input_2 middle\n input_3,input_4,input_5,input_6 = [],[],[],[]\n else:\n assert input.size(0) == 7\n input_0,input_1,input_2, input_3, input_4, input_5,input_6 = \\\n torch.squeeze(input,dim=0)\n else:\n if self.temporal == False:\n assert input.size(0) ==2\n input_0,input_2 = torch.squeeze(input,dim=0)\n input_1, input_3,input_4,input_5,input_6 = [],[],[],[],[]\n else:\n assert input.size(0) == 4\n input0,input_2,input_4,input_6 = torch.sequeeze(input,dim= 0)\n input_1,input_3,input_5,input7 = [],[],[],[]\n\n\n '''\n STEP 2: initialize the auxiliary input either from temporal or scale predecessor\n '''\n pre_scale_offset, pre_scale_filter, pre_scale_occlusion = None, None, None\n if self.temporal:\n pre_scale_offset_c, pre_scale_filter_c, pre_scale_occlusion_c = None, None, None\n pre_scale_offset_n, pre_scale_filter_n, pre_scale_occlusion_n = None, None, None\n\n '''\n STEP 3: iteratively execuate the Multiscale Network \n '''\n # from the coarser scale to the most\n for i in range(self.scale_num):\n\n '''\n STEP 3.1: prepare current scale inputs\n '''\n #prepare the input data of current scale\n cur_input_0 = F.avg_pool2d(input_0,pow(self.scale_ratio,self.scale_num - i - 1))\n if self.training == True:\n cur_input_1 = F.avg_pool2d(input_1,pow(self.scale_ratio,self.scale_num - i - 1))\n cur_input_2 = F.avg_pool2d(input_2,pow(self.scale_ratio,self.scale_num - i - 1))\n if self.temporal == True:\n # frame 3 is the central frame to be interpolated.\n if self.training == True:\n cur_input_3 = F.avg_pool2d(input_3, pow(self.scale_ratio,self.scale_num - i - 1))\n cur_input_4 = F.avg_pool2d(input_4, pow(self.scale_ratio,self.scale_num - i - 1))\n if self.training== True:\n cur_input_5 = F.avg_pool2d(input_5, pow(self.scale_ratio,self.scale_num - i - 1))\n cur_input_6 = F.avg_pool2d(input_6, pow(self.scale_ratio,self.scale_num - i - 1))\n\n '''\n STEP 3.2: concatenating the inputs.\n '''\n if i == 0:\n cur_offset_input = torch.cat((cur_input_0, cur_input_2), dim=1)\n cur_filter_input = cur_offset_input # torch.cat((cur_input_0, cur_input_2), dim=1)\n # cur_occlusion_input = cur_offset_input # torch.cat((cur_input_0, cur_input_2), dim=1)\n\n if self.temporal==True:\n # the central part\n cur_offset_input_c = torch.cat((cur_input_2,cur_input_4),dim = 1)\n cur_filter_input_c = cur_offset_input_c #torch.cat((cur_input_2,cur_input_4),dim =1)\n # cur_occlusion_input_c = cur_offset_input_c #torch.cat((cur_input_2,cur_input_4),dim =1)\n # the next part\n cur_offset_input_n = torch.cat((cur_input_4,cur_input_6),dim = 1)\n cur_filter_input_n = cur_offset_input_n# torch.cat((cur_input_4,cur_input_6),dim = 1)\n # cur_occlusion_input_n = cur_offset_input_n #torch.cat((cur_input_4,cur_input_6),dim = 1)\n # # to compose a enlarged batch with 
the three parts\n # cur_offset = torch.cat((cur_offset, cur_offset_c, cur_offset_n), dim=0)\n # cur_filter = torch.cat((cur_filter, cur_filter_c,cur_filter_n), dim=0)\n # cur_occlusion = torch.cat((cur_occlusion,cur_occlusion_c, cur_occlusion_n), dim=0)\n else:\n cur_offset_input = torch.cat((cur_input_0,cur_input_2,pre_scale_offset),dim=1)\n cur_filter_input = torch.cat((cur_input_0,cur_input_2,pre_scale_filter),dim =1)\n # cur_occlusion_input = torch.cat((cur_input_0,cur_input_2,pre_scale_occlusion),dim=1)\n\n if self.temporal ==True:\n cur_offset_input_c = torch.cat((cur_input_2, cur_input_4,pre_scale_offset_c),dim=1)\n cur_filter_input_c = torch.cat((cur_input_2,cur_input_4, pre_scale_filter_c),dim =1 )\n # cur_occlusion_input_c = torch.cat((cur_input_2,cur_input_4,pre_scale_occlusion_c),dim = 1)\n\n cur_offset_input_n = torch.cat((cur_input_4,cur_input_6,pre_scale_offset_n),dim=1)\n cur_filter_input_n = torch.cat((cur_input_4,cur_input_6,pre_scale_filter_n),dim=1)\n # cur_occlusion_input_n = torch.cat((cur_input_4,cur_input_6,pre_scale_occlusion_n),dim=1)\n\n # # to compose a enlarged batch with the three parts\n # cur_offset = torch.cat((cur_offset, cur_offset_c, cur_offset_n), dim=0)\n # cur_filter = torch.cat((cur_filter, cur_filter_c,cur_filter_n), dim=0)\n # cur_occlusion = torch.cat((cur_occlusion,cur_occlusion_c, cur_occlusion_n), dim=0)\n\n '''\n STEP 3.3: perform the estimation by the Three subpath Network \n '''\n if i ==0 :\n\n time_offsets = [ kk * self.timestep for kk in range(1, 1+self.numFrames,1)]\n\n if len(time_offsets) == 1:\n frame_index = [0]\n\n # always set depthNet to evaluation mode without optimizing its parameters.\n # self.depthNet = self.depthNet.eval()\n\n with torch.cuda.stream(s1):\n temp = self.depthNet(torch.cat((cur_filter_input[:, :3, ...],\n cur_filter_input[:, 3:, ...]),dim=0))\n log_depth = [temp[:cur_filter_input.size(0)], temp[cur_filter_input.size(0):]]\n\n # print(\"depth estimation time\")\n # print(time.time() - lasttime)\n # lasttime = time.time()\n\n # log_depth = [self.depthNet(cur_filter_input[:, :3, ...]),\n # self.depthNet(cur_filter_input[:, 3:, ...])]\n # combine the depth with context to\n cur_ctx_output = [\n torch.cat((self.ctxNet(cur_filter_input[:, :3, ...]),\n log_depth[0].detach()), dim=1),\n torch.cat((self.ctxNet(cur_filter_input[:, 3:, ...]),\n log_depth[1].detach()), dim=1)\n ]\n # print(\"context extraction time\")\n # print(time.time() - lasttime)\n # lasttime = time.time()\n temp = self.forward_singlePath(self.initScaleNets_filter, cur_filter_input, 'filter')\n cur_filter_output = [self.forward_singlePath(self.initScaleNets_filter1, temp, name=None),\n self.forward_singlePath(self.initScaleNets_filter2, temp, name=None)]\n\n # print(\"filter estimation time\")\n # print(time.time() - lasttime)\n # lasttime = time.time()\n # temp = self.forward_singlePath(self.initScaleNets_occlusion,cur_occlusion_input,'occlusion')\n # cur_occlusion_output = [self.forward_singlePath(self.initScaleNets_occlusion1,temp,name=None),\n # self.forward_singlePath(self.initScaleNets_occlusion2,temp,name=None)]\n\n depth_inv = [1e-6 + 1 / torch.exp(d) for d in log_depth]\n\n with torch.cuda.stream(s2):\n # use the occlusion as the depthmap outpu\n for _ in range(1):\n cur_offset_outputs = [\n self.forward_flownets(self.flownets, cur_offset_input, time_offsets=time_offsets, # F_0_t\n flowmethod=self.flowmethod),\n self.forward_flownets(self.flownets, torch.cat((cur_offset_input[:, 3:, ...], # F_1_t\n cur_offset_input[:, 0:3, ...]), dim=1),\n 
time_offsets=time_offsets[::-1],\n flowmethod=self.flowmethod)\n ]\n\n torch.cuda.synchronize() #synchronize s1 and s2\n\n for _ in range(1):\n cur_offset_outputs = [\n self.FlowProject(cur_offset_outputs[0],depth_inv[0],\n self.FlowProjection_threshhold,\n refinputs=[cur_offset_input[:,0:3,...],cur_offset_input[:,3:,...]] ),\n self.FlowProject(cur_offset_outputs[1],depth_inv[1],\n self.FlowProjection_threshhold,refinputs=[ cur_offset_input[:,3:,...], cur_offset_input[:,0:3,...]])\n ]\n\n # print(\"flow estimation time\")\n # print(time.time() - lasttime)\n\n # lasttime = time.time()\n depth_inv_maxreg = [d / torch.max(d) for d in depth_inv]\n cur_occlusion_output = [\n depth_inv_maxreg[0],depth_inv_maxreg[1]\n # Variable(torch.cuda.FloatTensor().resize_(cur_filter_input.size(0), 1, cur_filter_input.size(2),\n # cur_filter_input.size(3)).zero_()),\n # Variable(torch.cuda.FloatTensor().resize_(cur_filter_input.size(0), 1, cur_filter_input.size(2),\n # cur_filter_input.size(3)).zero_()),\n # 0.5 * Variable(torch.ones(cur_filter_input.size(0),1,cur_filter_input.size(2),cur_filter_input.size(3)).type(cur_filter_input.data.type())),\n # 0.5 * Variable(torch.ones(cur_filter_input.size(0),1,cur_filter_input.size(2),cur_filter_input.size(3)).type(cur_filter_input.data.type())),\n ]\n\n\n if self.temporal:\n cur_offset_output_c = self.forward_singlePath(self.initScaleNets_offset,cur_offset_input_c)\n cur_offset_output_n = self.forward_singlePath(self.initScaleNets_offset,cur_offset_input_n)\n\n cur_filter_output_c = self.forward_singlePath(self.initScaleNets_filter, cur_filter_input_c)\n cur_filter_output_n = self.forward_singlePath(self.initScaleNets_filter,cur_filter_input_n)\n\n cur_occlusion_output_c = self.forward_singlePath(self.initScaleNets_occlusion,cur_occlusion_input_c)\n cur_occlusion_output_n = self.forward_singlePath(self.initScaleNets_occlusion,cur_occlusion_input_n)\n else:\n cur_offset_output = self.forward_singlePath(self.iterScaleNets_offset, cur_offset_input)\n cur_filter_output = self.forward_singlePath(self.iterScaleNets_filter,cur_filter_input)\n cur_occlusion_output = self.forward_singlePath(self.iterScaleNets_occlusion,cur_occlusion_input)\n if self.temporal:\n cur_offset_output_c = self.forward_singlePath(self.iterScaleNets_offset,cur_offset_input_c)\n cur_offset_output_n = self.forward_singlePath(self.iterScaleNets_offset,cur_offset_input_n)\n\n cur_filter_output_c = self.forward_singlePath(self.iterScaleNets_filter,cur_filter_input_c)\n cur_filter_output_n = self.forward_singlePath(self.iterScaleNets_filter,cur_filter_input_n)\n\n # cur_occlusion_output_c = self.forward_singlePath(self.iterScaleNets_occlusion,cur_occlusion_input_c)\n # cur_occlusion_output_n = self.forward_singlePath(self.iterScaleNets_occlusion,cur_occlusion_input_n)\n\n '''\n STEP 3.4: perform the frame interpolation process \n '''\n\n\n\n timeoffset = time_offsets[frame_index[0]]\n temp_0 = cur_offset_outputs[0][frame_index[0]]\n temp_1 = cur_offset_outputs[1][frame_index[0]]\n cur_offset_output = [temp_0, temp_1]\n ctx0, ctx2 = self.FilterInterpolate_ctx(cur_ctx_output[0],cur_ctx_output[1],cur_offset_output,cur_filter_output, timeoffset)\n\n cur_output, ref0, ref2 = self.FilterInterpolate(cur_input_0, cur_input_2, cur_offset_output,\n cur_filter_output, self.filter_size ** 2,\n timeoffset)\n\n cur_occlusion_output = self.Interpolate_ch(cur_occlusion_output[0], cur_occlusion_output[1],\n cur_offset_output, 1)\n\n rectify_input = torch.cat((cur_output, ref0, ref2,\n cur_offset_output[0], 
cur_offset_output[1],\n cur_filter_output[0], cur_filter_output[1],\n ctx0, ctx2\n ), dim=1)\n\n cur_output_rectified = self.rectifyNet(rectify_input) + cur_output\n\n\n if self.temporal ==True:\n cur_output_c = self.Interpolate(cur_input_2,cur_input_4,cur_offset_output_c,cur_filter_output_c,cur_occlusion_output_c)\n cur_output_n = self.Interpolate(cur_input_4,cur_input_6,cur_offset_output_n,cur_filter_output_n,cur_occlusion_output_n)\n\n temp, forward = torch.split(cur_offset_output, 2, dim=1)\n forward = -forward\n backward, temp = torch.split(cur_offset_output_n,2,dim=1)\n backward = -backward\n\n cur_offset_sym = torch.cat((forward,backward),dim = 1)\n cur_filter_sym = cur_filter_output\n cur_occlusion_sym = cur_occlusion_output\n cur_output_sym = self.Interpolate(cur_input_2,cur_input_4,cur_offset_sym, cur_filter_sym,cur_occlusion_sym)\n\n\n '''\n STEP 3.5: for training phase, we collect the variables to be penalized.\n '''\n if self.training == True:\n losses +=[cur_output - cur_input_1]\n losses += [cur_output_rectified - cur_input_1] \n offsets +=[cur_offset_output]\n filters += [cur_filter_output]\n occlusions += [cur_occlusion_output]\n if self.temporal == True:\n losses+= [cur_output_c - cur_input_3]\n losses+= [cur_output_n - cur_input_5]\n losses+= [cur_output_c - cur_output_sym]\n\n '''\n STEP 3.6: prepare inputs for the next finer scale\n '''\n if self.scale_num > 1:\n ## prepare for the next finer scale's requirements.\n pre_scale_offset = F.upsample(cur_offset_output * self.scale_ratio, scale_factor=self.scale_ratio,mode='bilinear')\n pre_scale_filter = F.upsample(cur_filter_output, scale_factor=self.scale_ratio,mode='bilinear')\n pre_scale_occlusion = F.upsample(cur_offset_output, scale_factor=self.scale_ratio,mode='bilinear')\n if self.temporal == True:\n pre_scale_offset_c = F.upsample(cur_offset_output_c * self.scale_ratio, scale_factor= self.scale_ratio,mode='bilinear')\n pre_scale_filter_c = F.upsample(cur_filter_output_c, scale_factor=self.scale_ratio,mode='bilinear')\n pre_scale_occlusion_c = F.upsample(cur_occlusion_output_c, scale_factor=self.scale_ratio,mode='bilinear')\n\n pre_scale_offset_n = F.upsample(cur_offset_output_n * self.scale_ratio, scale_factor= self.scale_ratio,mode='bilinear')\n pre_scale_filter_n = F.upsample(cur_filter_output_n, scale_factor=self.scale_ratio, mode='bilinear')\n pre_scale_occlusion_n = F.upsample(cur_occlusion_output_n, scale_factor=self.scale_ratio, mode='bilinear')\n\n '''\n STEP 4: return the results\n '''\n if self.training == True:\n\n return losses, offsets,filters,occlusions\n else:\n # if in test phase, we directly return the interpolated frame\n if self.temporal == False:\n cur_outputs = [cur_output,cur_output_rectified]\n return cur_outputs,cur_offset_output,cur_filter_output,cur_occlusion_output\n else:\n return cur_output_c, cur_output_sym" ]
[ "0.5934677", "0.5921635", "0.58703035", "0.581513", "0.5797795", "0.5776158", "0.57602173", "0.57533664", "0.5746811", "0.5742912", "0.5733459", "0.5706499", "0.56763935", "0.56657827", "0.5658226", "0.5653087", "0.5644797", "0.5627843", "0.55857766", "0.55729073", "0.5570853", "0.55708265", "0.55682164", "0.5534825", "0.5534825", "0.5529216", "0.5528229", "0.5528229", "0.5521497", "0.55093825", "0.5472109", "0.5455533", "0.5453974", "0.54511297", "0.5450182", "0.54393643", "0.5429202", "0.54212755", "0.5418492", "0.54087013", "0.5403549", "0.5403547", "0.5388667", "0.538732", "0.53806204", "0.5374346", "0.53703606", "0.53663766", "0.535343", "0.53473127", "0.53473127", "0.5345339", "0.5339063", "0.53385174", "0.53346825", "0.5330102", "0.53291774", "0.53269273", "0.5326595", "0.5317602", "0.5310502", "0.5300201", "0.5292074", "0.5288236", "0.5280429", "0.52774996", "0.52760303", "0.52640736", "0.52614063", "0.5244552", "0.5243022", "0.5241128", "0.5234227", "0.52297807", "0.52284", "0.52229744", "0.5216933", "0.5214031", "0.51991266", "0.51928544", "0.51920044", "0.5190452", "0.51881975", "0.5180145", "0.51763976", "0.5174229", "0.51724786", "0.51697356", "0.51695496", "0.51692057", "0.5165782", "0.5163752", "0.51577526", "0.5156216", "0.51544815", "0.51530933", "0.5150452", "0.5150352", "0.5147993", "0.51476955" ]
0.69795823
0
The init of this class converts all of the downloaded data into usable lists, which can then be analysed or plotted through the use of other functions and modules.
def __init__(self, stock, start_date, end_date):
    try:
        self.data = yahoo_finance.Share(stock).get_historical(start_date, end_date)
        self.close = [dic['Close'] for dic in self.data]
        self.open = [dic['Open'] for dic in self.data]
        self.date = [dic['Date'] for dic in self.data]
    except Exception, error_StockClass__init__:
        print 'error_StockClass__init__: ', error_StockClass__init__
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n self._distance_data = []\n self._location_data = []\n self._package_data = []", "def __init__(self, dataset_dir, listfile=None):\n Reader.__init__(self, dataset_dir, listfile)\n self._data = [line.split(',') for line in self._data]\n\n def process_ihm(x):\n return list(map(int, x.split(';')))\n\n def process_los(x):\n x = x.split(';')\n if x[0] == '':\n return ([], [])\n return (list(map(int, x[:len(x)//2])), list(map(float, x[len(x)//2:])))\n\n def process_ph(x):\n return list(map(int, x.split(';')))\n\n def process_decomp(x):\n x = x.split(';')\n if x[0] == '':\n return ([], [])\n return (list(map(int, x[:len(x)//2])), list(map(int, x[len(x)//2:])))\n\n self._data = [(fname, float(t), process_ihm(ihm), process_los(los),\n process_ph(pheno), process_decomp(decomp))\n for fname, t, ihm, los, pheno, decomp in self._data]", "def __init__(self, **kwargs):\n DataLoader.__init__(self, **kwargs)\n \n self._results_ = None", "def __init__(self, dataset_dir, listfile=None):\n Reader.__init__(self, dataset_dir, listfile)\n self._data = [line.split(',') for line in self._data]\n self._data = [(x, float(t), float(y)) for (x, t, y) in self._data]", "def __init__(self, datafiles, plotter):\n self.datafiles = datafiles\n self.datasets = dict()\n self.plotter = plotter", "def __init__(self, dataset_dir, listfile=None):\n Reader.__init__(self, dataset_dir, listfile)\n self._data = [line.split(',') for line in self._data]\n self._data = [(x, float(t), int(y)) for (x, t, y) in self._data]", "def __init__(self, dataset_dir, listfile=None):\n Reader.__init__(self, dataset_dir, listfile)\n self._data = [line.split(',') for line in self._data]\n self._data = [(mas[0], float(mas[1]), list(map(int, mas[2:]))) for mas in self._data]", "def load_data(self):", "def __init__(self, objs): # file=None\n self.artists = objs\n self.art_num = len(objs)\n self.fig = self.ax = None\n self.lines, self.points = [], []\n self.time_template = self.time_text = None", "def __init__(self):\n self.data_set = []\n self.finalized_data = LogData()", "def __init__(self, dataset_dir, listfile=None, period_length=48.0):\n Reader.__init__(self, dataset_dir, listfile)\n self._data = [line.split(\",\") for line in self._data]\n self._data = [(x, int(y)) for (x, y) in self._data]\n self._period_length = period_length", "def __init__(self, data):\n self.data = self.get_api_reference_html(data)\n self.parsed_data = []", "def __init__(self):\n print ('Initializing Data reader object...')\n data_Train_Images, data_Train_Labels, data_Test_Image, data_Test_Labels = self.readDataFromFile()\n test_10k_x, test_10k_y, training_55k_x, training_55k_y, validation_5k_x, validation_5k_y = self.dataTransform(\n data_Test_Image, data_Test_Labels, data_Train_Images, data_Train_Labels)\n self.train = zip(training_55k_x, training_55k_y)\n self.valid = zip(validation_5k_x, validation_5k_y)\n self.test = zip(test_10k_x, test_10k_y)\n\n self.train_position = 0\n print ('Initialized!')", "def load_data(self) -> None:", "def __init__(self, dataset_dir, listfile=None, period_length=48.0):\n Reader.__init__(self, dataset_dir, listfile)\n self._data = [line.split(',') for line in self._data]\n self._data = [(x, int(y)) for (x, y) in self._data]\n self._period_length = period_length", "def __init__(self, data):\n\t\tassert isinstance(data, str), \"Data location must be provided in type 'str'!\"\n\t\t\n\t\t# load the location provided\n\t\tdata = json.loads(open(data).read())\n\n\t\t# check for correct format\n\t\tassert isinstance(data, list), 
\"Data must be of type 'list'!\"\n\n\t\tfor element in data:\n\t\t\tassert isinstance(element, dict), \"Each element of data must be of type 'dict'!\"\n\n\t\tself.data = data", "def collect_datset(self):\n response = requests.get(self.url)\n lines = response.text.splitlines()\n data = []\n for item in lines:\n item = item.split(\",\")\n data.append(item)\n data.pop(0) # to remove labels from list\n dataset = np.matrix(data)\n return dataset", "def __init__(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\n\n self.current_path = os.getcwd()\n self.data_path = self.current_path + \"/data\"\n\n self.original_files = {}\n self.imitation_files = {}\n self.original_test_files = {}\n self.imitation_test_files = {}\n\n self.training_set = None\n self.original_test_set = None\n self.imitation_test_set = None\n\n self.accuracy = 0.\n self.threshold = 0.\n\n self.get_files()", "def InitDataset(self):\n train_txt = 'ImageSets/Main/train.txt'\n val_txt = 'ImageSets/Main/val.txt'\n annotations = \"Annotations\"\n jpegimages = \"JPEGImages\"\n images_path = train_txt if (self.is_train) else val_txt \n images_path = readTxt(os.path.join(self.path, images_path))\n images_path.pop(-1)\n # rawdata format: [path_2_image, path_2_xml]\n rawData = list()\n for each in images_path:\n xml = os.path.join(self.path, annotations, each + '.xml')\n jpeg = os.path.join(self.path, jpegimages, each + '.jpg')\n rawData.append([jpeg, xml])\n return rawData", "def __init__(self):\n self._data=[]", "def __init__(self):\n\n data_extract=DataExtracter()\n self.data = tuple()", "def __init__(self, directory):\n self._path = os.path.join(\"../../datasets\", directory)\n self.airlines = pd.read_csv(os.path.join(self._path, 'airlines.csv'))\n self.airports = pd.read_csv(os.path.join(self._path, 'airports.csv'))\n self.planes = pd.read_csv(os.path.join(self._path, 'planes.csv'))\n self.countries = pd.read_csv(os.path.join(self._path, 'countries.csv'))\n self.routes = pd.read_csv(os.path.join(self._path, 'routes.csv'))\n self._CreateGraph()", "def _load_data(self):\n if self._api_response.status_code == 200:\n self._dataset = self._api_response.json()\n self._fill_day_dicts()", "def __init__(self):\n groups = [\n os.path.splitext(f)[0] for f in os.listdir(data_dir) if f.endswith(\".json\")\n ]\n\n self._data = {\n group: IndicatorGroup.parse_file(os.path.join(data_dir, f\"{group}.json\"))\n for group in groups\n }", "def __init__(self, dataList):\n xList = []\n yList = []\n\n for index in range(0, len(dataList)):\n xList.append(dataList[index][0])\n yList.append(dataList[index][1])\n self.xList = xList\n self.yList = yList\n self.dataList = dataList", "def load_data_list(self) -> List[dict]: # noqa: E501\n try:\n import lvis\n if getattr(lvis, '__version__', '0') >= '10.5.3':\n warnings.warn(\n 'mmlvis is deprecated, please install official lvis-api by \"pip install git+https://github.com/lvis-dataset/lvis-api.git\"', # noqa: E501\n UserWarning)\n from lvis import LVIS\n except ImportError:\n raise ImportError(\n 'Package lvis is not installed. Please run \"pip install git+https://github.com/lvis-dataset/lvis-api.git\".' 
# noqa: E501\n )\n self.lvis = LVIS(self.ann_file)\n self.cat_ids = self.lvis.get_cat_ids()\n self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}\n self.cat_img_map = copy.deepcopy(self.lvis.cat_img_map)\n\n img_ids = self.lvis.get_img_ids()\n data_list = []\n total_ann_ids = []\n for img_id in img_ids:\n raw_img_info = self.lvis.load_imgs([img_id])[0]\n raw_img_info['img_id'] = img_id\n # coco_url is used in LVISv1 instead of file_name\n # e.g. http://images.cocodataset.org/train2017/000000391895.jpg\n # train/val split in specified in url\n raw_img_info['file_name'] = raw_img_info['coco_url'].replace(\n 'http://images.cocodataset.org/', '')\n ann_ids = self.lvis.get_ann_ids(img_ids=[img_id])\n raw_ann_info = self.lvis.load_anns(ann_ids)\n total_ann_ids.extend(ann_ids)\n parsed_data_info = self.parse_data_info({\n 'raw_ann_info':\n raw_ann_info,\n 'raw_img_info':\n raw_img_info\n })\n data_list.append(parsed_data_info)\n if self.ANN_ID_UNIQUE:\n assert len(set(total_ann_ids)) == len(\n total_ann_ids\n ), f\"Annotation ids in '{self.ann_file}' are not unique!\"\n\n del self.lvis\n\n return data_list", "def __init__(self, **kwargs):\n DataLoader.__init__(self, **kwargs)", "def __init__(self, data):\n self.bees = [Bee(b) for b in data[\"bees\"]]\n self.flowers = [Flower(f) for f in data[\"flowers\"]]\n self.hives = [Hive(h) for h in data[\"hives\"]]", "def __init__(self):\n\t\tsuper().__init__()\n\t\t\n\t\t# Typically a list of data here\n\t\t# Typically a dict of header keys and values here", "def __init__(self):\n self.data = []", "def __init__(self):\n self.data = []", "def load_data_list(self) -> List[dict]: # noqa: E501\n try:\n import lvis\n if getattr(lvis, '__version__', '0') >= '10.5.3':\n warnings.warn(\n 'mmlvis is deprecated, please install official lvis-api by \"pip install git+https://github.com/lvis-dataset/lvis-api.git\"', # noqa: E501\n UserWarning)\n from lvis import LVIS\n except ImportError:\n raise ImportError(\n 'Package lvis is not installed. Please run \"pip install git+https://github.com/lvis-dataset/lvis-api.git\".' 
# noqa: E501\n )\n with self.file_client.get_local_path(self.ann_file) as local_path:\n self.lvis = LVIS(local_path)\n self.cat_ids = self.lvis.get_cat_ids()\n self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}\n self.cat_img_map = copy.deepcopy(self.lvis.cat_img_map)\n\n img_ids = self.lvis.get_img_ids()\n data_list = []\n total_ann_ids = []\n for img_id in img_ids:\n raw_img_info = self.lvis.load_imgs([img_id])[0]\n raw_img_info['img_id'] = img_id\n if raw_img_info['file_name'].startswith('COCO'):\n # Convert form the COCO 2014 file naming convention of\n # COCO_[train/val/test]2014_000000000000.jpg to the 2017\n # naming convention of 000000000000.jpg\n # (LVIS v1 will fix this naming issue)\n raw_img_info['file_name'] = raw_img_info['file_name'][-16:]\n ann_ids = self.lvis.get_ann_ids(img_ids=[img_id])\n raw_ann_info = self.lvis.load_anns(ann_ids)\n total_ann_ids.extend(ann_ids)\n\n parsed_data_info = self.parse_data_info({\n 'raw_ann_info':\n raw_ann_info,\n 'raw_img_info':\n raw_img_info\n })\n data_list.append(parsed_data_info)\n if self.ANN_ID_UNIQUE:\n assert len(set(total_ann_ids)) == len(\n total_ann_ids\n ), f\"Annotation ids in '{self.ann_file}' are not unique!\"\n\n del self.lvis\n\n return data_list", "def load(self):\n\n super(DatasetLoader_XRite2016, self).sync()\n\n keys = (\n 'ColorChecker24 - After November 2014',\n 'ColorChecker24 - Before November 2014',\n 'ColorCheckerSG - After November 2014',\n 'ColorCheckerSG - Before November 2014',\n )\n filenames = (\n 'ColorChecker24_After_Nov2014.txt',\n 'ColorChecker24_Before_Nov2014.txt',\n 'ColorCheckerSG_After_Nov2014.txt',\n 'ColorCheckerSG_Before_Nov2014.txt',\n )\n\n # TODO: Implement support for \"CGATS\" file format in \"Colour\":\n # https://github.com/colour-science/colour/issues/354\n illuminant = (\n CCS_ILLUMINANTS['CIE 1931 2 Degree Standard Observer']['ICC D50'])\n\n self._content = OrderedDict()\n for key, filename in zip(keys, filenames):\n directory = os.path.splitext(filename)[0]\n path = os.path.join(self.record.repository, 'dataset', directory,\n filename)\n\n with codecs.open(path, encoding='utf-8') as xrite_file:\n samples = []\n is_data = False\n lines = filter(\n None, (line.strip() for line in xrite_file.readlines()))\n for line in lines:\n if line == 'END_DATA':\n is_data = False\n\n if is_data:\n tokens = line.split()\n samples.append([\n tokens[0],\n [\n float(value.replace(',', '.'))\n for value in tokens[1:]\n ],\n ])\n\n if line == 'BEGIN_DATA':\n is_data = True\n\n i, j = (6, 4) if len(samples) == 24 else (14, 10)\n samples = np.array(samples)\n samples = np.transpose(samples.reshape([i, j, 2]), [1, 0, 2])\n keys, values = zip(*samples.reshape([-1, 2]))\n values = XYZ_to_xyY(Lab_to_XYZ(values, illuminant))\n self._content[key] = ColourChecker(key,\n OrderedDict(zip(keys, values)),\n illuminant)\n\n return self._content", "def _load(self):\n\t\tpool = []\n\t\tview = []\n\t\tlibrary = []\n\n\t\tif is_file(\"~/comiccrawler/pool.json\"):\n\t\t\tpool = json.loads(content_read(\"~/comiccrawler/pool.json\"))\n\n\t\tif is_file(\"~/comiccrawler/view.json\"):\n\t\t\tview = json.loads(content_read(\"~/comiccrawler/view.json\"))\n\n\t\tif is_file(\"~/comiccrawler/library.json\"):\n\t\t\tlibrary = json.loads(content_read(\"~/comiccrawler/library.json\"))\n\n\t\tfor m_data in pool:\n\t\t\t# reset state\n\t\t\tif m_data[\"state\"] in (\"DOWNLOADING\", \"ANALYZING\"):\n\t\t\t\tm_data[\"state\"] = \"ERROR\"\n\t\t\t# build episodes\n\t\t\tepisodes = []\n\t\t\tfor ep_data in 
m_data[\"episodes\"]:\n\t\t\t\tepisodes.append(Episode(**ep_data))\n\t\t\tm_data[\"episodes\"] = episodes\n\t\t\tmission = Mission(**m_data)\n\t\t\tself._add(mission)\n\n\t\tfor url in view:\n\t\t\tself.view[url] = self.pool[url]\n\n\t\tfor url in library:\n\t\t\tself.library[url] = self.pool[url]\n\n\t\tself.bubble(\"MISSION_LIST_REARRANGED\", self.view)\n\t\tself.bubble(\"MISSION_LIST_REARRANGED\", self.library)", "def __init__(self, data_config):\n self._brands = self._load_from_directory(data_config['targeted_brands_dir'])\n self._keywords = self._load_from_directory(data_config['keywords_dir'])\n self._fqdn_keywords = self._load_from_directory(data_config['fqdn_keywords_dir'])\n self._similarity_words = self._load_from_directory(data_config['similarity_words_dir'])\n self._tlds = self._load_from_directory(data_config['tld_dir'])", "def loadData(self,data_file):\n #Load the data from the json\n with open(data_file) as json_file: \n data = json.load(json_file)\n\n # Clear all instance variables\n self.dirs = []\n self.files = {}\n self.X = []\n self.Y = []\n self.output = {}\n\n # stored the data into the instance variables\n self.dirs = data['dirs'] #good\n self.files = data['files'] # good\n \n # self.output is a dict() with string:np.array\n output = data['output']\n for e in output:\n self.output[e] = np.array(output[e]) # -> fine\n #self.X is a list of np.arrays\n X = data['X']\n for x in X:\n self.X.append(np.array(x))# -> fine\n #self.Y is a list of np.arrays\n Y = data['Y']\n for y in Y:\n self.Y.append(list(y))# -> fine\n #Test prints, uncomment to test if data looks correct\n #print('self.dirs = ' + str(self.dirs))\n #print()\n #print('self.files = ' + str(self.files))\n #print()\n #print('self.output = ' + str(self.output))\n #print()\n #print('self.X = ' + str(self.X))\n #print()\n #print('self.Y = ' + str(self.Y))\n #print()\n print('Preprocessed data loaded from ' + str(data_file))\n print(data['comment'])\n return", "def load_data(self):\n logging.debug('Loading data from file ({})...'.format(self.file_name))\n parsed_data = list()\n with open(self.file_name) as file_data:\n for line in file_data.readlines():\n temp = dict()\n if 'JD' in line:\n continue\n line = line.split()\n temp['ts'], temp['mag'], temp['dif'] = float(line[0][:14]), float(line[1]), float(line[2])\n temp['f_mag'] = self.kalman_filter(temp['mag'])\n temp['dt'] = self.jd_to_datetime(temp['ts'])\n temp['dt_cor'] = self.jd_to_datetime(temp['ts'] - TIME_CRT)\n parsed_data.append(temp)\n logging.debug(' {} records loaded.'.format(len(parsed_data)))\n logging.debug(parsed_data[0])\n self.data_stream = parsed_data", "def __init__(self):\n self._data = [] # non-public underlying Python list as storage", "def __init__(self):\n\n self.dialogue_ids = self.__load_dialogue_ids(\"data/dialogue_ids.txt\")\n self.class_dict = self.__load_class_representation(\"data/class_vectors.txt\")", "def __init__(self, data_dir):\n self.data_dir = data_dir\n\n # reading in the images present\n self.files = os.listdir(self.data_dir)", "def _load_data(self):\n if self._name in BALANCE_DATASET:\n _loader = dataset_loaders[self._name]\n xnp, y = _loader()\n\n # Train - Test split\n gen = ShuffleSplit(n_splits=1, random_state=42, test_size=self._test_size).split(xnp)\n train_idx, test_idx = next(gen)\n\n # Train data.\n self.xnp_tr = xnp[train_idx]\n self.y_tr = y[train_idx]\n # Test data.\n self.xnp_ts = xnp[test_idx]\n self.y_ts = y[test_idx]\n\n else:\n _loader = dataset_loaders[self._name]\n xnp, xp, y = _loader()\n # self.xnp, 
self.xp, self.y = _loader()\n\n # Train - Test split\n gen = ShuffleSplit(n_splits=1, random_state=42, test_size=self._test_size).split(xnp)\n train_idx, test_idx = next(gen)\n\n # Train data.\n self.xnp_tr = xnp[train_idx]\n self.xp_tr = xp[train_idx]\n self.y_tr = y[train_idx]\n # Test data.\n self.xnp_ts = xnp[test_idx]\n self.xp_ts = xp[test_idx]\n self.y_ts = y[test_idx]", "def _load(self):\n\n # number of non-data header details at top of data file\n header = 1\n\n # open file\n weatherData = []\n with open(self.wfile) as myfile:\n if (self.lines > 0):\n weatherData = [next(myfile) for x in xrange(self.lines + header)]\n else:\n weatherData = myfile.readlines()\n\n # get data stream from first line\n streamHeader = weatherData.pop(0).rstrip()\n if (streamHeader == 'FULL'):\n self.dataStream = 0\n elif (streamHeader == 'ADVANCED'):\n self.dataStream = 1\n elif (streamHeader == 'BASIC'):\n self.dataStream = 2\n else:\n print \"Error: unecognised data stream from file %s\" % (self.wfile)\n return -1\n\n # read data\n inputData = []\n for line in weatherData:\n entries = line.split()\n inputData.append(entries)\n\n # copy all into np array\n self.data = np.array(inputData)\n\n return 0", "def build_data_set(self):\n if not self.assert_data_correct():\n self.download_all_data()\n self.unpack_rename_data()\n self.split_data_characters()\n self.clean_data_fragments()\n self.create_font_data()\n if not self.assert_train_augmented():\n self.augment_train_data()\n if not self.assert_style_data_correct():\n self.download_style_data()\n self.unpack_rename_data()", "def __init__(self):\n\n print '-'*60\n #self.train_folder = '../data/preprocess_nonstopword_nonstemming/train_clean/' # folder\n #self.test_folder = '../data/preprocess_nonstopword_nonstemming/test_clean/' # folder\n self.train_folder = '../data/preprocess_6/train_clean/' # folder\n self.test_folder = '../data/preprocess_6/test_clean/' # folder\n self.label_file = '../data/train_labels.csv' # path\n #pred_file = './submission_NB.csv' # predicitons\n self.pred_file = './submission_pre_6_t0.6.csv'\n\n\n self.train_ans = []\n self.test_index = []", "def __init__(self, data):\n\n self.produce_csv = data['produce_csv']\n self.produce_graphics = data['produce_graphics']\n self.report_name = data['report_name']\n self.file_name = self.report_name + '.csv'\n self.annual_file_name = self.report_name + '_annual.csv'\n self.csv_dir = ''\n self.diagnostic_dir = ''\n\n self.daily_variables = {\n 'year': ['time.cal_year', '', []],\n 'j_day': ['time.day', '', []]\n }\n\n self.annual_variables = {\n 'year': ['time.cal_year', '', 0]\n }", "def __init__(self):\n super(Nasbench201, self).__init__()\n self.args.data_path = FileOps.download_dataset(self.args.data_path)\n self.nasbench201_api = API('self.args.data_path')", "def _load(self):\n self.firstIter = 1\n self.lastIter = self.protocol.getLastFinishedIter()\n \n if self.viewIter.get() == ITER_LAST:\n self._iterations = [self.lastIter]\n else:\n self._iterations = self._getListFromRangeString(self.iterSelection.get())\n \n from matplotlib.ticker import FuncFormatter\n self._plotFormatter = FuncFormatter(self._formatFreq)", "def __init__(self):\n\n basedir = os.path.dirname(__file__)\n with open(os.path.join(basedir, \"datasets/beneficiary_ownership_markers.txt\")) as fp:\n self.bo_markers = set(map(str.strip, fp))\n\n with open(os.path.join(basedir, \"datasets/beneficiary_ownership_absent_markers.txt\")) as fp:\n self.absent_markers = set(map(str.strip, fp))\n\n with open(os.path.join(basedir, 
\"datasets/beneficiary_owner_is_founder_markers.txt\")) as fp:\n self.ref_markers = set(map(str.strip, fp))", "def __init__(self):\n\n self.read_input_file()\n self.read_simulation_files()", "def load_data(self):\n print(\"Loading dataset...\")\n # Load the dataset\n subIDs, data, labels = dl.load_processed_data_N_subjects_allchans(\n '../data_5sec_100Hz_bipolar/', Nsub=14)\n\n if len(data) > 1:\n\n # If more than one patient loaded, append data to single array\n data_arr = np.array(data[0])\n label_arr = labels[0]\n\n for sub in range(1, len(data)):\n data_arr = np.append(data_arr, data[sub], axis=1)\n label_arr = np.append(label_arr, labels[sub], axis=0)\n\n else:\n # Remove the extra dimension at axis=0\n data_array = np.squeeze(data)\n labels = np.squeeze(labels)\n\n # Move trials to the end so data array is 'nchan x timeseries x trials'\n self.data = np.moveaxis(data_arr, 1, -1)\n self.labels = np.array(label_arr)\n\n self.label_strings = dl.available_stringlabels\n\n valid_indices = np.sum(self.labels, axis=0)\n names = [[self.label_strings[i], i, valid_indices[i]] for i in range(len(valid_indices)) if valid_indices[i] > 0]\n print(\"A summary of valid labels is below: \\nFormat: [Label name, label index, Label count]\")\n for i in range(len(names)):\n print(names[i])\n return", "def datasets(self):\n pass", "def __init__(self, opt, data_dir, data_list):\r\n BaseDataset.__init__(self, opt)\r\n self.max_length = 60\r\n self.opt = opt\r\n self.input_nc = self.opt.output_nc\r\n self.data_dir = data_dir\r\n \r\n self.labels = []\r\n self.paths = []\r\n self.label_lens = []\r\n with open(data_list, 'r') as f:\r\n lines = f.readlines()\r\n for line in lines:\r\n line = line.strip()\r\n # label index\r\n if len(line.split(' ')) != 2: \r\n continue\r\n codes = line.split(' ')[1].split(',')\r\n if len(codes) > self.max_length:\r\n continue\r\n if codes[-1] == '':\r\n codes.remove('')\r\n img_code = [int(code)+1 for code in codes if int(code) < 6097]\r\n self.label_lens.append(len(img_code))\r\n # 把标签索引改为等长,后面填充0\r\n length = len(img_code)\r\n if length < self.max_length:\r\n img_code += [0] * (self.max_length - length)\r\n self.labels.append(img_code)\r\n self.paths.append(line.split(' ')[0])\r\n\r\n print ('loading from {:s} : {:d}'.format(data_list, len(self.paths)))\r\n self.size = len(self.paths)", "def __init__(self):\n self._data = None\n self._forecast_data = None\n self._today_data = None\n self.last_updated = None", "def __init__(self):\n self.data_set_loc = conf.config_section_mapper(\"filePath\").get(\"data_set_loc\")\n self.data_extractor = DataExtractor(self.data_set_loc)", "def __init__(self,input_file, output_file):\n\t\tself.input_file = input_file\n\t\tself.output_file = output_file\n\t\tself.data1 = []\n\t\tself.data2 = []\n\t\tself.data3 = []\n\t\tself.data4 = []\n\t\tself.data5 = []\n\t\tself.min_time = None\n\t\tself.max_time = None", "def __init__(self):\n self.data = []\n self.idx = {}", "def __init__(self, dataset):\n self._dataset = dataset", "def __init__(self):\n self.__dataset = None", "def __init__(self, dataPath, transformImage=None):\r\n self.dataPath = dataPath\r\n self.transformImage = transformImage\r\n self.videos = sorted(os.listdir(self.dataPath))\r\n self.query = 'cast.json'\r\n self.cand = 'candidate.json'\r\n self.queryDir = 'cast'\r\n self.candDir = 'candidates'\r\n self.datas = []\r\n self.domains = []\r\n self.labels = []\r\n self.iden = {}\r\n total = 0\r\n for index in range(len(self.videos)):\r\n with open(os.path.join(self.dataPath, 
self.videos[index], self.query)) as f:\r\n query = OrderedDict(sorted(json.load(f).items(), key=lambda d:d[0]))\r\n with open(os.path.join(self.dataPath, self.videos[index], self.cand)) as f:\r\n cand = OrderedDict(sorted(json.load(f).items(), key=lambda d:d[0]))\r\n for qk,qv in query.items():\r\n if qv not in self.iden:\r\n self.iden[qv] = total\r\n total += 1\r\n for ck,cv in cand.items():\r\n if qv == cv:\r\n self.domains.append(qk)\r\n self.datas.append(ck)\r\n self.labels.append(self.iden[qv])\r\n #total += 1\r\n self.len = len(self.datas)", "def __init__(self):\n \n self.load_PSF_data()", "def __init__(self, filename):\n #Opening the file and storing its contents in a list\n with open(filename) as fp:\n self.data = json.load(fp)", "def load_data(self) -> None:\n self.paths: List[str] = []\n self.durations: List[float] = []\n self.transcriptions: List[str] = []\n\n def raise_(err):\n \"\"\"raises error if problem during os.walk\"\"\"\n raise err\n\n for subset in self.subsets:\n subset_path = os.path.join(self.root, self.base_dir, subset)\n for root, dirs, files in os.walk(subset_path, onerror=raise_):\n if not files:\n continue\n matches = fnmatch.filter(files, \"*.trans.txt\")\n assert len(matches) == 1, \"> 1 transcription file found\"\n self._parse_transcription_file(root, matches[0])\n\n self._sort_by_duration()", "def _get_data(self):\n\n # Grab the data. Note, the separator is actually ', ', not just a\n # comma, so specify. Also, recognize the \"?\" as an NA value\n # (I think it is easier to have pandas catch the NA values instead\n # of manually searching for and parsing these in the future).\n # Finally, set the engine to python, since having a separator greater\n # than one character automatically does this, and prints a warning\n # message. 
By explicitly telling it to use python, we suppress the\n # warning.\n self.train_df = pd.read_csv(self.train_url, sep=', ', header=None,\n na_values='?', engine='python')\n\n # For the training data, have one comment row, so need to ignore\n self.test_df = pd.read_csv(self.test_url, sep=', ', header=None,\n skiprows=1, na_values='?', engine='python')\n\n # Get the header data\n response = requests.get(self.head_url)\n header = response.text.split('\\n')\n\n # Now, filter to grab the header lines:\n # First, make sure there is at least one character for the line, and\n # ignore lines that start with the comment character for the file \"|\"\n header = [row for row in header if len(row) > 0 and row[0] != '|']\n\n # Ignore the first row, since it is just identifying the classifier\n # task and, get just the header values\n header = [head.split(':')[0] for head in header[1:]]\n\n # Finally, we need to add a header name for the last column (if <= or >\n # income of 50k)\n header.append('income')\n\n # Now, set the header for the data sets\n self.train_df.columns = header\n self.test_df.columns = header", "def build(self):\n # open json, len 161,260\n at_json = open_json(self.json_names[0])\n link_json = open_json(self.json_names[1])\n # if need preprocessing, do it\n if self.args.img_preprocessing:\n print(\"resize imgs\")\n for i in tqdm(range(len(link_json))):\n image_url = \"image/\" + link_json[i][\"image_url_4x\"].split('/')[-1]\n img = Image.open(image_url)\n img = img.resize((224, 224))\n img.save(image_url)\n\n # create dataset\n itemlen = 0\n previd = 0\n for i in tqdm(range(len(link_json))):\n image_url = link_json[i][\"image_url_4x\"].split('/')[-1]\n uid = image_url.split('-')[0]\n if previd != uid:\n self.label.append(list(at_json[i].values())[2:])\n if i != 0:\n self.itemlen.append(itemlen)\n itemlen = 0\n self.input.append(f\"{self.frontpath}dataset/image/\" + image_url)\n previd = uid\n itemlen += 1\n self.itemlen.append(itemlen)\n self.separate()\n self.dataset = {\n 'train': self.train,\n 'validation': self.val,\n 'test': self.test\n }\n\n print('finished dataset')", "def __init__(self,site,startDate,endDate,path='verif_data/'):\n self.site = site.upper()\n self.startDateTime = datetime.strptime(startDate,'%Y%m%d')\n self.endDateTime = datetime.strptime(endDate,'%Y%m%d')\n years = range(self.startDateTime.year,self.endDateTime.year + 1)\n data = []\n for year in years:\n self.filename=path + self.site + '_asos_' + str(year) + '.txt'\n datafile = open(self.filename)\n for line in datafile:\n if line[0] != '#':\n if 'station' in line:\n self.header = [x.strip() for x in line[:-1].split(',')]\n else:\n dataline = line[:-2].split(',')\n for i,val in enumerate(dataline[:-1]):\n if val=='M':\n dataline[i] = -999\n dataline[1] = dataline[1].replace(' ','_')\n dataline[1] = dataline[1].replace('-','')\n currDateTime = datetime.strptime(dataline[1][:14],'%Y%m%d_%H:%M')\n if currDateTime >= self.startDateTime and currDateTime <= self.endDateTime:\n data.append(tuple(dataline))\n datafile.close()\n self.datatype = []\n for item in self.header:\n if item == 'station':\n self.datatype.append((item,'S3'))\n elif 'valid' in item:\n self.datatype.append(('time','S14'))\n elif 'skyc' in item:\n self.datatype.append((item,'S3'))\n elif item=='metar':\n self.datatype.append((item,'S99'))\n else:\n self.datatype.append((item,float))\n self.data = np.array(data,dtype=self.datatype)", "def __init__(self, raw_data_file,):\n self.raw_data_file = raw_data_file\n self.clean_data = self.cleanData()\n 
self.microtrip_data = []", "def __init__(self, dirname, sites, news_types):\n self.dirname = dirname\n self.sites = []\n self.news_types = []\n if type(sites) == str:\n self.sites.append(sites)\n if type(news_types) == str:\n self.news_types.append(news_types)\n else:\n self.sites = sites\n self.news_types = news_types\n self.list_news_path = list(self.get_list_news_files())\n # self.list_news_path = Parallel(n_jobs=-1)(delayed(list(self.get_list_news_files())))\n # self.feature_type = feature_type", "def _retrieve_data(self, log, progressbar, files):\n # ESGF frequently doesn't work. Until I get a document from them\n # that specifies a reliable API, I'm giving up.\n msg = \"ESGF has become too unreliable, so it's temporarily unsupported.\"\n raise NotImplementedError(msg)\n# login_successful = self._authenticator.login()\n# if not login_successful:\n# self._app.logger.warn(\"Failed to login.\")\n# session = self._authenticator.session\n\n temp_ds = []\n url_length = len(files)\n session = None\n\n # Add two to the progress bar. One for just starting, and another\n # for when it's all finished. Without these extra, the user can be\n # looking at a blank progress bar for the whole time, since _clean()\n # takes so long.\n progressbar.start(2*url_length)\n for i, remotefile in files:\n\n # The remotefile is just the filename, which is nicer for display.\n # Need the full url.\n url = self._url_from_file(remotefile)\n if session is None and self._authenticator.login(url):\n session = self._authenticator.session\n \n if session is not None:\n xdataset = xr.open_dataset(url,\n decode_cf=False,\n engine='pydap',\n session=session)\n msg = \"Cleaning: {0}.\".format(remotefile)\n# # Normalize it.\n# # FIX ME: Consider moving this to another place. This\n# # operation is the biggest bottleneck of this searching and\n# # retrieving data.\n self._clean(x)\n\n temp_ds.append(xdataset)\n msg = \"Retained: {0}\".format(filename)\n log.debug(msg) \n progressbar.update(msg)\n \n else:\n msg = \"Login failed.\"\n print msg\n log.debug(msg)\n progressbar.update(msg)\n\n # Don't stay logged on.\n self._authenticator.logout()\n\n # Return the list of xarray Dataset objects. 
The Data_repospecset data\n # structure can't hold the datasets thus far collected because, in\n # general, their coordinates will be defined on different lattices.\n return temp_ds", "def __init__(self):\n self.word_list.extend(self.load_corpus(\"reuters\"))\n self.corpus = \" \".join(self.word_list) # use spaces to join all the elements in the list\n # load the corpus to create the word list\n # note that the network is needed to download the corpus\n\n self.count_ngrams() # count the n-grams\n self.load_confusion_matrix() # read the confusion matrix from files\n self.load_vocabulary() # read the vocabulary from a file", "def load_data(self):\n raise NotImplementedError()", "def loadData ( self ) :\n df = pd.read_json ( self.dataset )\n df = df[pd.notnull ( df[2] )]\n df[1] = df[1].apply ( self.clean_text )\n\n self.X = df[1]\n self.y = df[2]", "def __init__(self, dir_path, window_size,\n user_map_path, computer_map_path, auth_type_map_path, logon_type_map_path):\n logging.info(f\"Initiating Dataset instance for directory {dir_path}\")\n self.directory = dir_path\n self.filenames = [filename for filename in os.listdir(dir_path) if os.path.isfile(os.path.join(dir_path, filename))]\n assert len(self.filenames) > 0\n random.shuffle(self.filenames)\n self.window_size = window_size\n self.len = self.count_len()\n self.user_map, self.user_count = util.load_mapping(user_map_path)\n self.computer_map, self.computer_count = util.load_mapping(computer_map_path)\n self.auth_type_map, self.auth_type_count = util.load_mapping(auth_type_map_path)\n self.logon_type_map, self.logon_type_count = util.load_mapping(logon_type_map_path)", "def __init__(self, data: List):\n self.data: List = data\n self.texts: List = self.list_texts()\n self.post_nlp: List = False", "def __init__(self, header=None):\r\n\r\n self.data = []", "def __init__(self, the_PyBERT):\n\n plotdata = the_PyBERT.plotdata\n\n the_data = ArrayPlotData()\n\n for item_name in self._item_names:\n the_data.set_data(item_name, plotdata.get_data(item_name))\n\n self.the_data = the_data", "def __init__(self, path='data'):\r\n self.nb_data = 3\r\n self.path = path\r\n self.data_train_name = 'Xtr'\r\n self.data_test_name = 'Xte'\r\n self.features_name = '_mat100'\r\n self.label_train_name = 'Ytr'\r\n self.label_test_name = 'Ytr'\r\n # load raw data\r\n self.raw_data = {'train': self.load_data(self.data_train_name),\r\n 'test': self.load_data(self.data_test_name)}\r\n # load data features\r\n self.data_features = {'train': self.load_data(self.data_train_name, self.features_name, type_='features'),\r\n 'test': self.load_data(self.data_test_name, self.features_name, type_='features')}\r\n # load labels\r\n self.labels = {'train': self.load_data(self.label_train_name),\r\n 'test': self.load_data(self.label_test_name)}\r\n\r\n # toy data\r\n self.toy_data_functions = {\r\n 'blobs': blobs,\r\n 'two_moons': two_moons\r\n }\r\n self.toy_data = dict()", "def __init__(self):\n \n self.csv_features = {} # Create dictionary to load the CSV features\n self.meta_features = [] # Create list to load the metadata features", "def __init__(self, unencoded_dataset): \n self._unencoded_dataset = unencoded_dataset\n self._plain_train = self._get_plain_train()\n self._plain_test = self._get_plain_test()\n self._cipher_train = self._get_cipher_train()\n self._cipher_test = self._get_cipher_test()", "def __init__(self, prefix, date, county):\n# self._dummyivo = DUMMYIVO\n\n # this to keep pylint happy\n self._ballots = []\n self._filename = ''\n self._pctname = ''\n 
self._pctnumber = ''\n self._registered = 0\n\n self.readdata(prefix, date, county)", "def __init__(self, api_data, output_file):\n self.data = api_data\n self.output_file = output_file", "def load_data(self) -> tuple:\n label_num = {}\n data_set = pathlib.Path(self.path)\n data = []\n\n # create the label lookup dict for verifcation later\n for i, v in enumerate(data_set.iterdir()):\n label_num[v.name] = i\n self.labels[i] = v.name\n # end\n\n # read images\n for img_path in data_set.rglob(\"*.jpg\"):\n lbl = label_num[str(img_path.parent.stem)]\n img = cv2.imread(str(img_path))\n img = cv2.resize(img, self.dims, interpolation=cv2.INTER_AREA)\n\n # flatten RGB data into a vector\n # NOTE: NOT ACTUALLY NECESSARY! \n img.flatten()\n\n # label the sample and append to temp data list\n sample = np.append(lbl, img)\n data.append(sample)\n # end\n\n # partition and package the data (*_ ensures safe unpacking)\n train, test, validate, *_ = Data.partition(data, self.parts, 0.7, 0.2)\n self.train = Data(train)\n self.test = Data(test)\n self.validate = Data(validate)", "def prepare_data(self) -> None:\n if (self.root).is_dir():\n logger.info(\"Found the dataset.\")\n else:\n download_and_extract(self.root, DOWNLOAD_INFO)", "def __init__(self, train, test, head):\n\n # Save the URL's, in case we want it later\n self.train_url = train\n self.test_url = test\n self.head_url = head\n\n # Grab the data, and store the internal variables\n self._get_data()", "def DataLoader():\n #importing data\n House_Prices_Uncleaned = pd.read_csv(\"zillow_data/Zip_zhvi_uc_sfrcondo_tier_0.33_0.67_sm_sa_mon.csv\")\n #Cleaning house prices data\n\n House_Prices=pd.DataFrame(House_Prices_Uncleaned[\"RegionName\"][House_Prices_Uncleaned[\"CountyName\"]==\"New York County\"])\n\n House_Prices[\"Price\"]=pd.DataFrame(House_Prices_Uncleaned[\"2020-09-30\"])\n\n House_Rent_Uncleaned= pd.read_csv(\"zillow_data/Zip_ZORI_AllHomesPlusMultifamily_SSA.csv\")\n\n #Cleaning house rent data\n House_Rent=pd.DataFrame(House_Rent_Uncleaned[\"RegionName\"])\n House_Rent[\"Rent\"]=pd.DataFrame(House_Rent_Uncleaned[\"2020-09\"])\n\n return House_Prices, House_Rent", "def __init__(self, num_locations):\n self.dataset = {}\n self.num_locations = num_locations\n self.add_locations()", "def __init__(self, dat):\n self.data = dat", "def _load_data(self, save_temp=False):\n # directly read processed data and encode\n print ('Start tokenizing data...')\n self.data = json.loads(\n open(self.cfg.data_path+self.cfg.data_file, 'r', encoding='utf-8').read().lower())\n self.train, self.dev, self.test = [], [], []\n print ('Start encoding data...')\n p = progressbar.ProgressBar(len(self.data))\n p.start()\n p_idx = 0\n for fn, dial in self.data.items():\n p.update(p_idx)\n p_idx += 1\n if '.json' in fn:\n fn = fn.replace('.json', '')\n if 'all' in self.cfg.exp_domains or self.exp_files.get(fn):\n if self.dev_files.get(fn):\n self.dev.append(self._get_encoded_data(fn, dial))\n elif self.test_files.get(fn):\n self.test.append(self._get_encoded_data(fn, dial))\n else:\n if self.data_mode == 'train':\n self.train.append(self._get_encoded_data(fn, dial))\n elif self.data_mode == 'test':\n pass\n else:\n raise Exception('Wrong Reader Data Mode!!!')\n p.finish()", "def __init__(self, cfg, data_dir, train_files):\n self.cfg = cfg\n self.imgs, self.ids, self.anns = None, None, None\n self.data_dir = data_dir\n self.product_labels = {}\n print('loading annotations into memory...')\n tic = time.time()\n self.datasets = []\n if type(train_files) != list:\n 
train_files = [train_files]\n for train_file in train_files:\n labels_file = os.path.dirname(train_file)\n labels_file = os.path.join(labels_file, 'labels.txt')\n with open(labels_file, 'r') as f:\n self.product_names = {}\n for line in f:\n label, prod_name = line.split()\n self.product_labels[prod_name] = int(label)\n with open(train_file, 'r') as f:\n dataset = {}\n train_file_dir = os.path.dirname(train_file)\n for line in f:\n img, ann_file = line.split()\n img = os.path.join(train_file_dir, 'images',\n os.path.basename(img))\n ann_file = os.path.join(train_file_dir, 'annotations',\n os.path.basename(ann_file))\n dataset[img] = ann_file\n self.datasets.append(dataset)\n print('Done (t={:0.2f}s)'.format(time.time() - tic))\n self.create_index()", "def __init__(self, data_path):\n self.perf_data = dill.load(open(data_path, 'rb'))\n #print(self.perf_data[0])\n print(len(self.perf_data))\n self.length = len(self.perf_data)\n\n # perform a few pre-processing steps\n for i in range(self.length):\n # store the length of the pitch contours for use later\n self.perf_data[i]['length'] = len(\n self.perf_data[i]['pitch_contour'])\n # store the length of the pitch contours for use later\n self.perf_data[i]['pitch_contour'] = self.normalize_pitch_contour(\n self.perf_data[i]['pitch_contour'])\n print(self.perf_data[0])", "def __init__(self):\n with open(\"sat.json\", \"r\") as infile:\n self._sat = json.load(infile)[\"data\"]\n #Define the headers for the csv\n self._headers = [\"DBN\", \"School Name\", \"Number of Test Takers\", \"Critical Reading Mean\", \"Mathematics Mean\", \"Writing Mean\"]", "def __init__(self):\n self._results = {}\n self._logs = {}", "def __init__(self, paths):\n assert isinstance(paths, ScatterPath)\n jsondata = ScatterJson(paths.jsondata)\n\n # load data from init file\n with open(paths.init_file, \"r\") as f:\n ## -- check header -- ##\n self.dim = np.fromfile(f, UINT_T, 1)[0]\n self.Ntraj = np.fromfile(f, UINT_T, 1)[0]\n self.inittemp = np.fromfile(f, DOUBLE_T, 1)[0]\n self.mass = np.fromfile(f, DOUBLE_T, self.dim)\n\n assert self.dim == jsondata.dim\n assert self.Ntraj == jsondata.Ntraj\n assert self.inittemp == jsondata.inittemp\n assert tuple(self.mass) == jsondata.mass\n\n ## -- load data -- ##\n self.r0p0 = np.fromfile(f, DOUBLE_T, self.Ntraj * self.dim * 2)\n self.r0p0 = self.r0p0.reshape((self.Ntraj, self.dim * 2))\n self.r0 = self.r0p0[:,:self.dim]\n self.p0 = self.r0p0[:,self.dim:]", "def __init__(self, config=None, first_ann_file=None,second_ann_file=None):\n # load dataset\n self.config = config\n\n self.dataset = {}\n self.anns = []\n self.imgToAnns = {}\n self.imgs = {}\n \n if not first_ann_file is None:\n print('loading annotations into memory...')\n tic = time.time()\n dataset = json.load(open(first_ann_file, 'r'))\n print('Done (t=%0.2fs)'%(time.time()- tic))\n self.dataset['images']= dataset['images']\n self.dataset['annotations'] = dataset['annotations']\n if 'classifications' in dataset.keys():\n self.dataset['cls_lbls'] = dataset['classifications']\n if not second_ann_file is None:\n dataset_second = json.load(open(second_ann_file, 'r'))\n self.split_second_ann(dataset_second)\n print('Done (t=%0.2fs)'%(time.time()- tic))\n\n self.process_dataset()\n self.createIndex()", "def __init__(self, num_detectors):\n self.dataset = []\n self.num_detectors = num_detectors\n self.add_detectors()", "def __init__(self, data_dir, transform):\n self.filenames = os.listdir(data_dir)\n self.filenames = [os.path.join(data_dir, f) for f in self.filenames]\n\n 
self.transform = transform" ]
[ "0.70053685", "0.6822351", "0.68023336", "0.67671615", "0.67606527", "0.6706474", "0.65779483", "0.65768045", "0.65497845", "0.65438706", "0.64940745", "0.64903885", "0.6471294", "0.6471034", "0.6459046", "0.64494765", "0.64485466", "0.6430215", "0.6430215", "0.6430215", "0.6430215", "0.6430215", "0.6430215", "0.64061767", "0.64008343", "0.6382642", "0.6371711", "0.63613206", "0.6321391", "0.6308581", "0.6306337", "0.62800485", "0.62721413", "0.625472", "0.6246983", "0.6236983", "0.6236983", "0.6235661", "0.62105674", "0.61990243", "0.619116", "0.61883116", "0.6179112", "0.6165288", "0.6150341", "0.61503136", "0.6142691", "0.61310226", "0.61296815", "0.612015", "0.6110511", "0.61020035", "0.60703635", "0.60668796", "0.6064803", "0.6056308", "0.60548764", "0.6030391", "0.60196686", "0.6017786", "0.60171825", "0.60103935", "0.60091907", "0.6006036", "0.6004283", "0.60012233", "0.6000864", "0.60002077", "0.5998677", "0.5990512", "0.5988813", "0.59833163", "0.5971363", "0.59655803", "0.59653294", "0.59510446", "0.5947376", "0.5945639", "0.59455055", "0.5944609", "0.5940914", "0.59283775", "0.59245485", "0.5923949", "0.5922001", "0.59200084", "0.59199214", "0.59194225", "0.5915887", "0.5915878", "0.5912647", "0.59075624", "0.5903592", "0.5903249", "0.5902463", "0.59015435", "0.5899074", "0.58893716", "0.58850425", "0.5882597", "0.58784723" ]
0.0
-1
The delete method has yet to be designed. NOT IN USE
def __delete__(self):
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete(self):\n ...", "def delete():", "def delete(self):\n pass", "def delete(self):\n pass", "def delete(self):\n pass", "def delete(self):\n pass", "def delete(self):\n raise NotImplementedError", "def delete(self):\n raise NotImplementedError()", "def delete(self):\n raise NotImplementedError()", "def delete(self, *args, **kwargs):\n pass", "def delete(self, *args, **kwargs):\n pass", "def delete(self, *args, **kwargs):\n return 0", "def delete(self, _id):", "def delete(self):\n\n raise NotImplementedError()", "def delete(self):\n raise exceptions.NotImplemented", "def _Delete(self):\n pass", "def delete(self, data):\r\n pass", "def delete(self, obj):", "def delete(self, *args, **kwargs) -> Any:\n pass", "def delete(self, *args, **kwargs):\n raise NotImplementedError()", "def delete(self):\n\n raise NotImplementedError('Must be implemented by subclasses')", "def test_delete(self):\n pass", "def beforeDelete(self):", "def delete(self, obj):\n raise NotImplementedError", "def _delete(self, *args, **kwargs):\n return self._request('delete', *args, **kwargs)", "def do_delete(self, arg):\n \treturn False", "def test_delete_item_using_delete(self):\n pass", "def delete(self,key):\n\n pass", "def delete(self, *args, **kw):\n kw['method'] = 'DELETE'\n return self.open(*args, **kw)", "def test_delete_run(self):\n pass", "def delete(self, obj=None):\n pass", "def delete(self, name):\n\n pass", "def remove(self):", "def delete(self, id):\n raise NotImplementedError", "async def delete(self, delete: TPayload) -> None:", "def delete(self, *args, **kwargs):\n self.request(\"delete\", *args, **kwargs)", "def delete(self, *args, **kwargs):\n return self.handle_delete_request()", "def delete(self):\n self.request().delete()", "def delete(self, key):", "def delete_callback(self):\n pass", "def before_delete(self, obj, st):\n pass", "def delete(self):\n return super(Task, self).delete(None)", "def delete(cls, id):\n raise Exception('Not Implemented Yet')", "def delete():\n # Must be logged in to perform any delete commands.\n auth_required()\n pass", "def delete(self, keyword, key):", "def delete(self, cls, id):\n pass", "def delete(self):\n return self._delete", "def delete(self,id):\r\n return delete(id=id)", "def after_delete(self, obj, st):\n pass", "def aboutToDelete(self):\n \n pass", "def aboutToDelete(self):\n \n pass", "def aboutToDelete(self):\n \n pass", "def delete(self, application_id):", "def test_delete_record(self):\n pass", "def delete(self):\n self._client.delete(self)", "def delete(self) -> bool:\n return False", "def delete(self, *args, **kwargs):\n super(Image, self).delete(*args, **kwargs)", "def delete(self):\n self.data = None", "def delete(self, value):\n pass", "def hdelete(self, *p):\n raise UnsupportedMethodError('delete')", "def delete(self):\n\n\n try:\n db = getDatabase()\n connection = db.connect()\n\n connection.delete(self)\n except Exception as e:\n raise e\n finally:\n db.dispose()", "def delete(self, request , pk=None): \n return Response({'message':'DELETE'})", "def delete(self, name=None):\n raise NotImplementedError", "async def delete(self, *, reason: Optional[Any] = ...):\n ...", "def _delete(self, item):\n self.cv.delete(item)", "def delete(self):\n return self._finalize()", "def delete(self):\n return self._finalize()", "def delete(self):\n return self._finalize()", "def delete(self):\n return self._finalize()", "def delete(self):\n return self._finalize()", "def delete(self):\n return self._finalize()", "def delete(self):\n return self._finalize()", 
"def delete(self):\n return self._finalize()", "def delete(self):\n return self._finalize()", "def test_delete1(self):\n pass", "def remove():", "def delete(self):\n self.method = \"DELETE\"\n self.send()", "def delete_leader(self):", "def remove(self):\r\n\t\tself._delete()", "def delete(self, key):\n pass", "def delete(self, key):\n pass", "def delete(self):\r\n self.domain.delete_item(self)", "def delete(self, *args, **kwargs):\n self.portrait.delete()\n super(Giza, self).delete(*args, **kwargs)", "def _remove(self):\n pass", "def delete(self):\n self.call('DELETE', expect=error.NO_CONTENT)", "def _notify_delete(self, cuds_object):", "async def deleted(self, value):\n pass", "def delete(self, c_path):\n raise NotImplementedError", "def delete(self, uri, where, selectionArgs):\n pass", "def delete(self, uid):\n raise NotImplementedError", "def on_delete(self, payload):\n pass", "def delete_document(self):\n pass", "def delete_order():", "def test_client_document_delete(self):\n pass", "def delete(self):\n return self.request('', pylastica.request.Request.DELETE)", "def delete(thing, id_):\n pass", "def delete():\n click.echo('delete was called.')", "def delete(self):\r\n s = self.get_session()\r\n s.delete(self)\r\n s.commit()", "def delete(self, id):\n self.not_supported()", "def delete(self, id):\n self.not_supported()" ]
[ "0.8987653", "0.8940316", "0.8721936", "0.8721936", "0.8721936", "0.8721936", "0.85827905", "0.84170693", "0.84170693", "0.83346015", "0.83346015", "0.8312875", "0.8245658", "0.81583333", "0.8149139", "0.80602145", "0.80241364", "0.80155075", "0.79894155", "0.7970612", "0.78318375", "0.7548203", "0.7545496", "0.75188416", "0.7501334", "0.7448929", "0.7414133", "0.74045193", "0.734886", "0.73260415", "0.7317442", "0.7295535", "0.7252239", "0.72475153", "0.72403455", "0.7233429", "0.7225122", "0.7219255", "0.7217779", "0.7201827", "0.7199472", "0.7195216", "0.7191235", "0.71893513", "0.7162443", "0.715422", "0.71493965", "0.7137511", "0.7091197", "0.707705", "0.707705", "0.707705", "0.7070937", "0.7066679", "0.70646554", "0.70580703", "0.7050483", "0.70434463", "0.70343083", "0.70306075", "0.70169", "0.7005984", "0.6990898", "0.69695514", "0.69619703", "0.6959368", "0.6959368", "0.6959368", "0.6959368", "0.6959368", "0.6959368", "0.6959368", "0.6959368", "0.6959368", "0.69438076", "0.6943505", "0.6940469", "0.69254756", "0.6920152", "0.69181305", "0.69181305", "0.6907431", "0.6894011", "0.68922853", "0.6890431", "0.68870175", "0.6881705", "0.6880439", "0.6880264", "0.6870568", "0.68696254", "0.68666106", "0.6865379", "0.68613285", "0.68592525", "0.68559754", "0.6853279", "0.6851886", "0.6827413", "0.6827413" ]
0.7856196
20
Log a message to ``kastle`` logger.
def log(level: str, *messages: str) -> None:
    for message in messages:
        getattr(logger, level)(message)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log(self, message: str):", "def log(self, message):", "def log(self, message):\n self._logger.write(message)", "def log(self, _strMessage=\"\"):\n self.edLogging.log(_strMessage)", "def log( loglevel, message ):\n E.log( loglevel, message )", "def log(\n message: str,\n application: str = \"Red Layer\",\n log_level: int = 0,\n push: bool = False,\n ):\n # send it to QGIS messages panel\n QgsMessageLog.logMessage(\n message=message, tag=application, notifyUser=push, level=log_level\n )\n\n # optionally, display message on QGIS Message bar (above the map canvas)\n if push:\n iface.messageBar().pushMessage(\n title=application, text=message, level=log_level, duration=(log_level+1)*3\n )", "def _log(self, message):\n pass", "def _log(self, msg):\n self.telegram_queue.put(f\"{__name__.split('.')[-1]}: {msg}\")", "def log(self, msg):\n\n\t\tself.eyetribe.log_message(msg)", "async def log(self, message, level=logging.INFO):\n\t\tawait self.handle_log(LogEntry(level, self.name, message))", "def log_message(self, msg):\n\t\tself.logView.log_message(msg)", "def log(msg):\n print msg", "def logger(self, message):\n if hasattr(self.log, '__call__'):\n self.log(message.strip())", "def log(message):\n if LOGPLEASE:\n logging.info(message)", "def log(self, msg):\n logging.info(\"Logging Message\")\n ml = self.monk_logs\n today = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M\")\n ml.insert({today: {\"log\": msg,\n \"sentiment\": self.get_sentiment(msg),\n \"weather\": Weather.get_weather()}})", "def Log(message):\n log_mod = sys.modules.get('twisted.python.log')\n if log_mod:\n log_mod.msg(message)\n else:\n print message", "def log(self, message):\n self._log += \"%s\\n\" % message\n print message", "def log(self, msg, logging_level):\n\n # log\n self.logger.log(logging_level, msg)", "def __log(self, msg: str, **kwargs: Any) -> None:\n self.logger.log(self.log_level, msg, **kwargs)", "def log(self, level, msg, *args, **kwargs):\n pass", "def log_message(self, format, *args):\n self._log(format, args)", "def log(self, message):\n timestamp = time.strftime(\"[%H:%M:%S]\", time.localtime(time.time()))\n self.file.write('%s %s\\n' % (timestamp, message))\n self.file.flush()", "def log_message(self, format, *args):", "def log(message):\n logger.debug(message)\n logger.info(message)\n logger.warning(message)\n logger.error(message)\n logger.critical(message)", "def logs_add_message(self, level, message):\n pass", "def logmsg(self, lvl, msg):\n self.logger.log(lvl, msg)", "def log(self, msg):\n print(msg)", "def log(msg):\n print(str(msg))", "def logIt(self, msg):\n\n if (self.logger): self.logger.logIt(msg)", "async def log(self, message, level=logging.INFO):\n\t\tif self.log_queue is not None:\n\t\t\tawait self.log_queue.put(LogEntry(level, self.modulename, message))\n\t\telse:\n\t\t\tprint(str(LogEntry(level, self.modulename, message)))", "def log_to_slack(self, message):\n log.info(f'MC: {message}')\n if not hasattr(self, 'slack_webhook'):\n log.info(f'MC: Not logging to slack because slack_webhook is not '\n f'defined.')\n return\n try:\n payload = {\"text\": message}\n if hasattr(self, 'slack_channel'):\n payload[\"channel\"] = self.slack_channel\n res = requests.post(self.slack_webhook, json=payload)\n res_text = res.text\n except Exception as e:\n res_text = repr(e)\n if res_text != 'ok':\n log.warning(f'MC: Error while logging to slack: {res_text}')", "def log_message(self, message):\n if self.root.is_logging:\n if len(repr(str(msg))) < 1:\n stdout(msg)\n self.root.log_file.write(\"%.4f\" % 
time.time() + \" \" + repr(str(msg)) + \"\\n\")\n return", "def log(message):\n\n # Trim the message.\n result = message.strip()\n\n # Print it to STDOUT.\n print result", "def log_message(self, fmt, *args):\n pass", "def logIt(self, msg):\n\n\t\tif( self.logger ): self.logger.logIt( msg )", "def log(str):\n\n Utils.send('log', str)", "def log(self, level, msg, *args, **kwargs):\n logging.Logger.log(self, level, '%s::%s'%(self.name, msg), *args, **kwargs)", "def on_message(self, msg):\n self.log.info(msg)", "def onMessage(self, msg):\n log.msg(str(msg))", "def log(message: str, level: adsk.core.LogLevels = adsk.core.LogLevels.InfoLogLevel, force_console: bool = False): \n # Always print to console, only seen through IDE.\n print(message) \n\n # Log all errors to Fusion log file.\n if level == adsk.core.LogLevels.ErrorLogLevel:\n log_type = adsk.core.LogTypes.FileLogType\n app.log(message, level, log_type)\n\n # If config.DEBUG is True write all log messages to the console.\n if DEBUG or force_console:\n log_type = adsk.core.LogTypes.ConsoleLogType\n app.log(message, level, log_type)", "def _log(self, log, message):\n log_entry = '[%s] %s\\n' % (time.strftime('%Y/%m/%d %H:%M:%S'), message)\n log.write(log_entry)\n if self.verbose:\n print log_entry.rstrip()", "def log(message):\n print(\"{0}: {1}\".format(acm.Time.TimeNow(), message))", "def log_and_print(self, message):\n self.f.write(message + \"\\n\")\n print message", "def log(self, msg):\n self.ansible.log(msg)", "def log(message):\n if type(message) is not str:\n message = str(message)\n print(PREFIX + re.sub('\\n', '\\n' + PREFIX, message))", "def log(self, message):\n print(\"Server#{}\".format(message))\n if self.log_queue:\n self.log_queue.put()", "def log(self, level, msg, *args, **kwargs):\n self._logger.log(level, msg, *args, **kwargs)", "def log_message(self, formate, *args):\n return", "def log(tag, message=None):\n Log._post(\"log\", tag, message)", "def log(msg, logfile):\n print(msg)\n logfile.write(msg + \"\\n\")", "def log(self, level, msg, *args, **kwargs):\n\n if self.logger:\n self.logger.log(level, msg, *args, **kwargs)", "def log(txt):\n if sys.version_info[0] >= 3:\n if isinstance(txt, bytes):\n txt = txt.decode('utf-8')\n message = '%s: %s' % (ADDON_NAME, txt)\n else:\n if isinstance(txt, str):\n txt = txt.decode('utf-8')\n message = (u'%s: %s' % (ADDON_NAME, txt)).encode('utf-8') # pylint: disable=redundant-u-string-prefix\n xbmc.log(msg=message, level=xbmc.LOGDEBUG)", "def log(msg):\n\n if _has_logbook:\n _logger.info(msg)", "def logline(msg):\n print msg", "def log(self, msg, level=LOG_INFO):\n self.send_command('log', {\n 'msg': msg,\n 'level': level,\n })", "def log(message):\n path = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n with open(os.path.join(path, logfile_name), 'a+') as f:\n t = strftime(\"%d %b %Y %H:%M:%S\", gmtime())\n f.write(\"\\n\" + t + \" %s\" % message)", "def log(cls, log_message, log_level):\n\n m = l = ''\n is_error = log_level == cls.LOG_LEVEL_ERROR\n\n if isinstance(log_message, GistException):\n log_message = log_message.to_json()\n m = log_message.get('app_message')\n l = '%s, File: %s - Line: %s' % (\n log_message.get('error_description'),\n log_message.get('filename'),\n log_message.get('line')\n )\n Logger.log(l, is_error)\n elif isinstance(log_message, ConnectionError):\n m = 'Apparently you are having problems with the internet connection'\n elif isinstance(log_message, Exception):\n exc_type, exc_value, exc_traceback = sys.exc_info()\n m = 
\"\"\"Apparently something is wrong with the plugin.\n Create a new issue with the log file content. Thanks!\"\"\"\n Logger.log(str(traceback.format_exception(exc_type, exc_value, exc_traceback)), is_error)\n\n elif isinstance(log_message, str):\n m = log_message\n\n cls.__show_app_message('Sync Settings: %s' % (m), log_level)", "def log(self, message, *args, newline):\n\n self.current_character.log(message, *args, newline=newline)", "def _log(self, format, args, level=None):\n if level is None:\n level = self.log_level\n xbmc.log(\n \"metadata.movie.stupid: %s - - [%s] %s\\n\" % (\n self.client_address[0], self.log_date_time_string(),\n format % args),\n level)", "def message(self, msg, show_log=True, log_message=None):\n self._last_message = msg\n if log_message:\n self.logger.log(log_message)\n elif show_log:\n self.logger.log(msg)", "def log(self, message):\n #logs.logger.debug(\"asyncore log: %s\" % message)\n pass", "def log(message):\n path = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n with open(os.path.join(path, logfile_name), 'a+') as f:\n t = strftime(\"%d %b %Y %H:%M:%S\", gmtime())\n f.write(\"\\n\" + t + \" \" + message)", "def log(message):\n path = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n with open(os.path.join(path, logfile_name), 'a+') as f:\n t = strftime(\"%d %b %Y %H:%M:%S\", gmtime())\n f.write(\"\\n\" + t + \" \" + str(message))", "def _log(self, marker, message):\n try:\n self.log_dest.write(marker + self.log_prefix + message + \"\\n\")\n self.log_dest.flush()\n except Exception:\n pass", "def write_log(self, msg: str):\n self.cta_engine.write_log(msg, self)", "def log(msg):\n\n print('datastore: %s' % msg)", "def log(self, message):\n #logs.logger.debug(\"asyncore log: %s\" % message)", "def log_message(self, message):\n log_info = {'logGroupName': self.log_group_name,\n 'logStreamName': self.log_stream_name,\n 'logEvents': [\n {\n 'timestamp': int(1000 * time.time()),\n 'message': '[{}]: {}'.format(self.stage, message)\n },\n ]}\n\n if self.sequence_token:\n log_info['sequenceToken'] = self.sequence_token\n\n response = self.logs_client.put_log_events(**log_info)\n\n self.sequence_token = response['nextSequenceToken']", "def logprint(self, message):\n print message\n self.log += message+\"\\n\"", "def w(msg):\n logging.warning('##### %s' % repr(msg))", "def logging(cls, lvl, message):\n log = getattr(logging, lvl)\n message = '[{}] {}'.format(cls.__name__, message)\n log(message)", "async def add_log(self, value):\n log_string = value\n print(log_string)\n self.embed.title = log_string\n self.embed.timestamp = datetime.datetime.now()\n self.embed.description = \"\"", "def log(message):\n if args.verbose:\n if args.syslog:\n syslog.syslog(syslog.LOG_NOTICE, message)\n else:\n print >>sys.stderr, message", "def log_msg(msg, loglevel=xbmc.LOGDEBUG):\n if isinstance(msg, unicode):\n msg = msg.encode('utf-8')\n if loglevel == xbmc.LOGDEBUG and FORCE_DEBUG_LOG:\n loglevel = xbmc.LOGNOTICE\n xbmc.log(\"%s --> %s\" % (ADDON_ID, msg), level=loglevel)", "def log(self, msg):\n log.msg(msg, level=log.INFO)", "def log(self, message):\n if VERBOSE:\n print self, message", "def log(self, msg):\n self.xymap.log(msg)", "def log_message(self, format, *args):\n\n self.logger.info(\"%s %s\" % (self.client_address[0], format % args))", "def logger(self, value):\n pass", "def log(msg):\n # Look at: https://console.cloud.google.com/logs to see your logs.\n # Make sure you have \"stdout\" selected.\n print('main: %s' % msg)", 
"def log(self, message, level=None):\n\n if level is None or level.lower() == \"all\":\n level = \"notset\"\n level = getattr(logging, level.upper())\n\n self.logger.log(level, message)", "def log_(\n message: str,\n logger: logging.Logger,\n level: str = \"info\",\n extra: Optional[Dict] = None,\n trim: bool = False,\n) -> None:\n if extra is None:\n extra = {}\n # Clean up the message for logging\n if message:\n message = message.replace(\"\\n\", \"\").replace(\" \", \" \").replace(\"{ \", \"{\")\n if trim:\n message = _trim_message(message)\n # Log.\n getattr(logger, level)(message, extra=extra)", "def log_message(self, fmt, *args):\r\n pass\r\n # log_message\r", "def _log(self, msg, mode=\"info\"):\n if mode == \"info\":\n self._logger.info(msg)\n elif mode == \"warning\":\n self._logger.warning(msg)", "def _send_log(event_params):\n log = storm_log.StormLog(\n os.environ['SPLUNKSTORM_ACCESS_TOKEN'],\n os.environ['SPLUNKSTORM_PROJECT_ID'])\n return log.send(**event_params)", "def log(self, msg: str) -> None:\n if self.args.verbose:\n print(msg)", "def msg(message):\n to_stdout(\" --- {message}\".format(message=message))\n if _logger:\n _logger.info(message)", "def log(self, stringToLog):\n print stringToLog\n self.stream.write(stringToLog + \"\\n\")", "def log_message(self, text):\n if self.message_log_file != -1:\n #open file in append mode and write line to file\n with open(self.message_log_file, 'a') as log_file:\n log_file.write(text+'\\n')\n return", "def _onLog(self, client:mqtt.Client, userdata:Any, level:int, buf:str) -> None:\n\t\tself.lowLevelLogging and self.messageHandler and self.messageHandler.logging(self, mqtt.LOGGING_LEVEL[level], f'MQTT: {buf}')", "def log(self, message, log_level=\"info\"):\n for word in self.__ignore_output:\n while word in message:\n message = message.replace(word, \"\")\n getattr(logger, log_level)(\"{}{}\".format(self.__output_prefix, message.strip()))", "def _log(self, message, timeout=0.1, force=False, level=0):\n # print(\"[LOG][to=%.3fs][force=%s] %s\"\n # % (timeout, str(force), message))\n if force or time.time() - self._lastLogWrite > timeout:\n self._logWriter.send({'message': message, 'level': level})\n if not force:\n self._lastLogWrite = time.time()\n self._ignoredLogInfoSent = False\n elif not self._ignoredLogInfoSent:\n self._ignoredLogInfoSent = True\n self._logWriter.send({'message': '[...]', 'level': 0})", "def debug(message):\n logging.getLogger().debug(message)", "def log(self, api, msg, level):\n return succeed(log.msg(msg, logLevel=level))", "def log(message, level=\"INFO\"):\r\n print(__get_formatted(message, level))", "async def log(self, guild: discord.Guild, text: str, *, do_not_format: bool=False) -> discord.Message:\n return await self.bot.send_modlog(guild, text if do_not_format else self.modlog_msg(text))", "def log_message(self, format_str, *args):\n LOGGER.debug(self._format_msg(format_str, *args))", "def debug(self, message, *args, **kwargs):\n\n self.logger.debug(message, *args, **kwargs)", "def log():\n return logging.getLogger(\"vodka\")", "def __log__(self, val):\n if lm_settings[\"debug\"]:\n try:\n log_file = open(\"language_manager/info/language_manager.log\", \"a\")\n except FileNotFoundError:\n log_file = open(lm_settings[\"logfile\"], \"w\")\n log_file.write(val)\n log_file.close()", "def do_log(self, arg):\n arg = \" %s :custom log\" % (arg)\n log(arg)" ]
[ "0.6749791", "0.66110027", "0.62929875", "0.61028975", "0.60638314", "0.60285443", "0.6009939", "0.6003057", "0.59888184", "0.5934584", "0.5918162", "0.59128755", "0.5903931", "0.5870506", "0.5867751", "0.58582294", "0.58571404", "0.58528185", "0.5843078", "0.5834469", "0.5821527", "0.58129835", "0.58026004", "0.5786256", "0.5776002", "0.5749734", "0.57484424", "0.5737095", "0.5735943", "0.57321334", "0.5719564", "0.57082313", "0.5682867", "0.5677971", "0.5674833", "0.5671925", "0.5661301", "0.5659194", "0.5650638", "0.56422937", "0.564057", "0.5630439", "0.56078374", "0.560551", "0.56049645", "0.5603583", "0.559211", "0.55646265", "0.5561668", "0.554355", "0.5536689", "0.55233043", "0.5513406", "0.55102545", "0.5503529", "0.54927254", "0.54915", "0.5480392", "0.5479169", "0.54700565", "0.5455861", "0.54518783", "0.5444645", "0.5437061", "0.5435233", "0.5434459", "0.5419106", "0.5417407", "0.5405525", "0.53885955", "0.53793097", "0.53588617", "0.53481066", "0.5334722", "0.5329062", "0.5323061", "0.5321751", "0.52881783", "0.5273954", "0.52677685", "0.5265842", "0.5262474", "0.52585113", "0.52581465", "0.5248171", "0.52432895", "0.5243046", "0.5217092", "0.5216658", "0.521652", "0.52016485", "0.51999295", "0.51976603", "0.51902413", "0.5187089", "0.51870006", "0.51827836", "0.5182735", "0.5182543", "0.517912", "0.5173729" ]
0.0
-1
Plot the languages stored in the dictionaries
def plot_languages(dict_usage_complexities, dict_cognitive_complexity):
    attested_languages = (
        frozenset(['nor', 'and', 'or', 'not']),
        frozenset(['and', 'or', 'not']),
        frozenset(['and', 'not']),
        frozenset(['or', 'not']),
    )
    fig, ax = plt.subplots(figsize=(8.27,4))
    for name in dict_usage_complexities.keys():
        # if not any([i in ['nc', 'nic', 'bc', 'XOR', 'c', 'ic'] for i in name]) and 'not' in name:
        if 'not' in name:
        # if True:
            usage_complexity = dict_usage_complexities[name]
            cognitive_complexity = dict_cognitive_complexity[name]
            if name in attested_languages:
                color = 'red'
                zorder = 10
                if name == frozenset(['or', 'not']):
                    yshift = 0.4
                else:
                    yshift = 0
                ax.text(
                    usage_complexity + 0.02, cognitive_complexity + 0.3 + yshift,
                    s=','.join(name),
                    fontsize='x-small'
                )
            else:
                color='black'
                zorder = 1
            # ax.scatter(
            #     usage_complexity, cognitive_complexity,
            #     color=color,
            #     zorder=zorder
            # )
            # ax.text(
            #     usage_complexity, cognitive_complexity,
            #     s=','.join(name),
            #     fontsize='xx-small',
            #     rotation=90,
            #     color=color
            # )
            ax.scatter(usage_complexity, cognitive_complexity, color=color)
    ax.set_xlabel('Usage complexity')
    ax.set_ylabel('Conceptual complexity')
    # ax.set_xlim(0,3)
    ax.set_xlim(1.05, 2.8)
    # plt.show()
    plt.savefig('figure.png', dpi=300, transparent=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def visualize_vecDict(vecDict):\n for url in vecDict:\n plt.plot(vecDict[url])\n plt.legend([key for key in vecDict])\n plt.title(f'Vectors for {len(vecDict)} Documents')\n plt.xlabel('Vector Dimensions')\n plt.ylabel('Document Value')\n plt.show()", "def draw_all_plots(self):\n\n plot_names = []\n e = self.find_di_tri(self.lang_found)\n letter_dct = e[1]\n di_dct = e[2]\n tri_dct = e[3]\n\n plot_name = self.lang_found + '_letters'\n self.wykres(letter_dct, 'Wyres liter', 'litera', plot_name, 0)\n plot_names.append(plot_name)\n plot_name = self.lang_found + '_digram'\n self.wykres(di_dct, 'Wykres digramów', 'digram', plot_name, 1)\n plot_names.append(plot_name)\n plot_name = self.lang_found + '_trigram'\n self.wykres(tri_dct, 'Wykres trigramów', 'trigram', plot_name, 2)\n plot_names.append(plot_name)\n\n for cnt, plt_scn in enumerate(self.plot_scenes):\n pic = QtGui.QPixmap(self.img_dir + '/' + plot_names[cnt] + \".png\")\n plt_scn.setPixmap(pic.scaled(427, 320, Qt.KeepAspectRatio))", "def show_line(dict, xlabel=\"x\", ylabel=\"y\", title=\"title\"):\n plt.clf()\n plt.cla()\n plt.plot(list(dict.keys()), list(dict.values()), alpha=0.4, color = 'g')\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.title(title)\n plt.show()", "def plot(self, *args):\n return self.vocab().plot(*args)", "def dispersion_plot(self, words):\n from nltk.draw import dispersion_plot\n\n dispersion_plot(self, words)", "def __plot( dict1, title):\n plt.title(title)\n plt.xlabel(\"Run Time (microsecondsS)\")\n plt.ylabel(\"Number of Items Retreived\")\n plt.ylim(0, max(dict1.values()))\n plt.xlim(min(dict1.keys()), max(dict1.keys()) )\n x1, y1 = zip(*dict1.items())\n\n plt.scatter(x1, y1)\n plt.show()", "def plot_embeddings(M_reduced, word2Ind, words):\n\n # YOUR CODE HERE\n \n for i,type in enumerate(words):\n x_coor,y_coor = M_reduced[word2Ind[type]][0],M_reduced[word2Ind[type]][1]\n \n plt.scatter(x_coor, y_coor, marker='*', color='red')\n plt.text(x_coor+0.05, y_coor+0.05, type, fontsize=12)\n \n plt.show()\n \n #raise NotImplementedError()", "def graph():\n fp = mpl.font_manager.FontProperties(family='JasmineUPC',size=24)\n x = np.arange(0,10)\n y = [386557057065, 368368395622, 242451971944, 225960095934, 161573560379, 107461232731, 89784502211, 73749349545, 54525219632, 52864743212]\n name = ['เชื้อเพลิงที่ได้จากแร่', 'เครื่องจักรและส่วนประกอบ', 'ยานยนต์และส่วนประกอบ', 'เครื่องอุปกรณ์ไฟฟ้าและส่วนประกอบ', 'เหล็กและเหล็กกล้า', 'พลาสติกและของทำด้วยพลาสติก', 'ของทำด้วยเหล็กหรือเหล็กกล้า', 'ทองแดงละของทำด้วยทองแดง', 'เคมีภัณฑ์เบ็ดเตล็ด', 'อุปกรณ์ที่ใช้ทางทัศนศาสตร์']\n ax = plt.gca(xticks=x)\n ax.set_xticklabels(name,rotation=1000,fontproperties=fp)\n plt.bar(x,y,color='g')\n plt.show()", "def get_label_by_language(language):\n\n if language in [\"en4\", \"en5\", \"en15\"]:\n plot_label = \"Supreme Court EN\"\n shade = True\n ls = \"-\"\n c = \"C2\"\n elif language in [\"en2\", \"en8\", \"en18\"]:\n plot_label = \"EuroParl EN\"\n shade = True\n ls = \"-\"\n c = \"C8\"\n\n elif language in [\"de2\", \"de8\", \"de18\"]:\n plot_label = \"EuroParl DE\"\n shade = True\n ls = \"-\"\n c = \"C4\"\n\n elif language in [\"de5\", \"de15\"]:\n plot_label = \"BGH Strafsenat\"\n shade = True\n ls = \"-\"\n c = \"C0\"\n\n elif language in [\"de6\", \"de16\"]:\n plot_label = \"BGH Zivilsenat\"\n shade = True\n ls = \"-\"\n c = \"C1\"\n\n elif language in [\"de7\", \"de17\"]:\n plot_label = \"BGH DE\"\n shade = True\n ls = \"-\"\n\n else:\n plot_label = language\n shade = True\n ls = \"-\"\n c = \"C1\"\n\n if language in [\"de15\", 
\"de16\", \"en15\", \"de17\", \"de18\", \"en18\"]:\n plot_label += \" shuffled\"\n\n return plot_label, shade, ls, c", "def plot_MDS():\n lds = {} #lds is a dictionary of dictionaries: {\"slovenian.txt\": {\"abc\":3,\"efg\":4...}, \"macedonian.txt\":{\"abc\":5,\"efg\":6...},...}\n for fn in listdir(\"clustering\"):\n if fn.lower().endswith(\".txt\"):\n with open(join(\"clustering\", fn), encoding=\"utf8\") as f:\n text = f.read()\n nter = terke(text, n=3)\n lds[fn] = nter\n \n distances={} #a dictionary of dictionaries that saves the distances between a language and all other languages\n \n for x in lds.keys():\n distances[x]={}\n for y in lds.keys():\n if x == y: distances[x][y]=0.0\n else: distances[x][y]=cosine_dist(lds[x],lds[y])\n\n dst=np.zeros([len(lds.keys()), len(lds.keys())])\n i=0\n j=0\n for x in lds.keys():\n j=0\n for y in lds.keys():\n dst[i,j]=distances[x][y]\n j+=1\n i+=1\n\n X, languages = prepare_data_matrix()\n\n transformer = MDS(n_components=2, dissimilarity='precomputed')\n transformed = transformer.fit_transform(dst)\n\n plt.scatter(transformed [:,0], transformed [:,1])\n for i in range(len(transformed)):\n plt.text(transformed[i,0], transformed[i,1], languages[i][:3])\n plt.show()", "def plot_timecourse_language_types(lang_class_prop_over_gen_df, title, file_path, file_name):\n sns.set_style(\"darkgrid\")\n sns.set_context(\"talk\")\n\n fig, ax = plt.subplots()\n\n if len(possible_form_lengths) == 1:\n palette = sns.color_palette([\"black\", \"red\", \"green\", \"grey\"])\n else:\n palette = sns.color_palette([\"black\",\n sns.color_palette(\"colorblind\")[3],\n sns.color_palette(\"colorblind\")[1],\n sns.color_palette(\"colorblind\")[2],\n sns.color_palette(\"colorblind\")[9],\n sns.color_palette(\"colorblind\")[0],\n sns.color_palette(\"colorblind\")[7]])\n\n sns.lineplot(x=\"generation\", y=\"proportion\", hue=\"class\", data=lang_class_prop_over_gen_df, palette=palette)\n # sns.lineplot(x=\"generation\", y=\"proportion\", hue=\"class\", data=lang_class_prop_over_gen_df, palette=palette, ci=95, err_style=\"bars\")\n\n plt.tick_params(axis='both', which='major', labelsize=18)\n plt.tick_params(axis='both', which='minor', labelsize=18)\n plt.ylim(-0.05, 1.05)\n plt.title(title, fontsize=22)\n plt.xlabel('Generation', fontsize=20)\n plt.ylabel('Mean proportion', fontsize=20)\n handles, labels = ax.get_legend_handles_labels()\n\n labels = ['D', 'H', 'H+Div.', 'C', 'C+Red.-part', 'C+Red.-whole', 'O']\n\n # ax.legend(handles=handles[1:], labels=labels[1:])\n ax.legend(handles=handles, labels=labels)\n plt.tight_layout()\n plt.savefig(file_path + \"Timecourse_plot_lang_types_\" + file_name + \".png\")\n plt.show()", "def plot(self):\n fig, ax = plt.subplots()\n ticklabels = [item.strftime('%b %d') for item in self.series.index]\n ax.xaxis.set_major_formatter(ticker.FixedFormatter(ticklabels))\n\n plt.ylabel('#Cases')\n i = 0\n for y in self.countries:\n plt.plot(ticklabels, self.series[y], GRAPH_FORMATS[i], label=y)\n i += 1\n ax.set_xticklabels(ticklabels, rotation='vertical', fontsize=10)\n plt.legend()\n plt.grid()\n if self.log:\n plt.yscale(\"log\")\n plt.show()", "def list(self):\n for key, value in self.languages.iteritems():\n print key, value", "def plot_word_class_pr_genre(df):\n df['nouns'] = df['nouns'] * 100\n df['verbs'] = df['verbs'] * 100\n df['adverbs'] = df['adverbs'] * 100\n # plotting nouns\n plotting_helper_method('nouns', 'genre', df)\n plt.title('Amount of nouns pr song pr. 
genre')\n plt.xlabel(\"Amount of nouns in each song\")\n plt.ylabel('Genre')\n plt.legend()\n plt.show()\n # plt.savefig('src/visualization/feature_plots/nouns_pr_genre_plot')\n\n # plotting verbs\n plotting_helper_method('verbs', 'genre', df)\n plt.title('Amount of verbs pr song pr. genre')\n plt.xlabel('Amount of verbs in each song')\n plt.ylabel('Genre')\n plt.legend()\n plt.show()\n # plt.savefig('src/visualization/feature_plots/verbs_pr_genre_plot')\n\n # plotting adverbs\n plotting_helper_method('adverbs', 'genre', df)\n plt.title('Amount of adverbs pr song pr. genre')\n plt.xlabel('Amount of adverbs in each song')\n plt.ylabel('Genre')\n plt.legend()\n plt.show()\n # plt.savefig('src/visualization/feature_plots/adverbs_pr_genre_plot')", "def print_languages(config_filepath, label_filepath):\n load_classifier(config_filepath)\n label_filepath = os.path.abspath(label_filepath)\n wili_labels = wili.get_language_data(label_filepath)\n iso2name = dict([(el['ISO 369-3'], el['English'])\n for el in wili_labels])\n print(', '.join(sorted([iso2name[iso]\n for iso in classifier.get_mapping_languages()\n if iso != 'UNK'])))", "def plot_brain_words(brain_scores, plot_order):\n vals = list(zip(*[v.values() for v in brain_scores.values()]))\n labels = Embeddings.get_labels(brain_scores.keys())\n\n def plot_data(ax, data, ord_vocab, ord_name):\n dscores = {'fMRI': 5, 'MEG': 6}[data]\n word_vals = vals[dscores]\n scores = {}\n wordlists = {}\n for label, val in zip(labels, word_vals): # embeddings\n word_dict = {} # Init dictionary with Brain vocab so we have vectors with same length to plot\n for w in ord_vocab:\n word_dict[w] = 0\n for p in val: # participants\n for word_pair in p: # word pairs\n word_dict[word_pair['word1']] += word_pair['hit']\n word_dict[word_pair['word2']] += word_pair['hit']\n # word_dict = dict(((w, word_dict[w]) for w in ord_vocab)) # Sort by concreteness\n scores[label] = list(word_dict.values())\n wordlists[label] = list(word_dict.keys())\n\n # Convert to structured array\n score_arrays = dict2struct_array(scores)\n\n tsuffix = ord_name + ' synset score'\n # Sort by ord_name Embedding\n if ord_name != 'Median' and ord_name != 'Most concrete':\n if ord_name not in labels:\n ord_name = Embeddings.get_label(ord_name)\n tsuffix = 'ordered by ' + ord_name\n score_arrays = np.sort(score_arrays, order=ord_name)\n ord_vocab = [w for w, s in sorted(zip(wordlists[ord_name], scores[ord_name]), key=lambda x: x[1])]\n\n colours, linestyles, alphas = PlotColour.colour_by_modality(labels)\n # allhits = sum([hits for hits in scores.values()], [])\n # ax.set_yticklabels([i for i in range(min(allhits), max(allhits), 6)], rotation=90)\n plot_scores(score_arrays,\n vecs_names=labels,\n labels=None,\n colours=colours,\n linestyles=linestyles,\n title=f'{data} - {tsuffix}',\n alphas=alphas,\n xtick_labels=ord_vocab,\n ax=ax,\n show=False,\n type='scatter',\n swapaxes=True)\n ax.set_xlabel('Hit number')\n ax.yaxis.set_ticks(range(len(ord_vocab)))\n ax.set_yticklabels(ord_vocab, fontsize=14)\n\n if plot_order == 'concreteness':\n # Order by word concreteness\n word_concs = [[w] + list(wn_concreteness(w)) for w in DataSets.fmri_vocab]\n ord_med_vocab = [w for w, cme, cma in sorted(word_concs, key=lambda x: x[1])]\n ord_max_vocab = [w for w, cme, cma in sorted(word_concs, key=lambda x: x[2])]\n\n axs = [i for i in range(4)]\n fig, ((axs[0], axs[1]), (axs[2], axs[3])) = plt.subplots(2, 2, figsize=(20, 15))\n plot_data(axs[0], 'fMRI', ord_med_vocab, 'Median')\n plot_data(axs[1], 'MEG', 
ord_med_vocab, 'Median')\n plot_data(axs[2], 'fMRI', ord_max_vocab, 'Most concrete')\n plot_data(axs[3], 'MEG', ord_max_vocab, 'Most concrete')\n else:\n axs = [i for i in range(2)]\n fig, ((axs[0], axs[1])) = plt.subplots(1, 2, figsize=(20, 13))\n plot_data(axs[0], 'fMRI', DataSets.fmri_vocab, plot_order)\n plot_data(axs[1], 'MEG', DataSets.fmri_vocab, plot_order)\n\n legs, leglabels = PlotColour.get_legend() # Leave out WordNet concreteness [1:]\n fig.legend(legs[1:], leglabels[1:], loc=9, edgecolor='inherit', ncol=7, borderaxespad=-0.2, numpoints=1, fontsize=16)\n fig.tight_layout(pad=1.0)\n\n return fig", "def _plot_dict_scatter(d):\n xvals, yvals = _dict2lists(d)\n pylab.scatter(xvals, yvals)", "def plotRD(country,level,ngrams,timeint,totalgrams,color,ax=None):\n path = prepath+'{}/Level_{}/results_{}grams/{}hour_{}grams_RD.txt'.format(country,level,totalgrams,timeint,ngrams)\n # MODIFICAR SI AGREGAS MAS PAISES <--------------___!!!!!!!!\n dist4country = { \"Mexico\":np.arange(0,11), \"United_Kingdom\":np.arange(0,10),\"Spain\":np.arange(0,9),\"India\":np.arange(0,11),'Argentina':np.arange(0,11)}\n base=2\n distancias = np.power(base, dist4country[country]).astype(float)*3*1000 #definicion de distancias en km.\n data = np.loadtxt(path)\n \n if not ax:\n fig = plt.figure(figsize=(10,7))\n ax = fig.add_subplot(1,1,1)\n ax.set_title(\"{},{}Km,ngrams={},timeint={}\".format(country,distancias[level]/1000,ngrams,timeint))\n ax.grid(which='both')\n\n ax.plot(data[:,0],data[:,1],c=color,label='{},{}Km,$\\delta t$={},ng={}'.format(country,distancias[level]/1000,timeint,ngrams))\n ax.set_xlabel(\"$k$\")\n ax.set_ylabel('d(k)')\n ax.set_xscale('log')", "def get_variables_to_plot(dictionary):\n done = False\n keys = list(dictionary.keys())\n print_variables_found_on_file(keys)\n print(\"\\n\\nWhich variable is the x-axis? (Give the number.)\")\n x_axis = [keys[int(input()) - 1]]\n y_axis = []\n while not done:\n print_variables_found_on_file(keys)\n print(\"\\n\\nWhich variable is the y-axis? 
(Give the number.)\")\n y_axis.append(keys[int(input()) - 1])\n print(\"\\nX-axis: \" + x_axis[0] + \"\\nY-axis: \"+\n \"\".join([i+\", \" for i in y_axis]).strip(\", \"))\n done = not is_yes(input(\"\\nAdd more y-axis values?\\n\"))\n return x_axis+y_axis", "def plot_countryperskill(data_df, **args):\n name = args.get('name', 'VARIABLE NAME')\n idx = args.get('idx', data_df.index.values)\n order = args.get('order', np.array([9, 0, 1, 2, 3, 4, 5, 6, 8, 7], int))\n dd = args.get('dd', .7) # 3.3\n wdth = args.get('wdth', 8) # 7\n hght = args.get('hght', 4)\n markersize = 60\n target_y = args.get('target_y', 1)\n label_y = args.get('label_y', r'$\\rho$')\n colors14 = args.get('colors14', ['#a6cee3', '#1f78b4', '#b2df8a', '#33a02c', \\\n '#fb9a99', '#e31a1c', '#fdbf6f', '#ff7f00', \\\n '#cab2d6', '#6a3d9a', '#ffff99', '#b15928', \\\n '#dd1c77', '#8dd3c7'])\n plt.figure(facecolor='w', figsize=(wdth, hght))\n meth_labels = [r'$Lit$', r'$Lit^2$', r'$Lit^3$', r'$Lit^4$', r'$Lit^5$', \\\n r'$Pop$', r'$Pop^2$', r'$Lit^3Pop$', r'$Lit^2Pop$', r'$LitPop$']\n idx = idx[order]\n meth_labels = [meth_labels[i] for i in order]\n # empty plots for legend handlers:\n for i in np.arange(0, len(countries_sel)): # country\n plt.scatter([], [], marker='o', s=markersize, edgecolor='black', linewidth='.4',\\\n c=colors14[i], label=countries[countries_sel[i]])\n plt.legend()\n\n plt.scatter([0, len(idx)+dd], [0.7, 0.7], marker='.', lw=1, c='white') # legendspace\n\n # actual plotting:\n for i in np.arange(0, len(countries_sel)): # country\n for j in np.arange(0, len(idx)):\n # rp - pearson correlation:\n plt.scatter([j], data_df[countries[countries_sel[i]]][idx[j]], marker='o', \\\n s=markersize, edgecolor='black', linewidth='.4',\\\n alpha=1., c=colors14[i], zorder=j+10)\n if not target_y == 'none':\n plt.plot([0, j], [target_y, target_y], c='#d3d3d3', lw=5, ls='-', zorder=1)\n\n plt.xticks(np.arange(0, len(idx)), meth_labels, color='black', rotation=30)\n plt.grid(axis='y')\n # plt.xlabel('Method')\n plt.ylabel(label_y)\n plt.title(name)\n\n plt.savefig(os.path.join(output_path, experiment_name + '_' + 'allcountries_perScore_v4_' + name + '.pdf'),\\\n dpi=600, facecolor='w', edgecolor='w',\n orientation='portrait', papertype=None, format='pdf',\n transparent=False, bbox_inches=None, pad_inches=0.1,\n frameon=None, metadata=None)\n plt.show()", "def plot_individual_tm(xdict, ydict, xprop, yprop, documents, spline):\n figure_array = {}\n for item in documents:\n xlabel = \"\\\\textbf{\" + label_dict[xprop] + \"}\"\n ylabel = \"\\\\textbf{\" + label_dict[yprop] + \"}\"\n print str(item[\"path_id\"])\n x = xdict[item[\"path_id\"]]\n y = ydict[item[\"path_id\"]]\n # fig_title = item[\"path_id\"] + \"(\" + item[\"pretty_formula\"] + \")\" # Individual traces\n fig_title = yprop + item[\"cation_type\"] # Plot by cation\n figure_array[item[\"path_id\"]] = plt.figure(fig_title, figsize=(6,6), dpi=plotting_dpi)\n ax = figure_array[item[\"path_id\"]].add_subplot(111)\n ax.scatter(x,y, s=70, zorder=2, color=tm_color_dict[item[\"tm_type\"][0]], linewidths=2.5, edgecolors='black',\n label=item[\"tm_type\"][0])\n if spline:\n tck = interpolate.splrep(x, y, s=0)\n xnew = np.arange(0, 100, 0.1)\n splfit = interpolate.splev(xnew, tck, der=0)\n x = xnew\n y = splfit\n if item[\"path_id\"][-3:] == \"002\":\n ax.plot(x, y, linewidth=2.5, zorder=1, color=tm_color_dict[item[\"tm_type\"][0]], linestyle='dashed')\n else:\n ax.plot(x, y, linewidth=2.5, zorder=1, color=tm_color_dict[item[\"tm_type\"][0]])\n ax.set_xlabel(xlabel, 
fontsize=24)\n # ax.set_ylim([0,1200])\n # ax.set_xlim([7,22])\n ax.set_ylabel(ylabel, fontsize=24)\n ax.tick_params(axis='x', labelsize=22)\n ax.tick_params(axis='y', labelsize=22)\n border_width = 2\n [i.set_linewidth(border_width) for i in ax.spines.itervalues()]\n plt.tight_layout()\n plt.legend(loc='best', prop={'size': 14})\n plt.rc('text', usetex=True)\n plt.rc('font', family='sans-serif')\n plt.tight_layout()\n plt.show()", "def make_labels(painting):\n labels = {}\n for dcTitleLang, dcTitle in \\\n painting['object']['proxies'][0]['dcTitle'].iteritems():\n labels[dcTitleLang] = {'language': dcTitleLang, 'value': dcTitle[0]}\n return labels", "def plot_collective(xdict, ydict, xprop, yprop, documents):\n x_ion = {\"Mg\": [], \"Ca\": [], \"Zn\": [], \"Li\": [], \"Na\": []}\n y_ion = {\"Mg\": [], \"Ca\": [], \"Zn\": [], \"Li\": [], \"Na\": []}\n for item in documents:\n if item[\"path_id\"][-3:] == \"001\":\n x_ion[item[\"cation_type\"]].append(xdict[item[\"path_id\"]])\n y_ion[item[\"cation_type\"]].append(ydict[item[\"path_id\"]])\n fig = plt.figure(figsize=(6,6), dpi=plotting_dpi)\n ax = fig.add_subplot(111)\n for ion in [\"Mg\", \"Ca\", \"Zn\", \"Li\", \"Na\"]:\n ax.scatter(x_ion[ion], y_ion[ion], s=70, zorder=2, color=color_dict[ion], linewidths=2.5, edgecolors='black',\n label=ion)\n xlabel = \"\\\\textbf{\" + label_dict[xprop] + \"}\"\n ylabel = \"\\\\textbf{\" + label_dict[yprop] + \"}\"\n \n # # Plot lines for fitting, if useful\n # x2 = np.arange(-700, 3300, 1)\n # ax.plot(x2, x2)\n \n # # For setting axis boundaries\n # ax.set_xlim([-700, 3500])\n # ax.set_ylim([0,100])\n \n # Plot display settings\n ax.set_xlabel(xlabel, fontsize=24)\n ax.set_ylabel(ylabel, fontsize=24)\n ax.tick_params(axis='x', labelsize=22)\n ax.tick_params(axis='y', labelsize=22)\n border_width = 2\n [i.set_linewidth(border_width) for i in ax.spines.itervalues()]\n plt.tight_layout()\n plt.legend(loc='best', prop={'size':10})\n # plt.legend(loc='best')\n plt.rc('text', usetex=True)\n plt.rc('font', family='sans-serif')\n plt.show()", "def visualize_stats(diction, plot_image_name, wordcloud_image_name, plot_title, path):\n\n # sort dictionary by values\n sorted_dict = OrderedDict(sorted(diction.items(), key=lambda t: t[1]))\n # get 20 first key-value pairs of sorted dict\n topdict = dict(list(sorted_dict.items())[-20:])\n\n # make horizontal-bar plots\n sns.set_style(\"darkgrid\")\n ax = plt.figure().gca()\n ax.yaxis.grid(False)\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n plt.barh(range(len(topdict)), list(\n topdict.values()), align='center')\n plt.yticks(range(len(topdict)), list(topdict.keys()))\n plt.xlabel('Frequency')\n plt.title(plot_title)\n # save figure to an image\n plt.savefig(path + plot_image_name, bbox_inches=\"tight\")\n plt.close()\n\n # make word clouds (maximum 100 words)\n wc = WordCloud(width=900, height=600, max_words=100, relative_scaling=1,\n normalize_plurals=False, background_color='white').generate_from_frequencies(diction)\n plt.imshow(wc)\n plt.axis(\"off\")\n # save cloud to an image\n wc.to_file(path + wordcloud_image_name)\n plt.close()", "def plot_topic_words(args, k, word_list):\n\n fig, (axis) = plt.subplots(\n # change nrows and ncols if more plots\n nrows=1, ncols=1, figsize=(18, 9), dpi=80, facecolor=\"w\", edgecolor=\"k\"\n )\n ticks = [str(x) for x in timelist]\n\n tokens = [vocab.index(w) for w in word_list]\n # betas has one weight (?) 
per time slice for every token\n betas = [beta[k, :, x] for x in tokens]\n for i, comp in enumerate(betas):\n # comp is the list of weights for each slice\n axis.plot(\n comp, label=word_list[i], lw=2, linestyle=\"--\", marker=\"o\", markersize=4\n )\n axis.legend(frameon=False, fontsize=14)\n axis.set_xticks(np.arange(T))\n axis.set_xticklabels(timelist, fontsize=14)\n axis.set_title('Topic: ' + str(k), fontsize=20)\n axis.set_xlabel(xlabel, fontsize=16)\n axis.set_ylabel(\"Word Probability\", fontsize=16)\n fig.tight_layout()\n\n # Save plot to subdirectory in results directory\n sub_dir = os.path.join(\"word_evolutions\", os.path.basename(args.beta_file).split(\"_beta.mat\")[0])\n fig_path = os.path.join(os.path.dirname(args.beta_file), sub_dir, str(k) + \"_word_evolution.png\")\n # Make directory if it doesn't exist\n if not os.path.exists(os.path.dirname(fig_path)): os.makedirs(os.path.dirname(fig_path))\n plt.savefig(fig_path)\n plt.close()\n # plt.show()\n print(\"Figure saved to\", fig_path)", "def plot_lev(od):\r\n plt.bar(*zip(*od.items()))\r\n plt.title(\"Levenshtein distance distribution in the pre-pandemic period\")\r\n plt.xlabel(\"Levenshtein distance\")\r\n plt.ylabel(\"Number of pairs of edits\")\r\n plt.savefig('RQ2_lavenshtein_dis(Figure 4.8).png',dpi=500)", "def plot_genre_and_word_count(df):\n plotting_helper_method('word_count', 'genre', df)\n\n plt.title('Word count pr. genre')\n plt.xlabel('Word Count')\n plt.ylabel('Genre')\n plt.legend()\n plt.show()\n # plt.savefig('src/visualization/feature_plots/word_count_plot')", "def plot_wc(diction, filename):\n assert isinstance(diction, dict)\n assert isinstance(filename, str)\n #word_could_dict=Counter(g)\n custom_mask = np.array(Image.open(\"book.png\"))\n wordcloud = WordCloud(background_color=\"white\",\n #mode=\"RGBA\",\n #colormap='Dark2',\n #colormap='RdBu',\n colormap='BrBG',\n collocations=False,\n mask=custom_mask, contour_width=1,\n contour_color='black',\n width=1200, height=1000,\n max_font_size=80,\n scale=3,\n ).generate_from_frequencies(diction)\n #wc = WordCloud(background_color=\"white\", mask=custom_mask)\n #wc = WordCloud(background_color=\"white\", collocations=False, mask=custom_mask, contour_width=1, contour_color='gray')\n plt.imshow(wordcloud, interpolation='bilinear')\n plt.axis(\"off\")\n plt.savefig(\"{}.png\".format(filename))\n plt.show()", "def plot_collective_tm(xdict, ydict, xprop, yprop, documents):\n x_ion = {\"Ti\": [], \"V\": [], \"Fe\": [], \"Cr\": [], \"Mn\": [], \"Co\": [], \"Ni\": [], \"Mo\": []}\n y_ion = {\"Ti\": [], \"V\": [], \"Fe\": [], \"Cr\": [], \"Mn\": [], \"Co\": [], \"Ni\": [], \"Mo\": []}\n for item in documents:\n x_ion[item[\"tm_type\"][0]].append(xdict[item[\"path_id\"]])\n y_ion[item[\"tm_type\"][0]].append(ydict[item[\"path_id\"]])\n fig = plt.figure(figsize=(6,6), dpi=plotting_dpi)\n ax = fig.add_subplot(111)\n for ion in [\"Ti\", \"V\", \"Cr\", \"Mn\", \"Co\", \"Ni\"]:\n ax.scatter(x_ion[ion], y_ion[ion], s=70, zorder=2, color=tm_color_dict[ion], linewidths=2.5, edgecolors='black',\n label=ion)\n xlabel = \"\\\\textbf{\" + label_dict[xprop] + \"}\"\n ylabel = \"\\\\textbf{\" + label_dict[yprop] + \"}\"\n\n # # Plot lines for fitting, if useful\n # x2 = np.arange(-700, 3300, 1)\n # ax.plot(x2, x2)\n\n # # For setting axis boundaries\n # ax.set_xlim([-700, 3500])\n # ax.set_ylim([0,100])\n\n # Plot display settings\n ax.set_xlabel(xlabel, fontsize=24)\n ax.set_ylabel(ylabel, fontsize=24)\n ax.tick_params(axis='x', labelsize=22)\n ax.tick_params(axis='y', 
labelsize=22)\n border_width = 2\n [i.set_linewidth(border_width) for i in ax.spines.itervalues()]\n plt.tight_layout()\n plt.legend(loc='best', prop={'size':10})\n plt.rc('text', usetex=True)\n plt.rc('font', family='sans-serif')\n plt.show()", "def show_wordcloud(dictionary, title, min_font = 10):\n wordcloud = WordCloud(min_font_size=min_font).generate_from_frequencies(dictionary)\n plt.figure(figsize = (8, 8), facecolor = None) \n plt.imshow(wordcloud) \n plt.axis(\"off\")\n if title:\n plt.title(title)\n else:\n plt.title(\"Word Cloud\")\n plt.tight_layout(pad = 0) \n\n plt.show()", "def plot_avgs(li,arg):\n key = {'Word':1,'Paragraph':2}\n n_groups = len(article_list)\n en = []\n simple = []\n for sub_li in li:\n en.append(sub_li[1][key[arg]])\n simple.append(sub_li[2][key[arg]])\n\n fig, ax = plt.subplots()\n index = np.arange(n_groups)\n bar_width = 0.35\n\n rects1 = plt.bar(index, en, bar_width, alpha = 1, color = 'b', label = 'English')\n rects2 = plt.bar(index + bar_width, simple, bar_width, alpha = 1, color = 'r', label = 'Simple English')\n\n plt.xlabel('Article')\n plt.ylabel('Average Word Length')\n plt.title('Average ' + arg + ' Length of Simple English and English')\n plt.xticks(index + bar_width, article_list)\n plt.legend()\n\n plt.tight_layout()\n plt.show()", "def plot_countries(self):\n import matplotlib.pyplot as plt\n import seaborn as sns\n from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\n ###\n #check if internships are available in the given file\n ###\n to_read = str(self.exchanged_offers_filepath.get())\n if not os.path.isfile(to_read):\n tkMessageBox.showwarning(title=\"File doesn't exist\",message=\"The filename or the location you entered does not exist!\")\n return None\n else:\n self.exchanged = pd.read_csv(to_read,sep=',',usecols=['domestic offer code','foreign offer code','country','field','min duration','max duration'])\n \n if self.exchanged.empty:\n tkMessageBox.showwarning(title=\"No available data\",\n message=\"No exchanged offers are available in the given file! 
Add some offers first and try again later\")\n return None\n else:\n ###\n #use pandas functionalities for the plots\n ### \n frequency = pd.DataFrame() \n for country in self.exchanged['country'].unique():\n frequency[country] = [len(self.check_country(self.exchanged,country))]\n frequency = frequency.transpose()\n frequency.columns=['values']\n \n ###\n #making figure\n ###\n fig, ax = plt.subplots(figsize=(4,14))\n frequency.sort_values(by='values').plot(ax=ax,kind='barh',figsize=(4,10))\n ax.tick_params(axis='both', labelsize=16)\n fig.tight_layout()\n\n ###\n #show figure in new tkinter window\n ###\n figure_window_2 = tk.Toplevel()\n figure_window_2.title('Figure')\n \n ###\n #create label to put figure in\n ###\n figure_canvas = FigureCanvasTkAgg(fig,master=figure_window_2)\n figure_canvas.get_tk_widget().grid(column=0,row=0)", "def plot_data(filename):\n\twordlist,freqlist = most_common(filename,10)\n\tplt.plot(freqlist)\n\tplt.ylabel('frequency of words')\n\tplt.xlabel(wordlist)\n\tplt.show()", "def plot_data(self):", "def _plot_dict_line(d, label=None):\n xvals, yvals = _dict2lists(d)\n if label:\n pylab.plot(xvals, yvals, label=label)\n else:\n pylab.plot(xvals, yvals)", "def plot_PCA():\n X, languages = prepare_data_matrix()\n #print(X)\n eigenvectors, eigenvalues=power_iteration_two_components(X)\n explain = explained_variance_ratio(X, eigenvectors, eigenvalues)\n X=project_to_eigenvectors(X,eigenvectors)\n\n #print(X)\n plt.title('Explained variance: %.3f' % explain)\n plt.scatter(X[:,0], X[:,1])\n for i in range(len(X)):\n plt.text(X[i,0], X[i,1], languages[i][:3])\n plt.show()", "def main():\r\n wordfile=input(\"Enter word file: \")\r\n year=int(input(\"Enter year: \"))\r\n yrlist=printedWords(wordData.readWordFile(wordfile))\r\n total=wordsForYear(year,yrlist)\r\n print(\"Total printed words in\",year,\":\",total)\r\n import simplePlot\r\n labels = 'Year', 'Total Words'\r\n plot = simplePlot.plot2D('Number of printed words over time', labels)\r\n for yc in yrlist:\r\n point = yc.year, yc.count\r\n plot.addPoint(point)\r\n plot.display()", "def scatter_plot(dictionary, variable1, variable2): \r\n plt.clf() # Deletes the previous plot \r\n plt.scatter(dictionary[variable1], dictionary[variable2], color='purple')\r\n plt.title('Scatter plot between ' + variable1 + \" and \" + variable2)\r\n plt.xlabel(variable1)\r\n plt.ylabel(variable2)\r\n plt.savefig(variable1 + \" and \" + variable2)", "def continent_data_le(data):\n data_1997 = data[data.year == 1997]\n europe_1997 = data_1997[data_1997.continent == 'Europe']\n america_1997 = data_1997[data_1997.continent == 'Americas']\n\n plt.subplot(2, 1, 1)\n plt.title('Life Expectancy')\n plt.hist(europe_1997.lifeExpectancy)\n plt.ylabel('Europe ')\n\n plt.subplot(2, 1, 2)\n plt.hist(america_1997.lifeExpectancy)\n plt.ylabel('America')\n\n plt.show()", "def plot_individual(xdict, ydict, xprop, yprop, documents, spline):\n figure_array = {}\n for item in documents:\n xlabel = \"\\\\textbf{\" + label_dict[xprop] + \"}\"\n ylabel = \"\\\\textbf{\" + label_dict[yprop] + \"}\"\n x = xdict[item[\"path_id\"]]\n y = ydict[item[\"path_id\"]]\n # fig_title = item[\"path_id\"] + \"(\" + item[\"pretty_formula\"] + \")\" # Individual traces\n # fig_title = yprop + item[\"cation_type\"] # Plot by cation\n fig_title = yprop # All together\n figure_array[item[\"path_id\"]] = plt.figure(fig_title, figsize=(6,6), dpi=plotting_dpi)\n ax = figure_array[item[\"path_id\"]].add_subplot(111) \n ax.scatter(x,y, s=70, zorder=2, 
color=color_dict[item[\"cation_type\"]], linewidths=2.5, edgecolors='black')\n if spline:\n tck = interpolate.splrep(x, y, s=0)\n xnew = np.arange(0, 100, 0.1)\n splfit = interpolate.splev(xnew, tck, der=0)\n x = xnew\n y = splfit\n if item[\"path_id\"][-3:] == \"002\":\n ax.plot(x,y, linewidth=2.5, zorder=1, color=color_dict[item[\"cation_type\"]], linestyle='dashed')\n elif item[\"path_id\"][-3:] == \"003\":\n ax.plot(x,y, linewidth=2.5, zorder=1, color=color_dict[item[\"cation_type\"]], linestyle='dotted')\n else:\n ax.plot(x,y, linewidth=2.5, zorder=1, color=color_dict[item[\"cation_type\"]])\n ax.set_xlabel(xlabel, fontsize=24)\n # ax.set_ylim([0,1200])\n # ax.set_xlim([0,100])\n ax.set_ylabel(ylabel, fontsize=24)\n ax.tick_params(axis='x', labelsize=22)\n ax.tick_params(axis='y', labelsize=22)\n border_width = 2\n [i.set_linewidth(border_width) for i in ax.spines.itervalues()]\n plt.tight_layout()\n plt.legend(loc='best', prop={'size': 14})\n plt.rc('text', usetex=True)\n plt.rc('font', family='sans-serif')\n plt.tight_layout()\n plt.show()", "def plot_single(\n data_dict: dict,\n keys: str,\n x_ticks: List[str] = ['2015', '2016', '2017', '2018', '2019', '2020'],\n show_preprint: bool = False,\n title_text: str = '',\n figpath: str = 'comparison_plot.pdf',\n logscale=False,\n) -> None:\n\n sns.set_palette(sns.color_palette('colorblind', 10))\n plt.rcParams.update({'hatch.color': 'w'})\n plt.rcParams['figure.facecolor'] = 'white'\n plt.figure(figsize=(8, 5))\n\n arxiv, biorxiv, pubmed, medrxiv, chemrxiv, preprint = [], [], [], [], [], []\n\n for key in keys:\n try:\n arxiv.append(data_dict[key]['arxiv'])\n biorxiv.append(data_dict[key]['biorxiv'])\n medrxiv.append(data_dict[key]['medrxiv'])\n chemrxiv.append(data_dict[key]['chemrxiv'])\n pubmed.append(data_dict[key]['pubmed'])\n except KeyError:\n raise KeyError(\n f'Did not find all DBs for {key}, only found {data_dict[key].keys()}'\n )\n preprint.append(arxiv[-1] + biorxiv[-1] + medrxiv[-1] + chemrxiv[-1])\n\n ind = np.arange(len(arxiv[0])) # the x locations for the groups\n width = [0.75] * len(ind) # the width of the bars: can also be len(x) sequence\n fnc = np.log10 if logscale else np.copy\n\n plts = []\n legend_plts = []\n if show_preprint:\n bars = [pubmed, preprint]\n legend_platform = ['PubMed', 'Preprint']\n if logscale:\n sums = np.array(pubmed) + np.array(preprint)\n logsums = np.log10(sums)\n bars = [pubmed * logsums / sums, preprint * logsums / sums]\n\n else:\n bars = [pubmed, arxiv, biorxiv, chemrxiv, medrxiv]\n legend_platform = ['PubMed', 'ArXiv', 'BiorXiv', 'ChemRxiv', 'MedRxiv']\n if logscale:\n sums = (\n np.array(pubmed)\n + np.array(arxiv)\n + np.array(biorxiv)\n + np.array(chemrxiv)\n + np.array(medrxiv)\n )\n logsums = np.log10(sums)\n bars = [\n pubmed * logsums / sums,\n arxiv * logsums / sums,\n biorxiv * logsums / sums,\n chemrxiv * logsums / sums,\n medrxiv * logsums / sums,\n ]\n for idx in range(len(keys)):\n bottom = 0\n\n for bidx, b in enumerate(bars):\n if idx == 0:\n p = plt.bar(\n ind,\n b[idx],\n width,\n linewidth=1,\n edgecolor='k',\n bottom=bottom,\n )\n else:\n p = plt.bar(\n ind,\n b[idx],\n width,\n color=next(iter(plts[bidx])).get_facecolor(),\n linewidth=1,\n edgecolor='k',\n bottom=bottom,\n )\n\n bottom += b[idx]\n plts.append(p)\n legend_plts.append(\n plt.bar(ind, np.zeros((len(ind),)), color='k', bottom=bottom)\n )\n\n plt.ylabel('Counts', size=17) if not logscale else plt.ylabel(\n 'Counts (log scale)', size=17\n )\n plt.xlabel('Years', size=17)\n plt.title(title_text, size=17)\n 
# Customize minor tick labels\n\n plt.xticks(ind, x_ticks, size=14)\n ymax = plt.gca().get_ylim()[1]\n if logscale:\n yticks = np.arange(1, ymax).astype(int)\n plt.yticks(yticks, np.power(10, yticks))\n\n plt.tick_params(axis='y', labelsize=17)\n\n plt.legend(\n legend_platform,\n prop={'size': 14},\n loc='upper left',\n title='Platform:',\n title_fontsize=17,\n ncol=1,\n )\n\n get_step_size = lambda x: round(x / 10, -math.floor(math.log10(x)) + 1)\n ymax = plt.gca().get_ylim()[1]\n\n for y_step in plt.yticks()[0]:\n plt.hlines(y_step, xmax=10, xmin=-1, color='black', linewidth=0.1)\n plt.xlim([-0.5, len(ind)])\n plt.ylim([0, ymax * 1.02])\n\n plt.tight_layout()\n plt.savefig(figpath)\n plt.show()", "def _put_labels_in_chr(self, languagename, langs, curr_lang):\n chrdict = self._chrdict\n varlist = self._varlist\n vlblist = self._vlblist\n lbllist = self._lbllist\n \n old_varlab_key = \"_lang_v_\" + curr_lang\n old_vallab_key = \"_lang_l_\" + curr_lang\n \n # change _lang_c and _lang_list, \n # and put data_label in chrdict if non-empty\n if \"_dta\" not in chrdict:\n chrdict[\"_dta\"] = {}\n dta_dict = chrdict[\"_dta\"]\n dta_dict[\"_lang_c\"] = languagename\n dta_dict[\"_lang_list\"] = \" \".join(langs) + \" \" + languagename\n if self._data_label != '':\n dta_dict[old_varlab_key] = self._data_label\n \n # put current variable and value labels in chrdict\n for varname, i in zip(varlist, range(self._nvar)):\n varlab = vlblist[i]\n vallab = lbllist[i]\n \n if varlab == '' and vallab == '': # then nothing to store\n continue\n \n if varname not in chrdict:\n chrdict[varname] = {}\n \n var_dict = chrdict[varname]\n \n # store current if non-empty\n if varlab != '': var_dict[old_varlab_key] = varlab\n if vallab != '': var_dict[old_vallab_key] = vallab", "def plot_ablation_losses():\n\n path1 = os.path.join(path_to_here, '../data/landscape_visualizations/DMSO/original/losses00-54.pickle')\n file = open(path1, 'rb')\n losses_array_1 = pickle.load(file)\n losses_array_1 = losses_array_1[1:, :]\n\n path2 = os.path.join(path_to_here, '../data/landscape_visualizations/DMSO/D_t/losses00-44.pickle')\n file = open(path2, 'rb')\n losses_array_2 = pickle.load(file)\n losses_array_2 = losses_array_2\n\n path3 = os.path.join(path_to_here, '../data/landscape_visualizations/DMSO/D_x_y/losses00-44.pickle')\n file = open(path3, 'rb')\n losses_array_3 = pickle.load(file)\n losses_array_3 = losses_array_3\n\n path4 = os.path.join(path_to_here, '../data/landscape_visualizations/DMSO/homog/losses00-76.pickle')\n file = open(path4, 'rb')\n losses_array_4 = pickle.load(file)\n losses_array_4 = losses_array_4[1:, :]\n\n paths = [path1, path2, path3, path4]\n\n\n\n\n # 'pdf', 'BC', 'pde', 'total', 'norm'\n fig = plt.figure(figsize = (2.1, 1.5))\n\n\n for path, losses_array, label in zip(paths, [losses_array_1, losses_array_2, losses_array_3, losses_array_4], ['D(x, y, t)', 'D(t)', 'D(x, y)', 'D']):\n\n print('shape', losses_array.shape)\n\n losses_base = os.path.basename(path)\n total_losses = losses_array[3, :] # total losses are idx 3 for these runs\n idxs = []\n means = []\n for idx in range(len(total_losses)-200):\n num_half_hrs = int(losses_base[6:8]) + idx*(int(losses_base[9:11])-int(losses_base[6:8]))/len(total_losses)\n if num_half_hrs < 55:\n idxs.append(num_half_hrs)\n means.append(np.mean(total_losses[idx:idx+200]))\n\n\n plt.scatter([i/2 for i in idxs], np.log10(means), s = 0.1, label = label)\n\n plt.ylabel(r'$log_{10}L_{total}$', fontsize = 6, labelpad = 1)\n plt.xlabel('Hours trained', fontsize = 6, 
labelpad = 1)\n plt.tick_params(axis = 'both', labelsize = 6)\n plt.tight_layout()\n plt.legend(fontsize = 6)\n\n\n\n plt.savefig(path_to_here+'/../outputs/ablation.png', dpi = 1200)", "def plot_comparison(\n data_dict: dict,\n keys: List[str],\n x_ticks: List[str] = ['2015', '2016', '2017', '2018', '2019', '2020'],\n show_preprint: bool = False,\n title_text: str = '',\n keyword_text=None,\n figpath: str = 'comparison_plot.pdf',\n) -> None:\n\n sns.set_palette(sns.color_palette(\"colorblind\", 10))\n plt.rcParams.update({'hatch.color': 'w'})\n plt.rcParams['figure.facecolor'] = 'white'\n plt.figure(figsize=(8, 5))\n\n arxiv, biorxiv, pubmed, medrxiv, chemrxiv, preprint = [], [], [], [], [], []\n\n for key in keys:\n try:\n arxiv.append(data_dict[key]['arxiv'])\n biorxiv.append(data_dict[key]['biorxiv'])\n medrxiv.append(data_dict[key]['medrxiv'])\n chemrxiv.append(data_dict[key]['chemrxiv'])\n pubmed.append(data_dict[key]['pubmed'])\n except KeyError:\n raise KeyError(\n f'Did not find all DBs for {key}, only found {data_dict[key].keys()}'\n )\n preprint.append(arxiv[-1] + biorxiv[-1] + medrxiv[-1] + chemrxiv[-1])\n\n ind = np.arange(len(arxiv[0])) # the x locations for the groups\n width = [0.2] * len(ind) # the width of the bars: can also be len(x) sequence\n if len(keys) == 2:\n pos = [-0.2, 0.2]\n elif len(keys) == 3:\n pos = [-0.3, 0.0, 0.3]\n\n plts = []\n legend_plts = []\n patterns = ('|||', 'oo', 'xx', '..', '**')\n if show_preprint:\n bars = [pubmed, preprint]\n legend_platform = ['PubMed', 'Preprint']\n else:\n bars = [pubmed, arxiv, biorxiv, chemrxiv, medrxiv]\n legend_platform = ['PubMed', 'ArXiv', 'BiorXiv', 'ChemRxiv', 'MedRxiv']\n for idx in range(len(keys)):\n bottom = 0\n\n for bidx, b in enumerate(bars):\n if idx == 0:\n p = plt.bar(\n ind + pos[idx],\n b[idx],\n width,\n linewidth=1,\n edgecolor='k',\n bottom=bottom,\n )\n else:\n p = plt.bar(\n ind + pos[idx],\n b[idx],\n width,\n color=next(iter(plts[bidx])).get_facecolor(),\n linewidth=1,\n edgecolor='k',\n bottom=bottom,\n )\n\n bottom += b[idx]\n plts.append(p)\n legend_plts.append(\n plt.bar(ind + pos[idx], np.zeros((len(ind),)), color='k', bottom=bottom)\n )\n\n plt.ylabel('Counts', size=15)\n plt.xlabel('Years', size=15)\n plt.title(f\"Keywords: {title_text}\", size=14)\n # Customize minor tick labels\n plt.xticks(ind, x_ticks, size=10)\n\n legend = plt.legend(\n legend_platform,\n prop={'size': 12},\n loc='upper left',\n title='Platform:',\n title_fontsize=13,\n ncol=1,\n )\n\n # Now set the hatches to not destroy legend\n\n for idx, stackbar in enumerate(plts):\n pidx = int(np.floor(idx / len(bars)))\n for bar in stackbar:\n bar.set_hatch(patterns[pidx])\n\n for idx, stackbar in enumerate(legend_plts):\n for bar in stackbar:\n bar.set_hatch(patterns[idx])\n\n if not keyword_text:\n keyword_text = [''] * len(keys)\n\n plt.legend(\n legend_plts,\n keyword_text,\n loc='upper center',\n prop={'size': 12},\n title='Keywords (X):',\n title_fontsize=13,\n )\n plt.gca().add_artist(legend)\n\n get_step_size = lambda x: round(x / 10, -math.floor(math.log10(x)) + 1)\n ymax = plt.gca().get_ylim()[1]\n step_size = np.clip(get_step_size(ymax), 5, 1000)\n y_steps = np.arange(0, ymax, step_size)\n\n for y_step in y_steps:\n plt.hlines(y_step, xmax=10, xmin=-1, color='black', linewidth=0.1)\n plt.xlim([-0.5, len(ind)])\n plt.ylim([0, ymax * 1.02])\n\n plt.tight_layout()\n plt.savefig(figpath)\n plt.show()", "def plot_dict_bar(aDictionary):\n\t# Convert strings to float\n\tfor key in aDictionary:\n\t\taDictionary[key] = 
float(aDictionary[key])\n\t\t\n\t# Plot the result\n\tplt.bar(range(len(aDictionary)), aDictionary.values(), align='center')\n\tplt.xticks(range(len(aDictionary)), aDictionary.keys(), rotation=90)\n\t\n\tplt.show()", "def show_graph(d:dict):\n x = []\n y = []\n for key, value in d.items():\n x.append(str(key))\n y.append(value)\n\n x_pos = [i for i, _ in enumerate(x)]\n plt.figure()\n plt.bar(x_pos, y, color='green')\n plt.xlabel(\"Size\")\n plt.ylabel(\"Number of images\")\n plt.title(\"Count by size\")\n plt.xticks(x_pos, x)", "def display_eng_word():\n en_word = rand_word[\"English\"] # Grabs the English word of the current word\n canvas.itemconfig(card_title, text=\"English\", fill=\"white\") # Change screen title to English\n canvas.itemconfig(card_word, text=en_word, fill=\"white\") # Display the english word of the current displaying french word\n canvas.itemconfig(canvas_image, image=back_image) # Changes the background", "def plot(self, *names):\r\n for name in names:\r\n if name in self.__obs.keys():\r\n list_obs = self.__obs[name]\r\n if not isinstance(list_obs[0], matrix):\r\n fig = plt.figure()\r\n plt.plot(self.__obs[name])\r\n else:\r\n fig = plt.figure()\r\n for i in range(list_obs[0].size):\r\n plt.plot([float(obs[i]) for obs in list_obs], label=\"Dimension {0}\".format(i))\r\n plt.legend()\r\n plt.ylabel(name)\r\n plt.show()\r\n else:\r\n for sous_objet in self.__sous_objets:\r\n if re.match((sous_objet+\"?\").encode('string-escape'), name.lower()):\r\n self.__dict__[sous_objet].plot(name)", "def display_gender_freq(d, title):\n he_val = []\n she_val = []\n authors = []\n\n for entry in d:\n authors.append(entry)\n he_val.append(d[entry][0])\n she_val.append(d[entry][1])\n\n fig, ax = plt.subplots()\n plt.ylim(0, 1)\n\n index = np.arange(len(d.keys()))\n bar_width = 0.35\n opacity = 0.4\n\n he_val = tuple(he_val)\n she_val = tuple(she_val)\n authors = tuple(authors)\n\n rects1 = ax.bar(index, he_val, bar_width, alpha=opacity, color='b', label='He')\n rects2 = ax.bar(index + bar_width, she_val, bar_width, alpha=opacity, color='r', label='She')\n\n ax.set_xlabel('Authors')\n ax.set_ylabel('Frequency')\n ax.set_title('Gendered Pronouns by Author')\n ax.set_xticks(index + bar_width / 2)\n plt.xticks(fontsize=8, rotation=90)\n ax.set_xticklabels(authors)\n ax.legend()\n\n fig.tight_layout()\n filepng = \"visualizations/he_she_freq\" + title + \".png\"\n filepdf = \"visualizations/he_she_freq\" + title + \".pdf\"\n plt.savefig(filepng, bbox_inches='tight')\n plt.savefig(filepdf, bbox_inches='tight')", "def oneplot(moviedict, toplotdict, figw, figh, figdpi, fontsz, border, ylabel, ylim, time, ymin, \n lw):\n print(toplotdict.keys())\n \n fontv = mpl.font_manager.FontProperties()\n # Uncomment line below to set the font to verdana; the default matplotlib font is very \n # similar (just slightly narrower).\n fontv = mpl.font_manager.FontProperties(fname='/usr/share/matplotlib/mpl-data/fonts/ttf/arial.ttf')\n fontv.set_size(fontsz)\n \n fonti = mpl.font_manager.FontProperties()\n # Uncomment line below to set the font to verdana; the default matplotlib font is very \n #similar (just slightly narrower).\n fonti = mpl.font_manager.FontProperties(fname='/usr/share/matplotlib/mpl-data/fonts/ttf/ariali.ttf')\n fonti.set_size(fontsz)\n \n fig1 = plt.figure(figsize=(figw, figh), dpi=figdpi, facecolor='w', edgecolor='k')\n \n #Plots data on one graph with parameters specified in the moviedict directory.\n for k, v in moviedict.iteritems():\n print(k)\n cond1, offset, color, inum = v\n 
xvals = toplotdict[k][0]\n data = toplotdict[k][1] + offset\n condition = cond1\n \n\n plt.plot(xvals, data, color, linewidth=0.75, label=condition)\n print(condition)\n #if k == 'mov_20110113_180524':\n #plt.text(0.5, offset+7, inum+condition, horizontalalignment='left', \n #fontproperties=fontv)\n #else:\n #plt.text(0.5, offset+9, inum+condition, horizontalalignment='left', \n #fontproperties=fontv)\n \n # Plots text labels\n \n if k == 'mov_20101130_200030':\n plt.text(0.03, offset+6, inum, horizontalalignment='left', fontproperties=fontv)\n plt.text(0.11, offset+6.5, condition, horizontalalignment='left', \n fontproperties=fonti)\n if k == 'mov_20101130_201605':\n plt.text(0.03, offset+5, inum, horizontalalignment='left', fontproperties=fontv)\n plt.text(0.125, offset+5.5, condition, horizontalalignment='left', \n fontproperties=fonti)\n if k == 'mov_20101213_193258':\n plt.text(0.03, offset+3, inum, horizontalalignment='left', fontproperties=fontv)\n plt.text(0.137, offset+3.5, condition, horizontalalignment='left', \n fontproperties=fonti)\n\n ax = plt.gca()\n\n ## Plots legend.\n #legend = plt.legend()\n ### Manipulates order of the legend entries.\n ##handles, labels = ax.get_legend_handles_labels()\n ##handles2 = handles[0], handles[2], handles[1], handles[3]\n ##labels2 = labels[0], labels[2], labels[1], labels[3]\n ##legend = ax.legend(handles2, labels2, bbox_to_anchor=(0, 0, 1, 1), \n ##transform=plt.gcf().transFigure)\n ### Changes legend font to fontsz.\n #ltext = legend.get_texts()\n #plt.setp(ltext, fontsize=fontsz)\n ### Removes border around the legend.\n #legend.draw_frame(False)\n \n \n #Uncomment lines below to display without top and right borders.\n if border == 'no':\n for loc, spine in ax.spines.iteritems():\n if loc in ['left','bottom']:\n pass\n elif loc in ['right','top']:\n spine.set_color('none') # don't draw spine\n else:\n raise ValueError('unknown spine location: %s'%loc)\n \n #Uncomment lines below to display ticks only where there are borders.\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n ## Removes tick labels and ticks from yaxis.\n ax.axes.yaxis.set_major_locator(matplotlib.ticker.NullLocator())\n \n # Specifies axis labels and axis tick label sizes.\n plt.xlabel(XLABEL, fontproperties=fontv, labelpad=4)\n plt.ylabel(ylabel, fontproperties=fontv, labelpad=4)\n plt.xticks(fontproperties=fontv)\n plt.yticks(fontproperties=fontv)\n \n # Specifies axis limits.\n plt.axis( [0, time, ymin, ylim])\n \n # Adjusts the space between the plot and the edges of the figure; (0,0) is the lower lefthand corner of the figure.\n fig1.subplots_adjust(top=0.95)\n fig1.subplots_adjust(left=0.15)\n #fig1.subplots_adjust(right=0.95)\n fig1.subplots_adjust(bottom=0.15)", "def plot_skillpercountry(data_df, **args):\n\n name = args.get('name', 'VARIABLE NAME')\n idx = args.get('idx', data_df.index.values)\n dd = args.get('dd', 5.8) # 3.3\n wdth = args.get('wdth', 8) # 7\n hght = args.get('hght', 4)\n markersize = 60\n target_y = args.get('target_y', 1)\n label_y = args.get('label_y', r'$\\rho$')\n meth_labels = [r'$Lit$', r'$Lit^2$', r'$Lit^3$', r'$Lit^4$', r'$Lit^5$', \\\n r'$Pop$', r'$Pop^2$', r'$Lit^3Pop$', r'$Lit^2Pop$', r'$LitPop$']\n\n plt.figure(facecolor='w', figsize=(wdth, hght))\n\n for i in np.arange(0, len(methods_show)):\n if not methods_show[i]:\n markers_list[i] = ''\n else:\n plt.scatter([], [], marker=markers_list[i], lw=1, c=colors3[c3_10[i]], \\\n s=markersize, edgecolor='black', linewidth='.4', label=meth_labels[i])\n 
plt.legend()\n # legendspace:\n plt.scatter([0, len(idx)+dd], [0.7, 0.7], marker='.', lw=1, c='white')\n\n # actual plotting:\n for i in countries_sel: # country\n for j in np.arange(0, len(idx)):\n # rp - pearson correlation:\n plt.scatter([i], data_df[countries[i]][idx[j]], marker=markers_list[j], \\\n c=colors3[c3_10[j]],\\\n s=markersize, edgecolor='black', linewidth='.5',\\\n alpha=1., zorder=j+10)\n if not target_y == 'none':\n plt.plot([0, i], [target_y, target_y], c='#d3d3d3', lw=5, ls='-', zorder=1)\n\n plt.xticks(countries_sel, [countries[i] for i in countries_sel], color='black')\n plt.grid(axis='y')\n plt.xlabel('Country')\n plt.ylabel(label_y)\n plt.title(name)\n\n plt.savefig(os.path.join(output_path, experiment_name + '_' + 'allcountries_v4_' + name + '.pdf'),\\\n dpi=600, facecolor='w', edgecolor='w',\n orientation='portrait', papertype=None, format='pdf',\n transparent=False, bbox_inches=None, pad_inches=0.1,\n frameon=None, metadata=None)\n plt.show()", "def plotting(self, figsize=(12, 12), types=['freqs']):\n ax = plt.figure(figsize=figsize)\n if 'freqs' in types:\n count_dict = self.count_freq(types=1)\n plt.title(\n f'Total keys in count_dict: {sum(list(count_dict.values()))}')\n barh = plt.barh(list(count_dict.keys()), list(count_dict.values()), color=[\n np.random.rand(3,) for _ in range(self.categories)])\n for rect in barh:\n height = rect.get_height()\n plt.text(rect.get_x() + rect.get_height()/2.0, height,\n '%d' % int(height), ha='center', va='bottom')\n\n plt.legend()\n plt.show()", "def plot_embedding(X, y, d, title=None):\n x_min, x_max = np.min(X, 0), np.max(X, 0)\n X = (X - x_min) / (x_max - x_min)\n\n # Plot colors numbers\n plt.figure(figsize=(10,10))\n ax = plt.subplot(111)\n for i in range(X.shape[0]):\n # plot colored number\n plt.text(X[i, 0], X[i, 1], str(y[i]),\n color=plt.cm.bwr(d[i] / 1.),\n fontdict={'weight': 'bold', 'size': 9})\n\n plt.xticks([]), plt.yticks([])\n if title is not None:\n plt.title(title)", "def plot_embedding(X, y, d, title=None):\n x_min, x_max = np.min(X, 0), np.max(X, 0)\n X = (X - x_min) / (x_max - x_min)\n\n # Plot colors numbers\n plt.figure(figsize=(10, 10))\n plt.subplot(111)\n for i in range(X.shape[0]):\n # plot colored number\n plt.text(X[i, 0], X[i, 1], str(y[i]),\n color=plt.cm.bwr(d[i] / 1.),\n fontdict={'weight': 'bold', 'size': 9})\n plt.xticks([]), plt.yticks([])\n if title is not None:\n plt.title(title)", "def plot_embeddings(M_reduced, word2Ind, words):\n\n # ------------------\n # Write your implementation here.\n x_coords = M_reduced[:, 0]\n y_coords = M_reduced[:, 1]\n \n for word in words:\n idx = word2Ind[word]\n embedding = M_reduced[idx]\n x = embedding[0]\n y = embedding[1]\n \n plt.scatter(x, y, marker='x', color='red')\n plt.text(x, y, word, fontsize=9)\n plt.show()\n\n # ------------------", "def plot(self):\n return self.graph(edge_labels='words_in_out').plot()", "def base_plot_keys(self):\r\n plot_keys = [\"loss\", \"l1_loss\", \"mse_loss\", \"dur_loss\"]\r\n \r\n if self.use_fe_condition:\r\n plot_keys += [\"pitch_loss\", \"energy_loss\"]\r\n return plot_keys", "def plot_1d (cities):\n # find the first selected city\n city = None\n for c in cities:\n if 'selected' in c:\n city = c\n if not city:\n print('No city selected for plot_1d')\n return\n\n points_per_day = ('morn', 'day', 'eve', 'night')\n nb_per_day = len(points_per_day)\n \n T = [ measure['temp'][key] - KELVIN\n for measure in city['data']\n for key in points_per_day ]\n X = range(1,len(T)+1)\n \n bar_plot = plot.bar (X, T, 0.1)\n\n 
plot.ylabel(u'ªC')\n plot.title(u'Temperatures in {}'.format(xpath(city, 'city/name')))\n\n D = [ date_repr(measure['dt']) for measure in city['data'] ]\n Dx = [ 4*n+2 for n in range(len(city['data']))]\n plot.xticks(Dx, D, rotation='vertical')\n #plot.yticks(np.arange(0,81,10))\n #plot.legend( (p1[0], p2[0]), ('Men', 'Women') )\n\n plot.show()", "def plot(self, context=None):\n\n response = requests.get(self.url).content\n table = pd.read_html(response, attrs={\"id\": \"main_table_countries_today\"})\n df = table[0].fillna(0)\n # df.drop(df.index[0], inplace=True) # World\n df.drop([\"ActiveCases\", 'Serious,Critical', 'Serious,Critical', 'Deaths/1M pop', 'Tests/ 1M pop'], axis=1, inplace=True)\n df.drop(df.columns[6], axis=1, inplace=True)\n\n if len(context) > 3:\n context = context.lower().capitalize()\n df = df.loc[df[\"Country,Other\"] == context]\n if 4 > len(context) > 1:\n context = context.upper()\n df = df.loc[df[\"Country,Other\"] == context]\n if len(context) <= 1:\n df = df[1:]\n\n C_Names = df[\"Country,Other\"].head(n=10).values.tolist()\n T_Cases = df[\"TotalCases\"].head(n=10).values.tolist()\n # N_Cases = df[\"NewCases\"].head(n=10).values.tolist() # not plotted\n T_Deaths = df[\"TotalDeaths\"].head(n=10).values.tolist()\n # N_Deaths = df[\"NewDeaths\"].head(n=10).values.tolist() # not plotted\n T_Recovered = df[\"TotalRecovered\"].head(n=10).values.tolist()\n T_Tests = df[\"TotalTests\"].head(n=10).values.tolist()\n\n x = np.arange(len(C_Names))\n width = 0.20\n\n fig, ax = plt.subplots()\n\n ax.bar(x - 0.30, T_Cases, width, label='TotalCases', color=\"Blue\")\n ax.bar(x - 0.10, T_Deaths, width, label='TotalDeaths', color=\"Red\")\n ax.bar(x + 0.10, T_Tests, width, label='TotalTests', color=\"Green\")\n ax.bar(x + 0.30, T_Recovered, width, label='TotalRecovered', color=\"Orange\")\n\n if len(context) > 1:\n ax.set_title(\"{}'s Situation\".format(context))\n else:\n ax.set_title(\"World's Top10 Situation\")\n\n ax.set_xticks(x)\n ax.set_xticklabels(C_Names)\n ax.legend()\n plt.ticklabel_format(style='plain', axis=\"y\")\n fig.set_size_inches(18.5, 10.5)\n fig.tight_layout()\n plt.grid()\n\n if len(context) > 1:\n font1 = {'family': 'serif',\n 'color': 'blue',\n 'weight': 'bold',\n 'size': 20}\n font2 = {'family': 'serif',\n 'color': 'red',\n 'weight': 'normal',\n 'size': 20}\n font3 = {'family': 'serif',\n 'color': 'green',\n 'weight': 'normal',\n 'size': 20}\n font4 = {'family': 'serif',\n 'color': 'orange',\n 'weight': 'normal',\n 'size': 20}\n\n # bbox=dict(facecolor='black', alpha=0.5)\n plt.text(0.863, 0.67, \"Total Cases:\\n{:,}\".format(int(T_Cases[0])), fontdict=font1, transform=ax.transAxes)\n plt.text(0.863, 0.57, \"Total Deaths:\\n{:,}\".format(int(T_Deaths[0])), fontdict=font2, transform=ax.transAxes)\n plt.text(0.863, 0.47, \"Total Tests:\\n{:,}\".format(int(T_Tests[0])), fontdict=font3, transform=ax.transAxes)\n plt.text(0.863, 0.37, \"Total Recovered:\\n{:,}\".format(int(T_Recovered[0])), fontdict=font4, transform=ax.transAxes)\n\n # plt.savefig('corona.png') # Uncomment it to save the figure\n plt.show()", "def visualization(positive_weights_dict, negative_weights_dict, l2_list):\n plt.figure()\n cmap_positive = plt.get_cmap('Reds')\n cmap_negative = plt.get_cmap('Blues')\n xx = l2_list\n plt.plot(xx, [0.]*len(xx), '--', lw=1, color='k')\n i, j = 0, 0\n for key, value in positive_weights_dict.items():\n color = cmap_positive(0.8*((i+1)/(5*1.2)+0.15))\n plt.plot(l2_list, value, '-', label=key, linewidth=4.0, color=color)\n i += 1\n for key, value in 
negative_weights_dict.items():\n color = cmap_negative(0.8*((j+1)/(5*1.2)+0.15))\n plt.plot(l2_list, value, '-', label=key, linewidth=4.0, color=color)\n j += 1\n \n plt.legend(loc='best', ncol=3, prop={'size':16}, columnspacing=0.5)\n plt.axis([1, 1e5, -1, 2])\n plt.title('Coefficient path')\n plt.xlabel('L2 penalty ($\\lambda$)')\n plt.ylabel('Coefficient value')\n plt.xscale('log')\n plt.rcParams.update({'font.size': 18})\n plt.tight_layout()", "def plotify(title, data, key):\n\n color_map = {\n 'totale_positivi' : 'mediumvioletred',\n 'totale_casi' : 'orangered'\n }\n\n # create a new figure\n plt.figure()\n\n dates = list()\n values = list()\n\n for d in data:\n dates.append(f\"{d['data']:%d-%b}\")\n values.append(int(d[key]))\n\n\n\n # Add title and axes names\n plt.title(title)\n # plt.xlabel('data')\n # plt.ylabel(key)\n\n\n plt.plot(dates, values, marker='o', color=color_map[key], linewidth=3)\n plt.xticks(rotation=45)\n bottom, top = plt.ylim()\n plt.ylim(bottom=bottom, top=top)\n plt.grid()\n \n # prettify y values\n current_values = plt.gca().get_yticks()\n plt.gca().set_yticklabels(['{:n}'.format(int(x)) for x in current_values])\n\n # responsive layout\n plt.tight_layout()\n\n\n buf = io.BytesIO()\n plt.savefig(buf, format='png')\n buf.seek(0)\n\n ### Release memory\n # Clear the current axes.\n plt.cla() \n # Clear the current figure.\n plt.clf() \n # Closes all the figure windows.\n plt.close('all') \n # plt.close(fig)\n gc.collect()\n\n return buf", "def plot_country(name, case):\n click.echo(click.style(\n \"Generating Plot....\", fg='cyan', bold='true'))\n plot_time_series.TimeSeriesPloTs.plot_country(case, name)\n click.echo(click.style(\n \"Done....\", fg='green', bold='true'))", "def main():\n\n import codecs\n\n file_path = '../sample_texts/hi-Deva.txt'\n with codecs.open(file_path, 'r', encoding='UTF-8') as input_file:\n sample_text = input_file.read()\n create_png(sample_text.strip(), 'hindi.png',\n family='Noto Sans Devanagari', language='hi', rtl=False)\n\n file_path = '../sample_texts/ar-Arab.txt'\n with codecs.open(file_path, 'r', encoding='UTF-8') as input_file:\n sample_text = input_file.read()\n create_png(sample_text.strip(), 'arabic.png',\n family='Noto Naskh Arabic', language='ar', rtl=True)\n\n file_path = '../sample_texts/mn-Mong.txt'\n with codecs.open(file_path, 'r', encoding='UTF-8') as input_file:\n sample_text = input_file.read()\n create_png(sample_text.strip(), 'mong.png',\n family='Noto Sans Mongolian', language='mn', vertical=True, rtl=False)", "def ploter(self):\n if len(self.dataset[self.first_title]) != 2:\n print('plot is only avilable for two features')\n return\n x_axis = []\n y_axis = []\n for title in self.dataset:\n x_axis.append(self.dataset[title][0])\n y_axis.append(self.dataset[title][1])\n plt.plot(x_axis, y_axis, 'o')\n plt.show()", "def plot_loss(loss_values_dict, save_name):\n fig, ax = plt.subplots(figsize=(10, 10))\n # Add each method to the plot\n for (method_name, loss_val_array) in loss_values_dict.items():\n print(method_name, len(loss_val_array))\n ax.plot(range(len(loss_val_array)), loss_val_array, label=method_name)\n ax.legend(loc='upper right')\n plt.xlabel('iteration')\n plt.ylabel('loss')\n plt.title('Grad Descent in Hyperbolic Space')\n plt.savefig(save_name)", "def make_plot(self):\n self.ax[0].set_ylabel(r'$C_{{\\ell}}^{{\\kappa\\kappa}}$')\n self.ax[1].set_ylabel('$\\mathrm{rel. dev. 
[\\%]$}')\n self.ax[1].set_xlabel(r'$\\ell$')", "def eda_plot():\n\n df1 = pd.read_csv('eda_malware.csv')\n df2 = pd.read_csv('eda_random.csv')\n df3 = pd.read_csv('eda_popular.csv')\n\n df = pd.concat([df1, df2, df3], ignore_index=True)\n df['label'].replace([0,1],['Benign','Malware'],inplace=True)\n\n colors = ['#EAB6AB','#D9E6F3','#CBAACB','#CCE2CB', '#FFAEA5', '#A2E1DB', '#97C1A9']\n # b vs. m: node types counts\n f1 = pd.crosstab(df['label'], df['node_types_counts'])\n\n f1 = pd.DataFrame({\"3 Types\": [1, 4], \"4 Types\": [1, 407], \"5 Types\": [245, 5768], \"6 Types\": [39, 1113], \"7 Types\": [83, 487], \"8 Types\": [154, 368], \"9 Types\": [103, 286]}).rename(index={0:'Benign', 1:'Malware'})\n f1.plot(kind='bar', color=colors)\n fig = plt.gcf()\n plt.legend(loc='upper left')\n plt.title('Benign vs. Malicious: Number of Node Types')\n fig.savefig('bv_node_types.png')\n\n # for a better look, limit type 5 malware to 2k counts only\n f1 = pd.DataFrame({\"3 Types\": [1, 4], \"4 Types\": [1, 407], \"5 Types\": [245, 2000], \"6 Types\": [39, 1113], \"7 Types\": [83, 487], \"8 Types\": [154, 368], \"9 Types\": [103, 286]}).rename(index={0:'Benign', 1:'Malware'})\n f1.plot(kind='bar', color=colors)\n fig = plt.gcf()\n plt.legend(loc='upper left')\n plt.title('Benign vs. Malicious: Number of Node Types')\n fig.savefig('bv_node_types1.png')\n\n # node types\n # for malware: extract node types info for node types counts > 5, and sum up each types counts\n node_types = df[(df['label'] == 'Malware') & (df['node_types_counts'] >= 5)]['node_types'] #series\n lst = [ast.literal_eval(s) for s in node_types]\n\n c = Counter()\n for d in lst:\n c.update(d)\n\n df_nt = pd.DataFrame(dict(c).items(), columns=['node_types', 'counts'])\n df_nt = df_nt.sort_values(by=['counts'])\n\n sizes = [215060, 2823059, 3135725, 5641356, 10679709, 16547701]\n labels = ['Others', 'static,Node', 'public,static,Node', 'Node', 'external,Node', 'public,Node']\n\n colors = ['#EAB6AB','#D9E6F3','#CBAACB','#CCE2CB', '#FFAEA5', '#A2E1DB']\n\n fig1, ax1 = plt.subplots(figsize=(7, 7))\n ax1.pie(sizes, labels=labels, autopct='%1.1f%%',\n shadow=False, startangle=90, colors=colors)\n ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n plt.title('Malware: Top Node Types and Its Counts', y=1.05)\n\n plt.show()\n fig1.savefig('counts_pie_m.png')\n\n # for benign: extract node types info for node types counts, and sum up each types counts\n node_types = df[(df['label'] == 'Benign')]['node_types'] #series\n lst = [ast.literal_eval(s) for s in node_types]\n\n c = Counter()\n for d in lst:\n c.update(d)\n\n df_nt = pd.DataFrame(dict(c).items(), columns=['node_types', 'counts'])\n df_nt = df_nt.sort_values(by=['counts'])\n\n sizes = [77967, 2892033, 2964924, 5287258, 6478196, 20364339]\n labels = ['Others', 'staticNode', 'public,staticNode', 'external,Node', 'Node', 'public,Node']\n\n colors = ['#EAB6AB','#D9E6F3','#CBAACB','#CCE2CB', '#FFAEA5', '#A2E1DB']\n\n fig1, ax1 = plt.subplots(figsize=(7, 7))\n ax1.pie(sizes, labels=labels, autopct='%1.1f%%',\n shadow=False, startangle=90, colors=colors)\n ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n plt.title('Benign: Top Node Types and Its Counts', y=1.05)\n\n plt.show()\n fig1.savefig('counts_pie_b.png')\n\n # benign vs malware: counts\n sizes = [8435, 802]\n labels = ['Benign', 'Malware']\n\n colors = ['#EAB6AB','#D9E6F3']\n\n fig1, ax1 = plt.subplots(figsize=(7, 7))\n ax1.pie(sizes, labels=labels, autopct='%1.1f%%',\n 
shadow=False, startangle=90, colors=colors)\n ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n plt.title('Number of Benign vs. Malware', y=1.05)\n\n plt.show()\n fig1.savefig('bm_counts.png')\n\n # number of edges vs number of nodes\n groups = df.groupby('label')\n colors = ['#FFAEA5', '#A2E1DB']\n\n # Plot\n fig, ax = plt.subplots()\n ax.margins(0.05) # Optional, just adds 5% padding to the autoscaling\n for name, group in groups:\n if name == 'Benign':\n c = colors[0]\n else:\n c = colors[1]\n ax.plot(group.number_edges, group.number_nodes, marker='o', linestyle='', ms=4, label=name, c=c)\n ax.legend()\n ax.set_xlabel('Number of Edges')\n ax.set_ylabel('Number of Nodes')\n ax.set_title('Benign & Malware: Number of Edges vs. Number of Nodes', y=1.05)\n\n plt.show()\n fig.savefig('bm_edges_nodes.png')", "def worldplot(data):\n \n plt.rcParams['font.size'] = 18\n world_df= geopandas.read_file(geopandas.datasets.get_path('naturalearth_lowres'));\n\n world_df = world_df[world_df[\"iso_a3\"].isin(data[\"recipient_iso3\"])];\n\n #world_2df.[\"OFa_all_con\"] = np.nan;\n #world_2df.sort_values(by=\"iso_a3\").head()\n for i in world_df.index:\n for j in data.index:\n if world_df.loc[i,\"iso_a3\"] == data.loc[j,\"recipient_iso3\"]:\n world_df.loc[i,\"OFa_all_con\"] = data.loc[j, \"OFa_all_con\"];\n\n\n fig, ax = plt.subplots(1,1, figsize=(22,14))\n ax.axis('off')\n fig.suptitle('Chinese Development Finance', fontsize=25)\n \n world_df.plot(column='OFa_all_con', ax = ax, legend=True, legend_kwds={\"label\":\"\\n Chinese Development Finance in $10 bln.\",\n \"orientation\": \"horizontal\"}, \n missing_kwds={\"color\": \"lightgrey\",\n \"edgecolor\": \"red\",\n \"hatch\": \"///\",\n \"label\": \"Missing values\"});", "def visualize_days():\n\n # grab our parsed data that we parsed earlier\n data_file = parse(MY_FILE, \",\")\n\n counter = Counter(item['DayOfWeek'] for item in data_file)\n\n data_list = [\n counter['Monday'],\n counter['Tuesday'],\n counter['Wednesday'],\n counter['Thursday'],\n counter['Friday'],\n counter['Saturday'],\n counter['Sunday']\n ]\n\n day_tuple = tuple(['Mon','Tues','Wed','Thurs','Fri','Sat','Sun'])\n\n plt.plot(data_list)\n\n # num of ticks needed for our x-axis & assign labels\n plt.xticks(range(len(day_tuple)),day_tuple)\n \n plt.savefig(\"Days.png\")\n plt.clf()", "def create_chart(conf, entries):\r\n serie_index = 0\r\n for serie in conf['series']:\r\n data = []\r\n for entry in entries:\r\n if entry is not None:\r\n data.append(entry.datatolist(str(serie['db'])))\r\n conf['series'][serie_index]['data'] = data\r\n serie_index += 1\r\n \r\n \"\"\" Add PlotBands \"\"\" \r\n plotBands = []\r\n last_entry = len(entries)-1\r\n n = 1\r\n while n < last_entry and\\\r\n entries[n].phase is not None and\\\r\n entries[n] is not None and\\\r\n entries[n].next().phase is not None:\r\n begin = entries[n].dt\r\n phase = entries[n].phase\r\n n += 1\r\n while entries[n] is not None and\\\r\n entries[n].phase is not None and\\\r\n entries[n].phase == phase and\\\r\n n < last_entry:\r\n n += 1\r\n end = entries[n].dt\r\n plotBand = {\r\n 'color': PhaseColor[phase],\r\n 'from': datetime_to_timestamp(begin),\r\n 'to': datetime_to_timestamp(end)\r\n }\r\n plotBands.append(plotBand)\r\n conf['xAxis']['plotBands'] = plotBands\r\n \r\n \"\"\" Add Labels \"\"\" \r\n condition_flag_allumage = '((prec.phase is not None) and (prec.phase is not PHASE_ALLUMAGE))'\r\n condition_next_is_not_maintien = '((next.phase is not None) and (next.phase is not 
PHASE_MAINTIEN))'\r\n labels = json.loads(json.dumps(ChartLabel)) #make a copy of original object\r\n labels['name'] = 'Labels'\r\n for entry in entries:\r\n if entry is not None and entry.phase is not None:\r\n #Label Allumage \r\n if entry.event is not None:\r\n data = {\r\n \"x\": datetime_to_timestamp(entry.dt),\r\n \"title\": 'Allumage'\r\n }\r\n labels['data'].append(data)\r\n \"\"\"\r\n # Label Combustion \r\n if entry.phase == PHASE_COMBUSTION and\\\r\n entry.prec() is not None and\\\r\n entry.prec().phase is not PHASE_COMBUSTION and\\\r\n entry.all_next_verify_condition(5, condition_next_is_not_maintien):\r\n data = {\r\n \"x\": datetime_to_timestamp(entry.dt),\r\n \"title\": 'Combustion'\r\n }\r\n labels['data'].append(data)\r\n \"\"\"\r\n conf['series'].append(labels)\r\n\r\n \"\"\" Add Subtitle (plotbands legend) \"\"\"\r\n #conf[\"subtitle\"] = ChartLegend\r\n\r\n \"\"\" Add Title (date begin date end) \"\"\"\r\n if len(entries) > 3:\r\n begin = pretty_date(entries[0].dt)\r\n end = pretty_date(entries[len(entries)-1].dt)\r\n #conf[\"title\"][\"text\"] = 'Monitoring Chaudière du {0} au {1}'.format(begin, end)\r\n conf[\"title\"][\"text\"] = 'Monitoring Chaudière'\r\n conf[\"subtitle\"][\"text\"] = ' du {0} au {1}'.format(begin, end)\r\n\r\n else:\r\n conf[\"title\"][\"text\"] = 'Monitoring Chaudière'\r\n\r\n \"\"\" Return new conf \"\"\"\r\n return conf", "def set_labels(self):\n\n if 1 <= self.selected_data <= 2:\n self.plot_select.setLabel(\"left\", \"P (kPa)\")\n self.plot_select.setLabel(\"bottom\", \"t\", \"s\")\n self.plot_zoom.setLabel(\"left\", \"P (kPa)\")\n self.plot_zoom.setLabel(\"bottom\", \"t\", \"s\")\n\n elif self.selected_data == 3:\n self.plot_select.setLabel(\"left\", \"ext\", \"\")\n self.plot_select.setLabel(\"bottom\", \"t\", \"s\")\n self.plot_zoom.setLabel(\"left\", \"ext\", \"\")\n self.plot_zoom.setLabel(\"bottom\", \"t\", \"s\")\n\n elif self.selected_data == 4:\n self.plot_select.setLabel(\"left\", \"U\", \"V\")\n self.plot_select.setLabel(\"bottom\", \"t\", \"s\")\n self.plot_zoom.setLabel(\"left\", \"U\", \"V\")\n self.plot_zoom.setLabel(\"bottom\", \"t\", \"s\")\n\n # self.plot_simulate.setLabel(\"left\", \"ext\", \"\")\n # self.plot_simulate.setLabel(\"bottom\", \"t\", \"s\")\n\n self.plot_distribution.setLabel(\"left\", \"N ×10¹⁰ (#/m³)\")\n self.plot_distribution.setLabel(\"bottom\", \"d_p\", \"m\")\n self.plot_distribution.showGrid(y=True)\n\n self.plot_rotatometer.setLabel(\"left\", \"N ×10¹⁰ (#/m³)\")\n self.plot_rotatometer.setLabel(\"bottom\", \"laimennusvirtaus\")\n self.plot_rotatometer.showGrid(y=True)", "def scatter_plot_players(dict_of_players):\n seasons = [x[2:4] for x in dict_of_players.keys()]\n plt.plot(seasons, dict_of_players.values())\n plt.show()", "def plot_dicts(dict1, dict2, color):\n\tplotlists = prep_for_plot(dict1, dict2)\n\tlist1 = plotlists[0]\n\tlist2 = plotlists[1]\n\tplt.scatter(list1, list2, marker='.', color=color)", "def plot_barplot_language_types(lang_class_prop_over_gen_df, title, file_path, file_name, n_runs, n_batches, n_gens, gen_start, lang_class_baselines_all, lang_class_baselines_fully_expressive, possible_form_lengths):\n\n sns.set_style(\"darkgrid\")\n sns.set_context(\"talk\")\n\n if len(possible_form_lengths) == 1:\n n_language_classes = 4\n else:\n n_language_classes = 7 #TODO: or should this be 6 (i.e. 
collapsing the two different reduplication strategies?)\n\n proportion_column_as_results = dataframe_to_language_stats(lang_class_prop_over_gen_df, n_runs, n_batches, n_gens, possible_form_lengths)\n\n proportion_column_from_start_gen = proportion_column_as_results[:, gen_start:]\n\n proportion_column_from_start_gen = proportion_column_from_start_gen.flatten()\n\n runs_column_from_start_gen = []\n for i in range(n_runs*n_batches):\n for j in range(gen_start, n_gens):\n for k in range(n_language_classes):\n runs_column_from_start_gen.append(i)\n runs_column_from_start_gen = np.array(runs_column_from_start_gen)\n\n generation_column_from_start_gen = []\n for i in range(n_runs*n_batches):\n for j in range(gen_start, n_gens):\n for k in range(n_language_classes):\n generation_column_from_start_gen.append(j)\n generation_column_from_start_gen = np.array(generation_column_from_start_gen)\n\n class_column_from_start_gen = []\n for i in range(n_runs*n_batches):\n for j in range(gen_start, n_gens):\n if n_language_classes == 4:\n class_column_from_start_gen.append('degenerate')\n class_column_from_start_gen.append('holistic')\n class_column_from_start_gen.append('compositional')\n class_column_from_start_gen.append('other')\n elif n_language_classes == 7:\n class_column_from_start_gen.append('D')\n class_column_from_start_gen.append('H')\n class_column_from_start_gen.append('H+Div.')\n class_column_from_start_gen.append('C')\n class_column_from_start_gen.append('C+Red.-part')\n class_column_from_start_gen.append('C+Red.-whole')\n class_column_from_start_gen.append('O')\n\n new_data_dict = {'run': runs_column_from_start_gen,\n 'generation': generation_column_from_start_gen,\n 'proportion': proportion_column_from_start_gen,\n 'class': class_column_from_start_gen}\n\n lang_class_prop_over_gen_df_from_starting_gen = pd.DataFrame(new_data_dict)\n\n if len(possible_form_lengths) == 1:\n palette = sns.color_palette([\"black\", \"red\", \"green\", \"grey\"])\n else:\n palette = sns.color_palette([\"black\",\n sns.color_palette(\"colorblind\")[3],\n sns.color_palette(\"colorblind\")[1],\n sns.color_palette(\"colorblind\")[2],\n sns.color_palette(\"colorblind\")[9],\n sns.color_palette(\"colorblind\")[0],\n sns.color_palette(\"colorblind\")[7]])\n\n sns.barplot(x=\"class\", y=\"proportion\", data=lang_class_prop_over_gen_df_from_starting_gen, palette=palette)\n\n # plt.axhline(y=lang_class_baselines_all[0], xmin=0.0, xmax=0.25, color='k', linestyle='--', linewidth=2)\n # plt.axhline(y=lang_class_baselines_all[1], xmin=0.25, xmax=0.5, color='k', linestyle='--', linewidth=2)\n # plt.axhline(y=lang_class_baselines_all[2], xmin=0.5, xmax=0.75, color='k', linestyle='--', linewidth=2)\n # plt.axhline(y=lang_class_baselines_all[3], xmin=0.75, xmax=1.0, color='k', linestyle='--', linewidth=2)\n #\n # if title == 'Mutual Understanding Only' or title == 'Minimal Effort & Mutual Understanding':\n # plt.axhline(y=lang_class_baselines_fully_expressive[0], xmin=0.25, xmax=0.5, color='0.6', linestyle='--', linewidth=2)\n # plt.axhline(y=lang_class_baselines_fully_expressive[1], xmin=0.5, xmax=0.75, color='0.6', linestyle='--', linewidth=2)\n\n plt.tick_params(axis='both', which='major', labelsize=18)\n plt.tick_params(axis='both', which='minor', labelsize=18)\n plt.ylim(-0.05, 1.05)\n plt.title(title, fontsize=22)\n # plt.xlabel('Language class')\n plt.xlabel('', fontsize=20)\n plt.ylabel('Mean proportion', fontsize=20)\n plt.tight_layout()\n\n if holistic_without_partial_meaning is True:\n plt.savefig(file_path + 
\"Barplot_lang_types_\" + file_name + \"_burn_in_\" + str(gen_start) + \".png\")\n else:\n plt.savefig(file_path + \"Barplot_lang_types_\" + file_name + \"_burn_in_\" + str(gen_start) + \"_NEW.png\")\n plt.show()", "def visualizations():\r\n raise NotImplementedError\r\n # df = pandas.read_csv('accidents_by_hour.csv', index_col=0, header=0)\r\n # plt.plot(0, 0, data=df)\r\n # plt.show()\r", "def get_language_list_gui():\n _ = get_gettext()\n language = {}\n language['connect'] = _(\"Connect\")\n language['ip'] = _(\"IP\")\n language['netmask'] = _(\"Netmask\")\n language['gateway'] = _('Gateway')\n language['dns'] = _('DNS')\n language['use_static_ip'] = _('Use Static IPs')\n language['use_static_dns'] = _('Use Static DNS')\n language['use_encryption'] = _('Use Encryption')\n language['advanced_settings'] = _('Advanced Settings')\n language['wired_network'] = _('Wired Network')\n language['wired_network_instructions'] = _('To connect to a wired network,'\n ' you must create a network profile. To create a network profile, type a'\n ' name that describes this network, and press Add.')\n language['automatic_connect'] = _('Automatically connect to this network')\n language['secured'] = _('Secured')\n language['unsecured'] = _('Unsecured')\n language['channel'] = _('Channel')\n language['preferences'] = _('Preferences')\n language['wpa_supplicant_driver'] = _('WPA Supplicant Driver')\n language['wireless_interface'] = _('Wireless Interface')\n language['wired_interface'] = _('Wired Interface')\n language['hidden_network'] = _('Hidden Network')\n language['hidden_network_essid'] = _('Hidden Network ESSID')\n language['connected_to_wireless'] = _('Connected to $A at $B (IP: $C)')\n language['connected_to_wired'] = _('Connected to wired network (IP: $A)')\n language['not_connected'] = _('Not connected')\n language['no_wireless_networks_found'] = _('No wireless networks found.')\n language['killswitch_enabled'] = _('Wireless Kill Switch Enabled')\n language['key'] = _('Key')\n language['username'] = _('Username')\n language['password'] = _('Password')\n language['anonymous_identity'] = _('Anonymous Identity')\n language['identity'] = _('Identity')\n language['authentication'] = _('Authentication')\n language['path_to_pac_file'] = _('Path to PAC File')\n language['select_a_network'] = _('Choose from the networks below:')\n language['connecting'] = _('Connecting...')\n language['wired_always_on'] = _('Always show wired interface')\n language['auto_reconnect'] = _('Automatically reconnect on connection loss')\n language['create_adhoc_network'] = _('Create an Ad-Hoc Network')\n language['essid'] = _('ESSID')\n language['use_wep_encryption'] = _('Use Encryption (WEP only)')\n language['before_script'] = _('Run script before connect')\n language['after_script'] = _('Run script after connect')\n language['disconnect_script'] = _('Run disconnect script')\n language['script_settings'] = _('Scripts')\n language['use_ics'] = _('Activate Internet Connection Sharing')\n language['madwifi_for_adhoc'] = _('Check if using madwifi/atheros drivers')\n language['default_wired'] = _('Use as default profile (overwrites any previous default)')\n language['use_debug_mode'] = _('Enable debug mode')\n language['use_global_dns'] = _('Use global DNS servers')\n language['use_default_profile'] = _('Use default profile on wired autoconnect')\n language['show_wired_list'] = _('Prompt for profile on wired autoconnect')\n language['use_last_used_profile'] = _('Use last used profile on wired autoconnect')\n 
language['choose_wired_profile'] = _('Select or create a wired profile to connect with')\n language['wired_network_found'] = _('Wired connection detected')\n language['stop_showing_chooser'] = _('Stop Showing Autoconnect pop-up temporarily')\n language['display_type_dialog'] = _('Use dBm to measure signal strength')\n language['scripts'] = _('Scripts')\n language['invalid_address'] = _('Invalid address in $A entry.')\n language['global_settings'] = _('Use these settings for all networks sharing this essid')\n language['encrypt_info_missing'] = _('Required encryption information is missing.')\n language['enable_encryption'] = _('This network requires encryption to be enabled.')\n language['wicd_auto_config'] = _('Automatic (recommended)')\n language[\"gen_settings\"] = _(\"General Settings\")\n language[\"ext_programs\"] = _(\"External Programs\")\n language[\"dhcp_client\"] = _(\"DHCP Client\")\n language[\"wired_detect\"] = _(\"Wired Link Detection\")\n language[\"route_flush\"] = _(\"Route Table Flushing\")\n language[\"backend\"] = _(\"Backend\")\n language[\"backend_alert\"] = _(\"Changes to your backend won't occur until the daemon is restarted.\")\n language['0'] = _('0')\n language['1'] = _('1')\n language['2'] = _('2')\n language['3'] = _('3')\n language['4'] = _('4')\n language['5'] = _('5')\n language['6'] = _('6')\n language['7'] = _('7')\n language['8'] = _('8')\n language['9'] = _('9')\n language['interface_down'] = _('Putting interface down...')\n language['resetting_ip_address'] = _('Resetting IP address...')\n language['interface_up'] = _('Putting interface up...')\n language['setting_encryption_info'] = _('Setting encryption info')\n language['removing_old_connection'] = _('Removing old connection...')\n language['generating_psk'] = _('Generating PSK...')\n language['generating_wpa_config'] = _('Generating WPA configuration file...')\n language['flushing_routing_table'] = _('Flushing the routing table...')\n language['configuring_interface'] = _('Configuring wireless interface...')\n language['validating_authentication'] = _('Validating authentication...')\n language['setting_broadcast_address'] = _('Setting broadcast address...')\n language['setting_static_dns'] = _('Setting static DNS servers...')\n language['setting_static_ip'] = _('Setting static IP addresses...')\n language['running_dhcp'] = _('Obtaining IP address...')\n language['dhcp_failed'] = _('Connection Failed: Unable to Get IP Address')\n language['aborted'] = _('Connection Cancelled')\n language['bad_pass'] = _('Connection Failed: Bad password')\n language['done'] = _('Done connecting...')\n return language", "def create_education_chart(region_list, comparison):\n print('education chart HI')\n print(comparison)\n if comparison == 'field':\n qty_data = create_data_by_field_qty(region_list, 'education')\n qty_chart = {\n 'chartType': 'bar',\n 'chartName': 'Status Pendidikan menurut Jumlah Orang',\n 'dataFields': qty_data,\n 'dataOptions': {\n 'fieldAxis': 'Status Pendidikan',\n 'measureAxis': 'Jumlah Orang',\n 'tooltipStringFormat': ['_', ' ', 'Orang']\n }\n }\n\n dataset_total = sum(qty_data['values'])\n pct_data = create_data_by_field_pct(qty_data, dataset_total)\n pct_chart = {\n 'chartType': 'doughnut',\n 'chartName': 'Status Pendidikan menurut Persentase Orang',\n 'dataFields': pct_data,\n 'dataOptions': {\n 'fieldAxis': 'Status Pendidikan',\n 'measureAxis': 'Persentase Orang',\n 'tooltipStringFormat': ['_', '%']\n } \n }\n\n chart_list = {'chartList': [qty_chart, pct_chart]}\n jsonprint(chart_list)\n 
return chart_list\n\n elif comparison == 'region':\n (qty_list, label_list) = \\\n create_data_by_region_qty(region_list, 'education')\n\n print(qty_list, label_list)\n\n dataset_total_list = get_dataset_total_list(qty_list)\n pct_list = create_data_by_region_pct(qty_list, \n dataset_total_list)\n\n chart_list = {'chartList': [], 'labelList': label_list}\n for index, chart in enumerate(qty_list):\n pct_list[index]['dataOptions'] = {\n 'tooltipStringFormat': ['_', '%'],\n 'fieldAxis': 'Status Pendidikan',\n 'measureAxis': 'Persentase Orang'\n }\n qty_list[index]['dataOptions'] = {\n 'tooltipStringFormat': ['_', ' ', 'Orang'],\n 'fieldAxis': 'Status Pendidikan',\n 'measureAxis': 'Jumlah Orang'\n }\n\n field = pct_list[index]['field']\n pct_list[index]['chartName'] = \\\n \"Persentase Orang dengan Status Pendidikan '\" + field + \\\n \"' menurut Kecamatan\"\n qty_list[index]['chartName'] = \\\n \"Jumlah Orang dengan Status Pendidikan '\" + \\\n field + \"' menurut Kecamatan\"\n\n chart_list['chartList'].append(pct_list[index])\n chart_list['chartList'].append(qty_list[index])\n\n jsonprint(chart_list)\n return chart_list", "def draw_plot_func(dictionary, n_classes, window_title, plot_title, x_label, output_path, to_show, plot_color, true_p_bar):\n # sort the dictionary by decreasing value, into a list of tuples\n sorted_dic_by_value = sorted(dictionary.items(), key=operator.itemgetter(1))\n # unpacking the list of tuples into two lists\n sorted_keys, sorted_values = zip(*sorted_dic_by_value)\n #\n if true_p_bar != \"\":\n \"\"\"\n Special case to draw in (green=true predictions) & (red=false predictions)\n \"\"\"\n fp_sorted = []\n tp_sorted = []\n for key in sorted_keys:\n fp_sorted.append(dictionary[key] - true_p_bar[key])\n tp_sorted.append(true_p_bar[key])\n plt.barh(range(n_classes), fp_sorted, align='center', color='crimson', label='False Predictions')\n plt.barh(range(n_classes), tp_sorted, align='center', color='forestgreen', label='True Predictions', left=fp_sorted)\n # add legend\n plt.legend(loc='lower right')\n \"\"\"\n Write number on side of bar\n \"\"\"\n fig = plt.gcf() # gcf - get current figure\n axes = plt.gca()\n r = fig.canvas.get_renderer()\n for i, val in enumerate(sorted_values):\n fp_val = fp_sorted[i]\n tp_val = tp_sorted[i]\n fp_str_val = \" \" + str(fp_val)\n tp_str_val = fp_str_val + \" \" + str(tp_val)\n # trick to paint multicolor with offset:\n # first paint everything and then repaint the first number\n t = plt.text(val, i, tp_str_val, color='forestgreen', va='center', fontweight='bold')\n plt.text(val, i, fp_str_val, color='crimson', va='center', fontweight='bold')\n if i == (len(sorted_values)-1): # largest bar\n adjust_axes(r, t, fig, axes)\n else:\n plt.barh(range(n_classes), sorted_values, color=plot_color)\n \"\"\"\n Write number on side of bar\n \"\"\"\n fig = plt.gcf() # gcf - get current figure\n axes = plt.gca()\n r = fig.canvas.get_renderer()\n for i, val in enumerate(sorted_values):\n str_val = \" \" + str(val) # add a space before\n if val < 1.0:\n str_val = \" {0:.2f}\".format(val)\n t = plt.text(val, i, str_val, color=plot_color, va='center', fontweight='bold')\n # re-set axes to show number inside the figure\n if i == (len(sorted_values)-1): # largest bar\n adjust_axes(r, t, fig, axes)\n # set window title\n fig.canvas.set_window_title(window_title)\n # write classes in y axis\n tick_font_size = 12\n plt.yticks(range(n_classes), sorted_keys, fontsize=tick_font_size)\n \"\"\"\n Re-scale height accordingly\n \"\"\"\n init_height = 
fig.get_figheight()\n # comput the matrix height in points and inches\n dpi = fig.dpi\n height_pt = n_classes * (tick_font_size * 1.4) # 1.4 (some spacing)\n height_in = height_pt / dpi\n # compute the required figure height\n top_margin = 0.15 # in percentage of the figure height\n bottom_margin = 0.05 # in percentage of the figure height\n figure_height = height_in / (1 - top_margin - bottom_margin)\n # set new height\n if figure_height > init_height:\n fig.set_figheight(figure_height)\n\n # set plot title\n plt.title(plot_title, fontsize=14)\n # set axis titles\n # plt.xlabel('classes')\n plt.xlabel(x_label, fontsize='large')\n # adjust size of window\n fig.tight_layout()\n # save the plot\n fig.savefig(output_path)\n # show image\n if to_show:\n plt.show()\n # close the plot\n plt.close()", "def create_pronoun_graph(list_of_speeches_pronoun_data):\n\n # clean up the data\n processed_speech_data_list = data_process(list_of_speeches_pronoun_data)\n\n fig, ax = plt.subplots(figsize=(50,10))\n ax.set_title('Pronoun Type Density of Presidential Inaugural Addresses', fontsize=50)\n plt.xlabel('Inaugural Address', fontsize=40)\n plt.ylabel('Pronoun Type Rate', fontsize=40)\n plt.rc('xtick',labelsize=20)\n plt.rc('ytick',labelsize=20)\n\n N = len(list_of_speeches_pronoun_data)\n\n first_person_singular = processed_speech_data_list[1]\n first_person_plural = processed_speech_data_list[2]\n second_person = processed_speech_data_list[3]\n third_person_singular = processed_speech_data_list[4]\n third_person_plural = processed_speech_data_list[5]\n\n ind = np.arange(N) # the x locations for the groups\n width = 0.1 # the width of the bars\n\n # the order for these are arbitrary\n p1 = ax.bar(ind, first_person_singular, width, color='b', bottom=0)\n p2 = ax.bar(ind + width, first_person_plural, width, color='g', bottom=0)\n p3 = ax.bar(ind + width*2, second_person, width, color='r', bottom=0)\n p4 = ax.bar(ind + width*3, third_person_singular, width, color='c', bottom=0)\n p5 = ax.bar(ind + width*4, third_person_plural, width, color='m', bottom=0)\n\n ax.set_xticks(ind + width / 5)\n ax.set_xticklabels(processed_speech_data_list[0], rotation='vertical')\n\n ax.legend((p1[0], p2[0], p3[0], p4[0], p5[0]),\n ('First Person Singular',\n 'First Person Plural',\n 'Second Person',\n 'Third Person Singular',\n 'Third Person Plural'\n ),\n fancybox=True,\n title = \"Legend\")\n\n plt.show()", "def visualise_stats(stats_dict : dict):\n nbr_mags = len(stats_dict.keys())\n nbr_specs = np.amax([len(stats_dict[mag].keys()) for mag in stats_dict.keys()])\n\n for mag in stats_dict.keys():\n plt.figure(figsize=(20,8))\n for key, val in stats_dict[mag].items():\n ax = sns.distplot(list(val.keys()), \n hist_kws={\"weights\":list(val.values()), \"alpha\": 0.1}, \n kde_kws = {\"weights\":list(val.values()), \"label\":key})\n plt.title(mag+\" magnification\", fontsize = 14)\n plt.legend()\n \n fig, axes = plt.subplots(nbr_specs,1, figsize=(20,4*nbr_specs))\n for mag in stats_dict.keys():\n i=0\n for key, val in stats_dict[mag].items():\n ax = sns.distplot(list(val.keys()), ax = axes[i], \n hist_kws={\"weights\":list(val.values()), \"alpha\": 0.1}, \n kde_kws = {\"weights\":list(val.values()), \"label\":mag})\n axes[i].set_title(key+\" specification\", fontsize = 14)\n axes[i].legend()\n i+=1", "def deap_plot_hyp(stats, colour=\"blue\"):\n plt.ion()\n # plot hypervolumes\n hyp = []\n for gen in stats:\n hyp.append(gen['hypervolume'])\n plt.figure()\n plt.plot(hyp, color=colour)\n plt.xlabel(\"Function Evaluations\")\n 
plt.ylabel(\"Hypervolume\")", "def plot_resto(resto_dict):\n\tfrom matplotlib import pyplot as plt\n\tfig = plt.figure()\n\tax = plt.axes()\n\tkeys = resto_dict.keys()\n\tx = range(len(resto_dict[keys[0]]))\n\tfor ew in keys:\n\t\tresto_ew = ax.plot(x, resto_dict[ew], label=ew)\n\thandles, labels = ax.get_legend_handles_labels()\n\tax.legend(handles, labels, loc=5)\n\tplt.title('Change in P(*|resto) over time')\n\tplt.xlabel('iterations')\n\tplt.savefig('resto.png')\n\tplt.show()", "def draw_galaxies(fig, ax):\r\n table = os.path.join(tables_dir, \"misgeld_et_al_2008.tsv\")\r\n ra, dec, diam = np.loadtxt(table, usecols=(0, 1, 15), delimiter=\"|\").T\r\n ################################################\r\n # Center is set in NGC 3311 according to catalog\r\n x = canvas.arcsec2kpc(3600. * (ra - canvas.ra0))\r\n y = canvas.arcsec2kpc(3600. * (dec - canvas.dec0))\r\n #################################################\r\n ax.plot(x, y, \"og\", ms=16, markerfacecolor='none', mec=\"y\")\r\n return", "def graphify(evictions_per_week):\r\n weeks = []\r\n for week in evictions_per_week.keys():\r\n if '2020' in week:\r\n weeks.append(week)\r\n evictions_filed = []\r\n for week in weeks:\r\n evictions_filed.append(evictions_per_week[week])\r\n plt.figure(figsize=(50, 10))\r\n plt.plot(weeks, evictions_filed)\r\n plt.xlabel('Date')\r\n plt.ylabel('Evictions filed')\r\n plt.title('Evictions filed by the week')\r\n plt.show()\r\n return weeks, evictions_filed", "def barGraph(listOfWord, listOfFrequency):\r\n\r\n\tindex = np.arange(len(listOfWord))\r\n\r\n\tplt.title(\"Frekuensi Kemunculan Kata\")\r\n\tplt.barh(index, listOfFrequency)\r\n\tplt.xlabel('Frekuensi')\r\n\tplt.yticks(index, listOfWord, fontsize=6)\r\n\r\n\tplt.show()", "def draw_plot_func(dictionary, n_classes, window_title, plot_title, x_label, output_path, to_show, plot_color,\n true_p_bar):\n # sort the dictionary by decreasing value, into a list of tuples\n sorted_dic_by_value = sorted(dictionary.items(), key=operator.itemgetter(1))\n # unpacking the list of tuples into two lists\n sorted_keys, sorted_values = zip(*sorted_dic_by_value)\n #\n if true_p_bar != \"\":\n \"\"\"\n Special case to draw in (green=true predictions) & (red=false predictions)\n \"\"\"\n fp_sorted = []\n tp_sorted = []\n for key in sorted_keys:\n fp_sorted.append(dictionary[key] - true_p_bar[key])\n tp_sorted.append(true_p_bar[key])\n plt.barh(range(n_classes), fp_sorted, align='center', color='crimson', label='False Predictions')\n plt.barh(range(n_classes), tp_sorted, align='center', color='forestgreen', label='True Predictions',\n left=fp_sorted)\n # add legend\n plt.legend(loc='lower right')\n \"\"\"\n Write number on side of bar\n \"\"\"\n fig = plt.gcf() # gcf - get current figure\n axes = plt.gca()\n r = fig.canvas.get_renderer()\n for i, val in enumerate(sorted_values):\n fp_val = fp_sorted[i]\n tp_val = tp_sorted[i]\n fp_str_val = \" \" + str(fp_val)\n tp_str_val = fp_str_val + \" \" + str(tp_val)\n # trick to paint multicolor with offset:\n # first paint everything and then repaint the first number\n t = plt.text(val, i, tp_str_val, color='forestgreen', va='center', fontweight='bold')\n plt.text(val, i, fp_str_val, color='crimson', va='center', fontweight='bold')\n if i == (len(sorted_values) - 1): # largest bar\n adjust_axes(r, t, fig, axes)\n else:\n plt.barh(range(n_classes), sorted_values, color=plot_color)\n \"\"\"\n Write number on side of bar\n \"\"\"\n fig = plt.gcf() # gcf - get current figure\n axes = plt.gca()\n r = fig.canvas.get_renderer()\n for 
i, val in enumerate(sorted_values):\n str_val = \" \" + str(val) # add a space before\n if val < 1.0:\n str_val = \" {0:.2f}\".format(val)\n t = plt.text(val, i, str_val, color=plot_color, va='center', fontweight='bold')\n # re-set axes to show number inside the figure\n if i == (len(sorted_values) - 1): # largest bar\n adjust_axes(r, t, fig, axes)\n # set window title\n fig.canvas.set_window_title(window_title)\n # write classes in y axis\n tick_font_size = 12\n plt.yticks(range(n_classes), sorted_keys, fontsize=tick_font_size)\n \"\"\"\n Re-scale height accordingly\n \"\"\"\n init_height = fig.get_figheight()\n # compute the matrix height in points and inches\n dpi = fig.dpi\n height_pt = n_classes * (tick_font_size * 1.4) # 1.4 (some spacing)\n height_in = height_pt / dpi\n # compute the required figure height\n top_margin = 0.15 # in percentage of the figure height\n bottom_margin = 0.05 # in percentage of the figure height\n figure_height = height_in / (1 - top_margin - bottom_margin)\n # set new height\n if figure_height > init_height:\n fig.set_figheight(figure_height)\n\n # set plot title\n plt.title(plot_title, fontsize=14)\n # set axis titles\n plt.xlabel(x_label, fontsize='large')\n # adjust size of window\n fig.tight_layout()\n # save the plot\n fig.savefig(output_path)\n # show image\n if to_show:\n plt.show()\n # close the plot\n plt.close()", "def diagram_plugs(data_no,\r\n data_little,\r\n data_means,\r\n data_great,\r\n data_large_enough,\r\n data_super_large,\r\n er_no, er_little,\r\n er_means,\r\n er_great,\r\n er_large_enough,\r\n er_super_large):\r\n\r\n\r\n plt.bar(range(6), [data_no,\r\n data_little,\r\n data_means,\r\n data_great,\r\n data_large_enough,\r\n data_super_large],\r\n width=0.1, color='black',\r\n yerr=[er_no, er_little, er_means,\r\n er_great, er_large_enough,\r\n er_super_large],\r\n ecolor='black', capsize=10)\r\n\r\n\r\n plt.xticks(range(6), ['non', 'petit', 'moyen',\r\n 'grand', 'assez grand', 'tres grand'])\r\n\r\n\r\n plt.ylabel('Taux de pollution en AQI')\r\n plt.title(\"Taux de pollution selon les bouchons\")\r\n\r\n nouveau = new()\r\n print(nouveau)\r\n plt.savefig(nouveau, transparent=True)\r\n plt.clf()\r\n plt.close()\r\n\r\n shutil.move(nouveau, '/app/static/popo')\r\n\r\n return nouveau", "def plot_localization_length(ax, c, k, dis_param, n, x):\n E = 2*np.asarray(c)*np.cos(k)\n gamma_inf = lyap_gamma(c,dis_param, E)\n loc_length3 = np.exp(-2*n*gamma_inf)\n if x.size == loc_length3.size :\n ax.autoscale(False)\n ax.plot(x, loc_length3,color='black', ls='--')", "def helix_triplet_stats (self):\n\n for Value in ['Phi']:\n\n HistogramPlot(np.array(self. 
values_list(Value, flat=True)), 'myproject/myapp/static/myapp/static/Stats/HelixTriplet/'+Value )\n #zrobic jakies dict coby robilo ranges, uzaleznialo np od zakresu albo od czegos\n\n return", "def render(self):\n map = {0:'.', 1:'x', 2:'o'} # grid label vs how to plot\n print(''.join(map[i] for i in self.grid[0:3]))\n print(''.join(map[i] for i in self.grid[3:6]))\n print(''.join(map[i] for i in self.grid[6:9]))\n print('====')", "def render(self):\n map = {0:'.', 1:'x', 2:'o'} # grid label vs how to plot\n print(''.join(map[i] for i in self.grid[0:3]))\n print(''.join(map[i] for i in self.grid[3:6]))\n print(''.join(map[i] for i in self.grid[6:9]))\n print('====')", "def plot_results(store):\n plt.figure()\n c = 0\n for i in store.keys():\n plt.scatter(i[0], -1*i[1], color=get_colour(store[i]))\n c += 1\n plt.show()", "def generate_plot(self):\r\n\t\tx, y = zip(*[p.p for p in self.universe])\r\n\t\tself.ax.cla()\r\n\t\tself.ax.plot(x, y, '.')\r\n\t\tself.ax.set_title('Universe at time: %d' % self.universe.time)\r\n\t\tself.ax.set_xlim([P_MU-4*P_STD, P_MU+4*P_STD])\r\n\t\tself.ax.set_ylim([P_MU-4*P_STD, P_MU+4*P_STD])", "def movies_lang(dataset, index_, lang_):\r\n movies_=[]\r\n for row in dataset.values():\r\n if(row[index_] == lang_):\r\n movies_.append(row[13])\r\n explore_data(movies_,0,5,False)\r\n return movies_", "def plot_results(t_val, mood):\r\n N = 8\r\n theta = np.linspace(0.0, 2 * np.pi , N, endpoint=False)\r\n the_stats = [t_val['number_words'], t_val['average_character_length'], \r\n t_val['signs'], t_val['multiple_signs'], t_val['question'],\r\n t_val['exclamation'], t_val['name'], mood] \r\n \r\n width = np.pi / N \r\n\r\n plt.figure()\r\n \r\n handle = plt.subplot(111, polar=True)\r\n handle.set_xticklabels(['Word', 'AvrChar', 'Signs', '2Signs', '?', '!', 'name', 'mood'])\r\n \r\n handle.bar(theta, the_stats, width=width, bottom=1.0)\r\n \r\n plt.show()", "def generate_wordcloud(dict_, title='WordCloud', PATH=None):\n wordcloud = WordCloud(min_font_size=10).generate_from_frequencies(dict_)\n plt.figure(figsize = (8, 8), facecolor = None) \n plt.imshow(wordcloud) \n plt.axis(\"off\") \n plt.title(title, size = 24)\n plt.tight_layout(pad = 0) \n if PATH:\n plt.savefig(PATH, bbox_inches=\"tight\", transparent=True)\n plt.show()", "def plot_colour(self, label):\n label = label.lower()\n pretty_colours = {}\n # SPIce HD\n pretty_colours['544'] = 'maroon'\n pretty_colours['545'] = 'goldenrod'\n pretty_colours['548'] = 'blueviolet'\n pretty_colours['549'] = 'forestgreen'\n # H2\n ## DOM Efficiency Sets\n pretty_colours['551'] = 'cornflowerblue'\n pretty_colours['552'] = 'cornflowerblue'\n pretty_colours['553'] = 'cornflowerblue'\n pretty_colours['554'] = 'mediumseagreen'\n pretty_colours['555'] = 'mediumseagreen'\n pretty_colours['556'] = 'mediumseagreen'\n ## Hole Ice Sets\n pretty_colours['560'] = 'olive'\n pretty_colours['561'] = 'olive'\n pretty_colours['564'] = 'darkorange'\n pretty_colours['565'] = 'darkorange'\n pretty_colours['572'] = 'teal'\n pretty_colours['573'] = 'teal'\n ## Dima Hole Ice Set without RDE\n pretty_colours['570'] = 'mediumvioletred'\n ## Baseline\n pretty_colours['585'] = 'slategrey'\n # Systematics\n pretty_colours['aeff_scale'] = 'maroon'\n pretty_colours['atm_muon_scale'] = 'goldenrod'\n pretty_colours['deltam31'] = 'blueviolet'\n pretty_colours['theta23'] = 'forestgreen'\n pretty_colours['hole_ice_fwd'] = 'mediumvioletred'\n pretty_colours['dom_eff'] = 'cornflowerblue'\n pretty_colours['genie_ma_qe'] = 'mediumseagreen'\n 
pretty_colours['genie_ma_res'] = 'olive'\n pretty_colours['hole_ice'] = 'darkorange'\n pretty_colours['nue_numu_ratio'] = 'teal'\n pretty_colours['theta13'] = 'fuchsia'\n pretty_colours['barr_nu_nubar'] = 'thistle'\n pretty_colours['barr_uphor'] = 'orchid'\n pretty_colours['delta_index'] = 'navy'\n # Mass ordering\n pretty_colours['no'] = 'r'\n pretty_colours['io'] = 'b'\n # Asimov fits\n pretty_colours['th_to_wh'] = 'darkviolet'\n pretty_colours['wh_to_th'] = 'deepskyblue'\n colourlabel = None\n for colourkey in pretty_colours.keys():\n if (colourkey in label) or (colourkey == label):\n colourlabel = pretty_colours[colourkey]\n if colourlabel is None:\n logging.debug(\"I do not have a colour scheme for your label %s. \"\n \"Returning black.\"%label)\n colourlabel = 'k'\n return colourlabel", "def _plotDisplay(self):\n self.gc.tick_labels.set_xformat('ddd')\n self.gc.tick_labels.set_yformat('ddd')\n if self.csys == 'GAL':\n if self.xlabel is None: self.xlabel = r'Galactic longitude $l$ $(^{\\circ})$'\n if self.ylabel is None: self.ylabel = r'Galactic latitude $b$ $(^{\\circ})$'\n else:\n if self.xlabel is None: self.xlabel = r'RA (J2000)'\n if self.ylabel is None: self.ylabel = r'Dec (J2000)'\n self.gc.axis_labels.set_xtext(self.xlabel)\n self.gc.axis_labels.set_ytext(self.ylabel)\n self.gc.set_axis_labels_font(size=self.ftsize1)\n self.gc.tick_labels.set_font(size=self.ftsize2) # <====== perhaps a string here?\n self.gc.ticks.set_color('black')", "def plot_embedding(X, y, d, title=None):\n x_min, x_max = np.min(X, 0), np.max(X, 0)\n X = (X - x_min) / (x_max - x_min)\n\n # Plot colors numbers\n plt.figure(figsize=(10,10))\n ax = plt.subplot(111)\n for i in range(X.shape[0]):\n # plot colored number\n plt.text(X[i, 0], X[i, 1], str(y[i]),\n color=plt.cm.bwr(d[i] / 1.),\n fontdict={'weight': 'bold', 'size': 9})\n\n plt.xticks([]), plt.yticks([])\n if title is not None:\n plt.title(title)\n plt.savefig('dann_tsne.png')", "def process(self):\n\n linelang = defaultdict(int)\n wordlang = defaultdict(int)\n\n linefont = defaultdict(int)\n wordfont = defaultdict(int)\n\n inputfiles = self.input_files\n for input_file in inputfiles:\n\n alignurl = input_file.url\n pcgts = parse(alignurl, True)\n page = pcgts.get_Page()\n regions = page.get_TextRegion()\n\n for region in regions:\n lines = region.get_TextLine()\n\n for line in lines:\n try:\n llang = line.primaryLanguage\n linelang[llang] += 1\n except TypeError:\n pass\n\n try:\n lfont = line.fontFamily\n linefont[lfont] += 1\n except TypeError:\n pass\n\n words = line.get_Word()\n for word in words:\n try:\n wlang = word.language\n wordlang[wlang] += 1\n except TypeError:\n pass\n\n try:\n wfont = word.get_TextStyle().fontFamily\n wordfont[wfont] += 1\n except TypeError:\n pass\n\n #predominant language\n try:\n lang = max(linelang, key=lambda k: linelang[k])\n except TypeError:\n try:\n lang = max(wordlang, key=lambda k: wordlang[k])\n except TypeError:\n lang = 'German'\n\n #predominant font\n try:\n font = max(linefont, key=lambda k: linefont[k])\n except TypeError:\n try:\n font = max(wordfont, key=lambda k: wordfont[k])\n except TypeError:\n font = 'Antiqua'\n\n\n print(lang)\n print(font)" ]
[ "0.60507584", "0.5974315", "0.5905134", "0.5861216", "0.58539575", "0.57940316", "0.5775768", "0.5712009", "0.57002044", "0.5681444", "0.56463766", "0.5642815", "0.5637291", "0.5622407", "0.5596009", "0.5593054", "0.5562897", "0.55299807", "0.5506552", "0.5498803", "0.5488566", "0.54884934", "0.5484237", "0.54395294", "0.5429819", "0.5396051", "0.5392924", "0.5341676", "0.53403795", "0.5325545", "0.5324486", "0.5312993", "0.5307334", "0.5288536", "0.52881336", "0.52775586", "0.5275407", "0.5247371", "0.5241965", "0.52236974", "0.5209994", "0.5191526", "0.5173098", "0.51719403", "0.5154915", "0.51510036", "0.51499695", "0.5149342", "0.51474524", "0.51455647", "0.5139125", "0.5124407", "0.5122582", "0.5122472", "0.511772", "0.5113524", "0.51098835", "0.5108075", "0.51068676", "0.5100923", "0.5083864", "0.5080142", "0.5079618", "0.5066396", "0.5056124", "0.50531906", "0.5048879", "0.50399756", "0.50340927", "0.5028162", "0.5027743", "0.50213206", "0.50201315", "0.4994733", "0.49937493", "0.49922848", "0.49786982", "0.49786213", "0.4975082", "0.49695265", "0.49691683", "0.4959511", "0.4955051", "0.49549052", "0.4953798", "0.49526384", "0.49507117", "0.49504742", "0.4944443", "0.4939862", "0.4939862", "0.4939771", "0.4925729", "0.492458", "0.4919048", "0.4918421", "0.49183044", "0.49154276", "0.49131465", "0.49119878" ]
0.6960431
0
Requests frames for a product.
def find(cls, product_id, start=None, end=None, limit=None, sort=None, reruns=None, **kwargs): return super(ProductFrame, cls).find(product_id=product_id, start=start, end=end, limit=limit, sort=sort, reruns=reruns, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _request_frame(self):\n self._send_command('GET_FRAME')", "def GetProduct(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def products(self, start=None, limit=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/products'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json", "def ListProducts(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def getProducts():\n return \"http://api.tcgplayer.com/catalog/products\"", "def all_frames(request):\n frames = Product.objects.filter(department=\"2\")\n\n context = {\n 'frames': frames,\n }\n\n return render(request, 'products/frames.html', context)", "def get_product_web_page(product = None):\n \n products_list = get_product_list()\n modis_url_dict = {prod: '{}v006'.format(prod.lower()) for prod in \n products_list if prod[0] == 'M'}\n viirs_url_dict = {prod: '{}v001'.format(prod.lower()) \n for prod in products_list if prod[:3] == 'VNP'}\n modis_url_dict.update(viirs_url_dict)\n base_addr = ('https://lpdaac.usgs.gov/products/{0}')\n if product is None or not product in modis_url_dict.keys():\n print 'Product not found... redirecting to data discovery page'\n addr = ('https://lpdaac.usgs.gov')\n else:\n addr = base_addr.format(modis_url_dict[product])\n webbrowser.open(addr)", "def BuyProduct(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def WatchProduct(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def processProductsRequest(self):\n\n\t\t# Use the simple page renderer to create the body content\n\t\treturn self.render_simple_page('Products')", "def test_products_get(mocker, data):\n mocker.patch(\"sps.request.fetch\", autospec=True)\n request.fetch.return_value = data\n assert products.get(None, \"fake-file-name\", False, False)\n request.fetch.assert_called_with(\"https://scc.suse.com/api/package_search/products\")", "def stream(self) -> None:\n\n # Get the current frame and scan it for barcodes.\n products: Any # TODO: actual type\n (frame, products) = self.barcode_scanner.get_and_scan_current_frame()\n\n # Display frame.\n self.image.update_image(frame)\n\n if not products:\n # Continue streaming video.\n self.image.after(self.stream_delay, self.stream)\n self.confirm_button.config(state=tk.DISABLED)\n else:\n self.confirm_button.config(state=tk.NORMAL)\n\n # Show product information in GUI.\n for product in products:\n self.barcode_text.set(product.barcode)\n\n if type(product) == prdct.OFFProduct:\n self.brand_text.set(product.brands)\n self.name_text.set(product.name)\n else:\n self.name_text.set(\"Product not found.\")", "def openproducts(self):\n\n print \"Open products\"\n self.combo_product_list.setEnabled(True)\n frame=self.combo_area_list.currentText()\n self.combo_product_list.clear()\n self.combo_dataset_list.clear()\n self.combo_variable_list.clear()\n print str(frame)\n list_glo=[]\n if str(frame) == \"GLOBAL\":\n for key in self.dict_prod.keys():\n if str(frame) in key :\n list_glo.append(str(key))\n ind=0\n #print \"Frame %s \" %(frame)\n for 
key in self.dict_prod.keys():\n if str(frame) == \"BAL\":\n frame1=\"_BAL_\"\n frame2=\"-BAL-\"\n if frame1 in key or frame2 in key :\n self.combo_product_list.addItem(str(key))\n elif str(frame) == \"NWS\":\n frame1=\"NORTHWESTSHELF_\"\n frame2=\"NWS\"\n if frame1 in key or frame2 in key :\n self.combo_product_list.addItem(str(key))\n elif str(frame) == \"GLOBAL\":\n if str(frame) in key :\n if ind == 0 :\n self.combo_product_list.addItem(list_glo[5])\n elif ind == 5 : \n self.combo_product_list.addItem(list_glo[0])\n else : \n self.combo_product_list.addItem(list_glo[ind])\n ind+=1\n else :\n if str(frame) in key :\n self.combo_product_list.addItem(str(key))\n self.combo_dataset_list.setEnabled(True)", "def product_card(driver, open_login_page):\n return ProductPage(driver)", "def get_product(name):\n\n products = get_products({'producer': name})\n\n return response(\n 'Successfully retreived all the products for company ' + name,\n 200,\n { 'data':\n { 'products': products }\n }\n )", "def get_product_info(self, product):\n\n product_link = self.url + product.a['href']\n product_page = self.get_response(product_link)\n product_soup = BeautifulSoup(product_page.content, 'html.parser')\n\n # get product details\n product_brand = product_soup.find('h2').text.strip()\n product_name = product_soup.find('h1').text.strip()\n\n product_details = product_soup.find('div', id='z-pdp-detailsSection')\n\n product_attributes = []\n for detail_section in product_details.find_all('div', class_='h-container h-flex-no-shrink h-tabs__panel h-align-left'):\n for tag in detail_section.find_all('p'):\n product_attributes.append(tag.text.strip())\n\n # get product image\n product_img_thumbs = product_soup.find('div', id='z-pdp-topSection')\n product_img_thumbs = product_img_thumbs.find(\n 'div', class_='h-container h-carousel h-carousel-thumbnail vertical h-align-left')\n\n img_links = []\n product_img_link = ''\n for img_thumb in product_img_thumbs.find_all('picture'):\n img_link = img_thumb.find('img')['src'].replace('thumb', 'zoom')\n if 'packshot' in img_link:\n product_img_link = img_link\n else:\n img_links.append(img_link)\n\n # product_img_link = 'https:' + product_img.split('\"')[1].split('?')[0]\n product_img_id = product_img_link.split('/')[-1].split('@')[0]\n\n return {'name': product_name,\n 'brand': product_brand,\n 'id': product_img_id,\n 'img_url': product_img_link,\n 'model_img_urls': ', '.join(img_links),\n 'attributes': ', '.join(product_attributes)}", "def apiquery(self, product_url, params={}):\n requesturl = self.config['host'] + product_url\n timestamp = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')\n signature = hmac.new(self.config['key'],\n ''.join([self.config['username'], timestamp, product_url]),\n digestmod=hashlib.sha1).hexdigest()\n params['timestamp'] = timestamp\n params['signature'] = signature\n params['api_username'] = self.config['username']\n if 'format' not in params.keys():\n params['format'] = self.config['dataformat']\n req = requests.get(requesturl, params=params)\n if req.status_code != requests.codes.ok:\n try:\n json_response = req.json()\n self.raise_best_exception(json_response)\n except KeyError:\n raise UnexpectedError(req.status_code, req.text)\n return req", "def on_get(self, req, resp):\n sample = load_data()\n\n print(sample.base.describe())\n resp.status = falcon.HTTP_200\n\n frame_str = str(sample.base.describe()) + \"\\n\"\n resp.body = frame_str\n # resp.body = sample.base.describe()", "def printProductsFromServer():\n server_response = 
getRequest(getProducts())\n\n print('SERVER RESPONSE')\n # pprint(server_response) # prints the JSON from the server\n\n # Print out all the products and corresponding IDs\n for item in server_response[\"results\"]:\n id = item[\"productId\"]\n name = item[\"productName\"]\n print('Product Name' , name , \" | Product ID: \", id)", "async def run(product_ids):\n url = \"http://challenge-api.luizalabs.com/api/product/{}\"\n tasks = []\n\n # Fetch all responses within one Client session,\n # keep connection alive for all requests.\n async with ClientSession() as session:\n for product_id in product_ids:\n task = asyncio.ensure_future(utils.fetch(url.format(product_id), session))\n tasks.append(task)\n\n self.responses = await asyncio.gather(*tasks)", "def products():\n username = session['username']\n api_key = session['api_key']\n url = 'https://consumernotebook.com/api/v1/products/?username={0}&apikey={1}'.format(username, api_key)\n r = requests.get(url)\n products = []\n if r.status_code != 200:\n error = \"{0} error. Are you sure you entered a valid API key?\".format(r.status_code)\n return render_template('products.html', error=error)\n else:\n products_json = json.loads(r.content)\n for product in products_json[u'objects']:\n products.append(product[u'title'])\n return render_template('products.html', products=products)", "def api_id():\n if 'productName' in request.args:\n productName = request.args['productName']\n if utils.specialCharCheck(productName):\n return jsonify({ \"status\": \"200\",\"data\" : \"Special Character is not allowed in the search!\" })\n else:\n return jsonify({ \"status\": \"200\",\"data\" : \"Please specify product name\" })\n\n itemList = []\n try :\n # below path needs to come from the configuration file\n path='E:/Project/Bar Raiser/11 July - Scrapping Hathagon/chromedriver.exe'\n url='https://www.amazon.in/s?k='+productName\n browser= utils.getDriverInfo(path)\n htmlSourceSoup=utils.getHtmlSource(url, browser)\n linkDiv = htmlSourceSoup.find_all('div', {'class' : 'sg-col-inner'})\n itemList=utils.getJsonFromHtml(linkDiv)\n\n if not itemList:\n result = {\n \"status\" : \"200\",\n \"productList\" : \"Product not found!\"\n }\n else:\n result = { \n \"status\" : \"200\",\n \"productList\" : itemList\n }\n\n return jsonify(result)\n except Exception as ex:\n return jsonify({ \"status\": \"500\",\"data\" : \"Server error while processing the request\", \"error\":ex })", "def get_products_from_page(url):\n\n def get_data_from_book(book):\n \"\"\"Return data from one book.\"\"\"\n src_img = book.find(\"img\").get(\"src\")\n src_img = src_img.replace(\"../\", \"\")\n image = \"http://books.toscrape.com/\" + src_img\n\n in_stock = False\n in_stock_or_not = book.find(\"p\", {\"class\", \"instock\"}).text\n if \"In stock\" in in_stock_or_not:\n in_stock = True\n\n name = book.find(\"h3\").find(\"a\").text\n\n price = book.find(\"p\", {\"class\", \"price_color\"}).text\n price = price.replace(\"Â\", \"\")\n\n rating = book.find(\"p\", {\"class\", \"star-rating\"}).get(\"class\")[1]\n rating = w2n.word_to_num(rating)\n\n return {\n \"image\": image,\n \"in_stock\": in_stock,\n \"name\": name,\n \"price\": price,\n \"rating\": rating,\n }\n\n r = requests.get(url)\n soup = BeautifulSoup(r.text, \"html.parser\")\n books = soup.find_all(\"article\", {\"class\", \"product_pod\"})\n\n result = list(map(get_data_from_book, books))\n return result", "def get_product(self, page_size=10, pages_number=1):\n products = []\n params = self.params.copy()\n params[\"page_size\"] = 
page_size\n\n try:\n response = requests.get(self.url, params=params, timeout=3)\n response.json()\n except requests.ConnectionError:\n print(\"Error when fetching the API\")\n for i in range(pages_number):\n params[\"page\"] = i + 1\n response = requests.get(self.url, params=params)\n if response.status_code == 200:\n products.extend(response.json()[\"products\"])\n return products", "def get_open(product=None, limit=100):\n\n params = {\n 'limit': limit,\n 'product_id': product,\n }\n\n log.info('getting all OPEN ORDERS')\n\n resp = httpapi.get(\n common.api_url + 'orders',\n params=params,\n auth=common.auth,\n )\n\n return resp.json(), resp", "def RegisterProduct(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def test_get_product(self):\n # get the id of a product\n test_product = self._create_products(1)[0]\n resp = self.app.get(\n \"/products/{}\".format(test_product.id), content_type=\"application/json\"\n )\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = resp.get_json()\n self.assertEqual(data[\"name\"], test_product.name)\n \n # print the repr of a product\n rep = \"%s\" % test_product", "def get_product(params, freezer_id, token):\n # Avoid SQL injection before doing requests\n # with the token and check the validity of it\n token = MySQLdb.escape_string(token)\n if not validator_db.valid_token(token):\n return custom_response(400, responseMessage.BAD_TOKEN)\n\n if params == 'all':\n if freezer_id == 0:\n return jsonify(query_db.get_query_db(mysqlRequests.generate_product_query('all'),\n (token,),\n header=True))\n else:\n return jsonify(query_db.get_query_db(mysqlRequests.generate_product_query('all-one'),\n (token,\n freezer_id,),\n header=True))\n\n if params == 'inside':\n if freezer_id == 0:\n return jsonify(query_db.get_query_db(mysqlRequests.generate_product_query('inside'),\n (token,),\n header=True))\n else:\n return jsonify(query_db.get_query_db(mysqlRequests.generate_product_query('inside-one'),\n (token,\n freezer_id,),\n header=True))\n\n if params == 'outside':\n if validator_db.valid_token(token):\n if freezer_id == 0:\n return jsonify(query_db.get_query_db(mysqlRequests.generate_product_query('outside'),\n (token,),\n header=True))\n else:\n return jsonify(query_db.get_query_db(mysqlRequests.generate_product_query('outside-one'),\n (token,\n freezer_id,),\n header=True))\n\n return custom_response(400, responseMessage.BAD_PARAMETER)", "def do_features_request(module_type=None, version=None, software=None):\n\n #  connect to database\n cur_db = connect_db(\"172.20.38.50\", \"mvelay\", \"user\", \"sandbox\")\n cursor = cur_db.cursor()\n\n # build whole query\n cur_query = \"\"\" SELECT feature, supported FROM t_feature\n WHERE module=\"%s\" AND version=\"%s\" AND sw=\"%s\";\"\"\" \\\n % (module_type[0], version[0], software[0])\n\n print cur_query\n cursor.execute(cur_query)\n results = cursor.fetchall()\n cursor.close()\n\n if results:\n results = results[:1000] # Limit to first 1000 results\n else:\n results = None\n\n return module_type[0], version[0], software[0], results", "def test_list_available_product(self):\n view = AvailableProductListView.as_view({'get': 'list'})\n uri = reverse('products:list-available-products')\n request = self.factory.get(uri, HTTP_AUTHORIZATION='Token {}'.format(self.token_user.key))\n request.user = self.user['user']\n response = view(request)\n 
self.assertEqual(response.status_code, 200,\n f'Expected Response Code 200, received {response.status_code} instead.')", "def retrieve_product_infos(self):\n\n # PRODUCT NAME\n try:\n product_name = self.product['product_name'].capitalize()\n except KeyError:\n product_name = None\n\n # PRODUCT CODE\n try:\n product_code = self.product['code'].capitalize()\n except KeyError:\n product_code = None\n\n # URL\n try:\n product_url = self.product['url'].lower()\n except KeyError:\n product_url = None\n\n # IMAGE URL\n try:\n image_url = self.product['image_url'].lower()\n except KeyError:\n image_url = None\n\n # QUANTITY\n try:\n quantity = self.product['quantity'].capitalize()\n except KeyError:\n quantity = None\n\n # INGREDIENTS\n try:\n ingredients = self.product['ingredients_text_fr'].capitalize()\n except KeyError:\n ingredients = None\n\n # BRAND\n brands = []\n try:\n for brand in self.product['brands'].split(','):\n brand = brand.strip().capitalize()\n if (\n brand != ''\n and brand not in brands\n ):\n brands.append(brand)\n except KeyError:\n pass\n\n # STORES\n stores = []\n try:\n for store in self.product['stores'].split(','):\n store = store.strip().capitalize()\n if (\n store != ''\n and store not in stores\n ):\n stores.append(store)\n except KeyError:\n pass\n\n # COUNTRY\n try:\n countries = self.product['countries'].capitalize()\n except KeyError:\n countries = None\n if 'France' in countries:\n countries = 'France'\n else:\n countries = None\n\n # COMPARE TO CATEGORY\n try:\n compare_to = self.product['compared_to_category'].capitalize().split(':')[1]\n except KeyError:\n compare_to = None\n try:\n Categories.objects.get(\n name=compare_to\n )\n except Categories.DoesNotExist:\n compare_to = None\n except:\n importable = False\n\n # CATEGORIES HIERARCHY\n try:\n categories_hierarchy = [\n category.split(':')[1] for category in self.product['categories_hierarchy']\n ]\n except KeyError:\n categories_hierarchy = None\n\n # NUTRISCORE GRADE\n nutriscore_labels = [\n 'nutrition_grade_fr',\n 'nutriscore_grade'\n ]\n nutriscore = 'F'\n i = 0\n while (\n i < len(nutriscore_labels)\n and nutriscore == 'F'\n ):\n try:\n nutriscore = self.product[nutriscore_labels[i]].upper()\n except KeyError:\n i += 1\n\n product_infos = {\n 'product_name': product_name,\n 'product_code': product_code,\n 'product_url': product_url,\n 'image_url': image_url,\n 'quantity': quantity,\n 'ingredients': ingredients,\n 'brands': brands,\n 'stores': stores,\n 'countries': countries,\n 'compare_to': compare_to,\n 'categories_hierarchy': categories_hierarchy,\n 'nutriscore': nutriscore\n }\n\n nutriments = self.product['nutriments']\n for nutriment in self.list_nutriments:\n try:\n product_infos[nutriment] = float(nutriments[nutriment])\n except KeyError:\n product_infos[nutriment] = 0\n\n return product_infos", "def test_get_product(self):\n self._require_login(self.user1)\n response = self.client.get('/api/1.0/products/1/')\n self.assertEqual(response.data['name'], 'Producto 1')\n self.assertEqual(response.data['description'], 'Descripcion producto 1')\n self.assertEqual(response.data['selling'], True)\n self.assertEqual(response.data['price'], '24.0')\n self.assertEqual(response.data['seller']['user']['username'], 'testuser1')\n self.assertEqual(response.data['category']['name'], 'general')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n response = self.client.get('/api/1.0/products/2/')\n self.assertEqual(response.data['name'], 'Producto 2')\n 
self.assertEqual(response.data['description'], 'Descripcion producto 2')\n self.assertEqual(response.data['selling'], False)\n self.assertEqual(response.data['price'], '312.0')\n self.assertEqual(response.data['seller']['user']['username'], 'testuser2')\n self.assertEqual(response.data['category']['name'], 'deportes')\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def Get(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def fusion_api_get_firmware_driver(self, uri=None, api=None, headers=None, param=''):\n return self.driver.get(uri, api, headers, param)", "def test_with_request_factory(self):\n\n request = self.factory.get(\"/watch\")\n engine = import_module(settings.SESSION_ENGINE)\n request.session = engine.SessionStore()\n response = product_all(request)\n html = response.content.decode(\"utf8\")\n self.assertInHTML(\"<title>Book Store</title>\", html)\n self.assertEqual(response.status_code, 200)", "def view_products():\n min_id = (Product.select().order_by(Product.product_id.asc()).get()).product_id\n max_id = (Product.select().order_by(Product.product_id.desc()).get()).product_id\n print(f\"\\nPlease select id between {min_id} & {max_id}\")\n id = int(input(\"Select product id: \"))\n while id not in range(min_id, max_id+1):\n print(\"Your selection must be between {} and {}\".format(min_id, max_id))\n id = int(input(\"Select product id: \"))\n print(f\"\"\"\\n-Product: {Product.get_by_id(id).product_name}\n-Quantity: {Product.get_by_id(id).product_quantity}\n-Price: {Product.get_by_id(id).product_price} cents\n-Date updated: {Product.get_by_id(id).date_updated}\\n\"\"\")\n input(\"\\nPress ENTER to continue\")\n clear()", "def fetch(self):\n try:\n self.genre = 'Review'\n log.debug(self.log_msg(\"Fetching the prouct page url %s\"%self.currenturi))\n res=self._getHTML(self.currenturi) # Assuming self.currenturi is at the product page\n self.rawpage=res['result']\n self._setCurrentPage()\n try:\n self.parent_page_title = stripHtml(self.soup.find('h1',{'id':'pgTitleDetail'}).renderContents())\n except:\n self.parent_page_title =''\n try:\n self.__product_price = self.soup.find('tbody',{'class':'prices'}).td.renderContents().replace('$','')\n except:\n log.exception(\"Error in fetching product_price\")\n self.__product_price = None\n\n parent_page_url = self.task.instance_data['uri']\n review_first_page_url = self.soup.find('a',text=\"Show All Customer Reviews &#187; \").parent['href']\n review_url_order = \"&sortReviewsBy=DateDescending\"\n self.currenturi = self.base_url + review_first_page_url + review_url_order\n log.info(self.log_msg('current_uri :: %s'%(self.currenturi)))\n self._getParentPage()\n self.next_url_links=[]\n self.fetch_next_link = True\n while self.fetch_next_link:\n self._iterateReviewPages(parent_page_url)\n return True\n except Exception,e:\n log.exception(self.log_msg(\"Exception occured in fetch()\"))\n return False", "def subproduct_add_case(request):\n session = request.session.get('new_product', {})\n if request.GET.get('gtin', None):\n session['gtin'] = request.GET.get('gtin')\n if request.GET.get('package_level', None):\n session['package_level'] = request.GET.get('package_level')\n if request.GET.get('package_type', None):\n session['package_type'] = request.GET.get('package_type')\n request.session['new_product'] = session\n\n if not session:\n raise Http404()\n gtin = session.get('gtin', 0)\n prefix = prefix_service.find_item(\n user=request.user,\n starting_from=str(gtin)\n )\n if not prefix:\n 
raise Http404()\n pl = session.get('package_level', None)\n if not pl:\n flash(request, 'Choose a package level', 'danger')\n return redirect(reverse('products:add_product'))\n\n try:\n page = int(request.GET.get('page', '1'))\n except (ValueError, TypeError):\n page = 1\n\n settings_per_page = getattr(settings, 'PRODUCTS_PER_PAGE', 10)\n try:\n per_page = int(request.GET.get('per_page'))\n except (ValueError, TypeError):\n per_page = None\n if per_page:\n session['per_page'] = per_page\n else:\n per_page = session.get('per_page', settings_per_page)\n\n prefixes = prefix_service.all(user=request.user)\n package_level = package_level_service.get(pl)\n products = Product.service.get_available_subproducts(\n owner=request.user,\n package_level=package_level\n )\n\n target_markets = gtin_target_market_service.get_by_products_list(products)\n target_market_choices = [['', '']]\n for target_market in target_markets:\n try:\n if target_market_choices[-1][0] == target_market.target_market.code:\n continue\n except Exception:\n pass\n target_market_choices.append([\n target_market.target_market.code,\n target_market.target_market.market\n ])\n\n filter_data = {}\n form = SubProductsForm()\n filterform = FilterForm()\n if request.method == 'POST':\n form = SubProductsForm(request.POST)\n if form.is_valid():\n if request.POST.get('filtersubmit'):\n filterform = FilterForm(request.POST)\n if filterform.is_valid():\n products = product_helper.filter_list(products, filterform)\n session['adding_filter'] = filter_data\n else:\n # form = forms.SubProductsForm(request.form)\n # we no longer use data from form but from session\n sub_products = session.get('sub_products', [])\n sub_products.sort()\n\n if len(sub_products) > 0:\n sub_products_data = Product.service.check_subproducts(\n sub_product_gtins=sub_products,\n package_level=package_level,\n owner=request.user\n )\n if sub_products_data['is_valid']:\n # we have subproducts, we move to the next step\n session['sub_products'] = sub_products\n # return redirect(reverse('products:subproduct_add_case_details'))\n return redirect('/products/js-add/#/details?package_level=%s&package_type=%s' % (session['package_level'], session['package_type']))\n else:\n # we have incorrect subproducts\n flash(request, sub_products_data['error'], 'danger')\n return redirect(reverse('products:subproduct_add_case'))\n else:\n # we do not have subproducts - we reselect\n flash(request, 'You must choose products before proceeding to next form', 'danger')\n return redirect(reverse('products:subproduct_add_case'))\n else:\n session['sub_products'] = []\n\n if request.GET.get('clear_filter'):\n if session.get('adding_filter'):\n del session['adding_filter']\n if session.get('adding_filter'):\n filter_data = session['adding_filter']\n else:\n filterform = FilterForm()\n filterform.initial['pallet'] = False\n if package_level.id >= PackageLevel.CASE:\n filterform.initial['case'] = False\n if package_level.id >= PackageLevel.PACK:\n filterform.initial['pack'] = False\n\n # products = ProductFilter(filter_data, queryset=products).qs\n filterform.set_prefixes(prefixes)\n\n if products:\n paginator = Paginator(products, per_page)\n try:\n paginator_page = paginator.page(page)\n except InvalidPage:\n paginator_page = paginator.page(1)\n object_list = paginator_page.object_list\n else:\n paginator_page = None\n object_list = None\n\n assoc_products = get_assoc_products_by_products_list(object_list)\n\n filterform.fields['target_market'].choices = target_market_choices\n 
filterform.base_fields['target_market'].choices = target_market_choices\n filterform.declared_fields['target_market'].choices = target_market_choices\n context = {\n 'products': object_list,\n 'assoc_products': assoc_products,\n 'prefix': prefix,\n 'form': form,\n 'filterform': filterform,\n 'pagination': paginator_page,\n 'per_page': per_page,\n 'ppp': settings_per_page,\n 'enable_leading': True # user.profile.enable_leading\n }\n\n return render(request, 'products/subproduct_add_case.html', context=context)", "def GetCart(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def parse_product(self, response):\n item = ProductItem()\n item['url'] = response.url\n item['vendor'] = parse_url(response.url).netloc\n\n item['product_name'] = response.css('h1::text').get()\n item['density'] = response.css('#description strong::text').get()\n item['available'] = not response.css('.msgSoldOut')\n\n\n for selector in response.css('.fpBktParam'):\n item['raw_string'] = selector.css('span::text').get()\n item['price'] = selector.css('div::text').getall()[1]\n yield item", "def __init__(self):\n\n self.session = requests.session()\n self.current_user_agent_index = 0\n self.headers = {\n 'Host': 'www.amazon.com',\n 'User-Agent': _USER_AGENT_LIST[0],\n 'Accept': 'text/html,application/xhtml+xml,\\\n application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n }\n self.product_dict_list = []", "def products(self):\n response = requests.get(self._url(self._PRODUCTS_PATH), headers=self._headers)\n return response.json()", "def page1(self):\n result = request101.GET('/Cars_Sample_App/supercars.do', None,\n ( NVPair('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'), ))\n self.token_query = \\\n httpUtilities.valueFromBodyURI('query') # 'manu'\n # 15 different values for token_mid found in response, using the first one.\n self.token_mid = \\\n httpUtilities.valueFromBodyURI('mid') # '3'\n\n grinder.sleep(124)\n request102.GET('/Cars_Sample_App/images/enquire_but.gif')\n\n request103.GET('/Cars_Sample_App/images/line.gif')\n\n request104.GET('/Cars_Sample_App/images/manufacturers/Bmw.gif')\n\n request105.GET('/Cars_Sample_App/images/manufacturers/AstonMartin.gif')\n\n request106.GET('/Cars_Sample_App/images/manufacturers/Ferrari.gif')\n\n request107.GET('/Cars_Sample_App/images/insurance_but.gif')\n\n grinder.sleep(90)\n request108.GET('/Cars_Sample_App/images/manufacturers/Porsche.gif')\n\n request109.GET('/Cars_Sample_App/images/manufacturers/Jaguar.gif')\n\n request110.GET('/Cars_Sample_App/images/pipe.gif')\n\n request111.GET('/Cars_Sample_App/images/manufacturers/Lotus.gif')\n\n return result", "def products():\n\n\treturn render_template(\"products.html\")", "def get(self, request, product_id=None, page_no=None):\n if product_id is None:\n return Response(\"product_id cannot be null\", status=status.HTTP_400_BAD_REQUEST)\n \n if page_no is None:\n page_no = 1\n if page_no <= 0:\n return Response(\"Page Number must be >= 1\", status=status.HTTP_400_BAD_REQUEST)\n\n queryset = Qanda.objects.using('scraped').filter(product_id=product_id)\n if queryset.count() == 0:\n return Response(f\"No QandA exists for this product - {product_id}\", status=status.HTTP_404_NOT_FOUND)\n \n ITEMS_PER_PAGE = 10\n queryset = queryset[(page_no - 1) * ITEMS_PER_PAGE : (page_no) * ITEMS_PER_PAGE]\n \n serializer = QandASerializer(queryset, many=True)\n return 
Response(serializer.data, status=status.HTTP_200_OK)", "def sync_product_with_gmc(self, products):\n def get_names(cat):\n \"\"\" Return the list [cat.name, cat.parent_id.name, ...] \"\"\"\n res = []\n while cat:\n res.append(cat.name)\n cat = cat.parent_id\n return res\n\n currency = self.env.user.company_id.currency_id.name\n\n service = self.gmc_flow()\n\n count = 1\n for product in products:\n if product.google_mcid:\n offerId = product.google_mcid\n product.write({'google_sync_date': fields.Date.today()})\n else:\n offerId = 'CM%s' % get_unique_id()\n product.write({'google_mcid': offerId, 'google_sync_date': fields.Date.today()})\n #Display ads id\n if product.google_display_ads_id:\n displayAdsId = product.google_display_ads_id\n else:\n displayAdsId = 'ADS%s' % get_unique_id()\n product.write({'google_display_ads_id': displayAdsId})\n product_data = {\n 'offerId': offerId,\n 'displayAdsId': displayAdsId,\n 'title': product.name,\n 'description': product.description_sale,\n #Use product template url as variants are not shown sepratly.\n 'link': product.google_merchant_center_id.website + \"/shop/product/%s\" % (product.product_tmpl_id.id,),\n 'imageLink': product.google_merchant_center_id.website + '/web/image/%s/%s/%s/image.jpg' % ('product.template', product.product_tmpl_id.id, 'image'),\n #Note: Instead of passing website url passsed backend URl because Store not accept image without type\n 'contentLanguage': product.google_content_language,\n 'targetCountry': product.google_target_country,\n 'channel': product.google_channel,\n 'availability': product.google_availability,\n 'condition': product.google_condition,\n 'googleProductCategory': \" > \".join(reversed(get_names(product.google_product_category_id))),\n 'productType': \" > \".join(reversed(get_names(product.categ_id))),\n 'brand': product.google_product_brand_id and product.google_product_brand_id.name or '',\n 'price': {\n 'value': product.list_price,\n 'currency': currency},\n 'shipping': [{\n 'country': product.google_target_country,\n 'service': product.google_shipping,\n 'price': {'value': product.google_shipping_amount,\n 'currency': currency}\n }],\n 'taxes': [\n {\n 'rate': product.google_tax_rate,\n 'country': product.google_target_country,\n }],\n 'shippingWeight': {\n 'value': product.weight * 1000, \n 'unit': 'grams'\n },\n }\n\n #Check if identifierExists than only add mpn\n if product.google_identifier_exists:\n product_data.update({'mpn': product.default_code})\n if not product.google_barcode_as_gtin and product.google_gtin:\n product_data.update({'gtin': product.google_gtin})\n elif product.google_barcode_as_gtin and product.barcode:\n product_data.update({'gtin': product.barcode})\n else:\n product_data.update({'identifierExists': 'no'})\n\n #add some optional attributes\n if product.google_gender:\n product_data.update({'gender': product.google_gender})\n if product.google_age_group:\n product_data.update({'ageGroup': product.google_age_group})\n if product.google_product_size_id:\n product_data.update({'sizes': [product.google_product_size_id and product.google_product_size_id.name or '']})\n if product.google_product_color_id:\n product_data.update({'color': product.google_product_color_id and product.google_product_color_id.name or '',})\n if product.google_expiration_date:\n #pass date in perticular formate\n expiration_date = product.google_expiration_date.strftime('%Y-%m-%d')\n product_data.update({'expirationDate': expiration_date})\n\n #Optionla Attributes for Remarketing\n if 
product.google_display_ads_similar_ids:\n product_data.update({'displayAdsSimilarIds': [prod.google_display_ads_id for prod in product.google_display_ads_similar_ids]})\n if product.google_display_ads_title:\n product_data.update({'displayAdsTitle': product.google_display_ads_title})\n if product.google_display_ads_link:\n product_data.update({'displayAdsLink': product.google_display_ads_link})\n if product.google_display_ads_value:\n product_data.update({'displayAdsValue': product.google_display_ads_value})\n if product.google_excluded_destination:\n product_data.update({'destinations': {\n 'destinationName': 'DisplayAds', \n 'intention': 'excluded'}\n })\n\n # Add product.\n request = service.products().insert(merchantId=product.google_merchant_center_id.name, body=product_data)\n\n try:\n result = request.execute()\n _logger.info('Count: %s------- Product: %s', count, product)\n count += 1\n self.env.cr.commit()\n except errors.HttpError as e:\n error = simplejson.loads(e.content.decode('utf-8'))\n raise UserError(_(\"%s when syncronizing %s.\") % (error['error'].get('message'),product.name))", "def sli_get(obj, product_name, name):\n client = get_client(obj)\n\n product = client.product_list(name=product_name)\n if not product:\n fatal_error('Product {} does not exist'.format(product_name))\n\n slis = client.sli_list(product[0], name=name)\n if not slis:\n fatal_error('SLI {} does not exist'.format(name))\n\n print(json.dumps(slis[0], indent=4))", "def enumerate_products(self, dataframe, context_col, fragment_col):\n # create dict\n self.enumerated_products_smi = {}\n for (ctx, frag) in zip(dataframe[context_col].tolist(), dataframe[fragment_col].tolist()):\n self.enumerated_products_smi[(ctx, frag)] = None\n\n # function to return value from this dict:\n def get_product(row):\n return self.enumerated_products_smi[(row[context_col], row[fragment_col])]\n\n # function to return value for novelty\n def get_novelty(row):\n if row['ENUMERATED_PRODUCT'] in self.mol_smi_dict:\n return self.mol_smi_dict[row['ENUMERATED_PRODUCT']]\n else:\n return True\n\n # write rxn files\n enum_object = enum_mols.MMPEnumerateNewMols(self.logger)\n enum_object.write_rxn_files()\n enum_object.write_reactants_simple_dict(self.enumerated_products_smi)\n enum_object.do_reactions()\n\n for cut_type, rtn_ctx, rtn_frag, new_mol in enum_object.yield_products_simple_dict_input():\n if (rtn_ctx, rtn_frag) in self.enumerated_products_smi:\n self.enumerated_products_smi[(rtn_ctx, rtn_frag)] = new_mol\n else:\n self.logger.debug(\"got a return I was not expecting: %s, %s -> %s\" % (rtn_ctx, rtn_frag, new_mol))\n\n # standardise\n temp_smifi = tempfile.NamedTemporaryFile(suffix=\".smi\", delete=False, encoding='utf-8', mode='wt')\n std_smi_lookup = {}\n # take the product smi and store as product_smi => None\n arbitary_id = 0\n for smi_no_std in list(self.enumerated_products_smi.values()):\n arbitary_id += 1\n std_smi_lookup[smi_no_std] = arbitary_id\n std_smi_lookup[arbitary_id] = smi_no_std\n temp_smifi.write(smi_no_std + \" \" + str(arbitary_id) + \"\\n\")\n temp_smifi.close()\n\n #\n # send in a dict of old_smi => old_smi\n # should turn this into old_smi => new_smi\n self.logger.debug(\"Requested standardisation on %s\" % temp_smifi.name)\n new_smi_dict = self.generate_std_smiles(temp_smifi.name, smi_id_map='id_smi')\n std_smi_lookup.update(new_smi_dict)\n\n #\n for key, value in list(self.enumerated_products_smi.items()):\n # print value, \" >> \", std_smi_lookup[std_smi_lookup[value]]\n 
self.enumerated_products_smi[key] = std_smi_lookup[std_smi_lookup[value]]\n\n # add the return enumerated mols to the df\n dataframe['ENUMERATED_PRODUCT'] = dataframe.apply(lambda row: get_product(row), axis=1)\n dataframe['NOVEL'] = dataframe.apply(lambda row: get_novelty(row), axis=1)\n #\n del enum_object\n #\n return dataframe", "def parse(self, response):\n product_urls = response.css(\n '.product-li .product-image a::attr(href)'\n ).getall()\n for product_url in product_urls:\n yield scrapy.Request(response.urljoin(product_url), self.parse_product)\n\n\n next_page_number = 2\n if '?' in response.url:\n return\n while next_page_number < 37:\n # import logging\n # logging.log(logging.WARNING, f\"This is a warning {len(product_urls)} : {product_urls[0]}\")\n next_page_url = f'{response.url}?p={next_page_number}'\n yield scrapy.Request(response.urljoin(next_page_url))\n next_page_number += 1", "def get_all_product():\r\n\r\n with mysql.db_session() as session:\r\n product = session.query(Product).all()\r\n\r\n if not product:\r\n return response.create_not_found_response()\r\n\r\n response_data = [each.to_dict() for each in product]\r\n\r\n return response.Response(message=response_data)", "def product_list(request):\n error = {\n 'status': False,\n 'name': None,\n 'text': None,\n 'level': None,\n 'debug': None\n }\n\n limit, error = get_limit(request, error)\n\n serializer = FreshSerializer()\n queryset = Product.objects.all()[:limit]\n\n if not queryset:\n error = {\n \"status\": True,\n \"name\": \"No Products\",\n \"text\": \"No Products found\",\n \"level\": \"Information\",\n \"debug\": \"\"\n }\n\n data = {\n \"products\": json.loads(serializer.serialize(queryset)),\n \"error\": error\n }\n\n return HttpResponse(json.dumps(data), content_type=\"application/json\")", "def request_pack_stock(proxy, headers):\n query = \"\"\"\n query SearchPackListings($input: SearchPackListingsInput!) {\n searchPackListings(input: $input) {\n data {\n searchSummary {\n data {\n ... 
on PackListings {\n data {\n id\n price\n title\n remaining\n totalPackCount\n expiryDate\n preorder\n images {\n type\n url\n __typename\n }\n __typename\n }\n __typename\n }\n __typename\n }\n __typename\n }\n __typename\n }\n __typename\n }\n }\n \"\"\"\n variables = {'input': {'searchInput': {'pagination': {'cursor': \"\", 'direction': \"RIGHT\", 'limit': '100'}}}}\n\n url = 'https://api.nbatopshot.com/marketplace/graphql?SearchPackListings'\n r = requests.post(url, json={'query': query, 'variables': variables})\n packs = json.loads(r.text)\n #df_data = packs['data']['searchPackListings']['data']['searchSummary']['data']['data']\n #df = pd.DataFrame(df_data)\n return packs", "def FetchLicense(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def request(self, http_method, url, payload=None, querystring=None,\r\n all_pages=None):\r\n\r\n try:\r\n\r\n response = self.fetch_response(\r\n http_method, url, payload=payload, querystring=querystring)\r\n try:\r\n if self.is_valid_response(response):\r\n response_json = None\r\n if response.status_code != 204:\r\n if response.status_code == 201 and response.content == b'':\r\n pass\r\n else:\r\n response_json = response.json()\r\n # check 'all_pages' required, response received is\r\n # partial(code 206) and contains info about total size of\r\n # the collection\r\n content_range = response.headers.get('content-range')\r\n if all_pages and response.status_code == 206 and\\\r\n content_range:\r\n # 'content-range': '0-99/789'\r\n total_size = self.get_total_size_from_content_range(\r\n content_range)\r\n myranges = [\r\n \"{0}-{1}\".format(i, i + constants.MAX_LIMIT)\r\n for i in range(constants.OFFSET, total_size,\r\n constants.MAX_LIMIT)]\r\n for myrange in myranges:\r\n response = self.fetch_response(\r\n http_method, url, payload=payload,\r\n querystring=querystring, myrange=myrange)\r\n if self.is_valid_response(response):\r\n response_json.extend(response.json())\r\n else:\r\n self.raise_http_exception(response)\r\n\r\n return response_json\r\n else:\r\n self.raise_http_exception(response)\r\n\r\n except ValueError as ex:\r\n # its low-level or response level error caused by\r\n # response.json() and not in requests.exceptions\r\n error_msg = \"ValueError: '{0}' for Method: '{1}' URL: '{2}'\"\\\r\n \" PayLoad: '{3}' QueryString: '{4}'\".format(\r\n str(ex), http_method, url, payload, querystring)\r\n LOG.error(error_msg)\r\n raise PowerStoreException(PowerStoreException.VALUE_ERROR,\r\n error_msg)\r\n except socket.error as exception:\r\n LOG.error(str(exception))\r\n raise PowerStoreException(PowerStoreException.SOCKET_ERR,\r\n str(exception))\r\n except SSLError as exception:\r\n LOG.error(str(exception))\r\n raise PowerStoreException(PowerStoreException.SSL_ERROR,\r\n str(exception))\r\n except ConnectionError as exception:\r\n LOG.error(str(exception))\r\n raise PowerStoreException(PowerStoreException.CONNECTION_ERROR,\r\n str(exception))\r\n except TooManyRedirects as exception:\r\n LOG.error(str(exception))\r\n raise PowerStoreException(\r\n PowerStoreException.TOO_MANY_REDIRECTS_ERROR, str(exception))\r\n except Timeout as exception:\r\n LOG.error(str(exception))\r\n raise PowerStoreException(PowerStoreException.TIMEOUT_ERROR,\r\n str(exception))", "def get_product_details(product_url: str) -> dict:\n def get_available_sizes(postID, sizeStr, product_mainID):\n \"\"\"\n List size haye mojood va tamoom 
shode ro ba API mide\n POST: https://store.lining.com/ajax/goods_details.htm\n \"\"\"\n api_url = 'https://store.lining.com/ajax/goods_details.html'\n data = {\n 'postID': postID,\n 'sizeStr': sizeStr,\n 'product_mainID': product_mainID\n }\n r = get_json(api_url, data=data)\n onsale_sizes = r['data']['onSale']\n logging.debug('Onsale Sizes: ' + repr(onsale_sizes))\n return onsale_sizes\n\n def get_pid_from_url(url):\n \"\"\" ID mahsool ro az URL darmiare \"\"\"\n return re.findall(r'store.lining.com/shop/goods-(\\w+).html\\w*', url)[0]\n\n def translate_keyword(keyword):\n \"\"\" tarjome key marboot be desctioption \"\"\"\n define = {\n '运动类型': 'Sport Type',\n '性别': 'Sex',\n '颜色': 'Color',\n '鞋透气指数': 'Shoes breathability index',\n '鞋软硬指数': 'Shoe soft and hard index',\n }\n if keyword in define:\n return define[keyword]\n else:\n return keyword\n ###########################################################\n\n details = dict()\n soup = get_soup(product_url)\n\n # product ID\n pid = get_pid_from_url(product_url)\n logging.debug('PID: ' + pid)\n details['pid'] = pid\n\n # product name\n name = soup.find('h1', {'id': 'product_name'}).text.strip()\n logging.debug('Name: ' + name)\n details['name'] = name\n\n # part number\n sku = soup.find('span', {'id': 'partNumber'}).find('span', {'class': 'v'}).text.strip()\n part_number = sku[0:sku.find('-')]\n logging.debug('Part Number: ' + part_number)\n details['sku'] = sku\n details['part_number'] = part_number\n\n # price\n price = soup.find('span', {'id': 'listPrice'}).find('span', {'class': 'v'}).text.strip().replace('¥', '')\n price_offer = soup.find('span', {'id': 'offerPrice'}).find('span', {'class': 'v'}).text.strip().replace('¥', '')\n logging.debug('Price: %s [offer]-> %s' % (price, price_offer))\n details['price'] = price\n details['price_offer'] = price_offer\n\n # all sizes\n all_sizes = list()\n for tag in soup.find('div', {'id': 'sizelist'}).find_all('div', 'size-layer'):\n tag = tag.find('input')\n # all_size -> [(id, size, status), ...]\n all_sizes.append(\n (\n tag.get('id').replace('size_list_', ''),\n tag.get('value'),\n None,\n )\n )\n available_sizes = get_available_sizes(\n postID=pid,\n product_mainID=part_number,\n # first element of all_sizes list\n # all_size -> [(id, size, status), ...]\n sizeStr=','.join([s[0] for s in all_sizes]),\n )\n # update all sizes status\n for i in range(len(all_sizes)):\n if all_sizes[i][1] in available_sizes:\n all_sizes[i] = (\n all_sizes[i][0],\n all_sizes[i][1],\n 'onsale',\n )\n else:\n all_sizes[i] = (\n all_sizes[i][0],\n all_sizes[i][1],\n 'stockout',\n )\n logging.debug('All Sizes: %s' % repr(all_sizes))\n details['all_sizes'] = all_sizes\n\n # description images\n description_images = list()\n desc = soup.find('div', {'id': 'PD_desc_picture'})\n for img in desc.find_all('img'):\n img = img.get('orginalsrc')\n logging.debug('description_images[]: ' + img)\n description_images.append(img)\n details['description_images'] = description_images\n\n # description key/value\n description = dict()\n for li in soup.find('ul', {'id': 'p_spec'}).find_all('li'):\n key = li.find('span', {'class': 't'}).text.replace(':', '').strip()\n key = translate_keyword(key)\n value = li.find('span', {'class': 'v'}).text.strip()\n description[key] = value\n logging.debug('%s -> %s' % (key, value))\n details['description'] = description\n\n # slider images\n slider_images = list()\n for li in soup.find('div', {'class': 'box'}).find_all('li'):\n img = li.find('img').get('big')\n logging.debug('slider_images[]: ' + 
img)\n slider_images.append(img)\n details['slider_images'] = slider_images\n\n # related products\n related_products_id = list()\n for li in soup.find('div', {'id': 'f_litimg'}).find_all('li'):\n url = li.find('a').get('href')\n url = 'store.lining.com' + url\n pid = get_pid_from_url(url)\n logging.debug('related_products_id[]: %s -> %s' % (pid, url))\n related_products_id.append(pid)\n details['related_products_id'] = related_products_id\n\n return details", "def request(cls, mws_access_key, mws_secret_key, mws_account_id,\n mws_marketplace_id, id_type=None, ids=(), mws_auth_token=None):\n products_api = mws.Products(mws_access_key, mws_secret_key, mws_account_id, auth_token=mws_auth_token)\n response = products_api.get_matching_product_for_id(mws_marketplace_id, id_type, ids)\n return cls.load(response.original)", "def products():\n try:\n return jsonify(get_product_caching_service().jsonofied_product_map)\n except Exception as exception:\n return jsonify({'Something went wrong: ': exception})", "def test_get_product_list(self):\n self._create_products(5)\n resp = self.app.get(\"/products\")\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = resp.get_json()\n self.assertEqual(len(data), 5)", "def get(self, request, page_no=None):\n queryset = Productlisting.objects.using('scraped').all()\n if page_no is None:\n page_no = 1\n if page_no <= 0:\n return Response(\"Page Number must be >= 1\", status=status.HTTP_400_BAD_REQUEST)\n \n ITEMS_PER_PAGE = 10\n queryset = queryset[(page_no - 1) * ITEMS_PER_PAGE : (page_no) * ITEMS_PER_PAGE]\n \n serializer = ProductListingSerializer(queryset, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)", "def products_for_influencers(parameters, page=0, page_size=50):\n product_ids, _, total_hits = elastic_search_helpers.es_product_query_runner_v2(\n parameters, page, page_size)\n\n num_pages = int(math.ceil(float(total_hits) / page_size))\n\n return product_ids, total_hits, num_pages", "def get_products(self):\n con = dbcon()\n cur = con.cursor()\n cur.execute(\"SELECT * FROM products;\")\n res = cur.fetchall()\n if res:\n prdcts=[]\n for prodct_item in res:\n picked_prdct = {\n 'product_id':prodct_item[0],\n 'product_name':prodct_item[1],\n 'price':prodct_item[2],\n 'quantity':prodct_item[3]\n }\n prdcts.append(picked_prdct)\n return jsonify({\"Products\": prdcts}), 200\n return jsonify({\"message\":\"No products in store\"})", "def go_product_page(self, driver, product_id, website):\n link = self.product_url(website, product_id)\n self.go_and_assert(driver, link, website)", "def start(self, module=None, app=None, delay=None):\n with (app or flask.current_app).test_request_context():\n path = self.url(delay=delay)\n\n url = 'https://{module}-dot-{hostname}{path}'.format(\n module=module or self.module_name,\n hostname=app_identity.get_default_version_hostname(),\n path=path)\n\n urlfetch.fetch(url)", "async def limited_req(path, session, semaphore, **kwargs):\n async with semaphore:\n async with session.get('https://iceprod2-api.icecube.wisc.edu'+path, params=kwargs) as response:\n return await response.json()", "def get_download_info_API(request):\n update_metrics(request)\n\n session_id = request.session.session_key\n\n product_types = request.GET.get('types', 'none')\n product_types = product_types.split(',')\n\n previews = request.GET.get('previews', 'none')\n previews = previews.split(',')\n\n # since we are assuming this is coming from user interaction\n # if no filters exist then none of this product type is 
wanted\n if product_types == ['none'] and previews == ['none']:\n # ie this happens when all product types are unchecked in the interface\n return HttpResponse(json.dumps({'size':'0', 'count':'0'}), content_type='application/json')\n\n if previews == ['all']:\n previews = [i[0] for i in settings.image_sizes]\n\n # now get the files and download size / count for this cart\n urls = []\n from results.views import *\n files = getFiles(collection=True, session_id=session_id, fmt=\"raw\", loc_type=\"url\", product_types=product_types, previews=previews)\n download_size, count = get_download_info(files)\n\n # make pretty size string\n download_size = nice_file_size(download_size)\n\n return HttpResponse(json.dumps({'size':download_size, 'count':count}), content_type='application/json')", "def get_filled(product='all', limit=100):\n\n params = {\n 'limit': limit,\n 'product_id': product,\n }\n\n log.info('getting all FILLED ORDERS')\n\n resp = httpapi.get(\n common.api_url + 'fills',\n params=params,\n auth=common.auth,\n )\n\n return resp.json(), resp", "def render_get(self, request):\n try:\n remote = request.remote[0]\n except TypeError:\n remote = request.remote.sockaddr[0]\n\n logger.debug(\"CoAP GET received from {}\".format(remote))\n\n firmware_binary = self._controller.\\\n get_latest_firmware_binary(self._appid, self._slot)\n\n return Message(code=CONTENT, payload=firmware_binary)", "def get_request(req_context, uri):\n headers = { 'Accept': \"application/json\", \n 'User-Agent': \"testApp\"\n }\n if config.ENVIRONMENT == \"Sandbox\":\n base_url = \"https://sandbox-quickbooks.api.intuit.com/v3/company/\"\n else:\n base_url = \"https://quickbooks.api.intuit.com/v3/company/\"\n url = base_url + req_context.realm_id + uri\n print(url)\n if config.AUTH_TYPE == \"OAuth2\":\n headers['Authorization'] = \"Bearer \" + req_context.access_token\n req = requests.get(url, headers=headers)\n else:\n auth = OAuth1(req_context.consumer_key, req_context.consumer_secret, req_context.access_key, req_context.access_secret)\n req = requests.get(url, auth=auth, headers=headers)\n return req", "def API_request(self, search_term, search_type):\n url = \"\"\n if search_type == \"product\":\n url = self.url_product.format(search_term)\n elif search_type == \"substitute\":\n url = self.url_substitute.format(search_term)\n r = requests.get(url)\n response = r.json()\n return response[\"products\"]", "async def buy(self, product_id: int):\n ee = self.request.request(\n url=f'https://economy.roblox.com/v2/user-products/{product_id}/purchase',\n method='post')\n return ee", "def test_get_rate_plan_by_product(self):\n pass", "def product(request, product_id):\n\n u = request.user\n try:\n p = Product.objects.get(id=product_id)\n request.POST['sku'] = p.sku\n result = item(u, p.sku)\n except Product.DoesNotExist:\n result = {'result':'0'}\n return JSONHttpResponse( result )", "def product_view(request, product):\n product = Products.objects.get(product=product)\n\n context = {\n \"product\": product,\n }\n\n return render(request, \"products/product_detail.html\", context)", "def get_frame(request, tid, frame):\n\n try:\n # Follow symbol links if the frame is a link on a real image otherwise\n # mimetype detection inside sendfile will work incorrectly.\n path = os.path.realpath(task.get_frame_path(tid, frame))\n return sendfile(request, path)\n except Exception as e:\n slogger.task[tid].error(\"cannot get frame #{}\".format(frame), exc_info=True)\n return HttpResponseBadRequest(str(e))", "def one_click(self, adi):\r\n 
products = self.get_products(adi)\r\n self.sc.stack = [adi]\r\n self.sc.check(products, {\"$ref\" : \"ResponseBankingProductList\"})\r\n for product in products[\"data\"][\"products\"]:\r\n self.sc.stack = [adi, product[\"name\"]]\r\n self.check_product_detail_schema(adi, product[\"productId\"], product[\"name\"])", "def list_products(self):\n return self._make_get_request(self._urls['products'])", "def dispatch_frame(self, frame):", "def parse_PRODUCT(self, response: Union[SplashJsonResponse, ScrapyHttpResponse]) -> None:\n\n if meta_information := response.meta.get(\"meta_data\"):\n meta_information |= response.meta.get(\"request_meta_information\", {})\n else:\n meta_information = response.meta.get(\"request_meta_information\")\n\n meta_information[\"original_URL\"] = response.meta.get(\"original_URL\", None)\n\n scraped_page = ScrapedPage(\n timestamp=self.timestamp,\n source=self.source,\n merchant=self.merchant,\n country=self.country,\n url=response.url,\n html=response.body.decode(\"utf-8\"),\n page_type=PageType.PRODUCT.value,\n category=response.meta.get(\"category\"),\n gender=response.meta.get(\"gender\"),\n consumer_lifestage=response.meta.get(\"consumer_lifestage\"),\n meta_information=meta_information,\n )\n\n self.message_queue.add_scraping(table_name=self.table_name, scraped_page=scraped_page)", "def _getSymbols(self):\n path = '/products'\n\n try:\n r = requests.get(settings['abucoins_exporter']['url'] + path, verify=True) # Doesn't need authentication\n except (\n requests.exceptions.ConnectionError,\n requests.exceptions.ReadTimeout,\n requests.packages.urllib3.exceptions.ReadTimeoutError\n ) as e:\n log.warning(e)\n r = False\n if r and r.status_code == 200:\n for symbol in r.json():\n if symbol['id'] not in self.symbols:\n self.symbols.append(symbol['id'])\n\n log.debug('Found the following symbols: {}'.format(self.symbols))", "async def get_device_info_w_slice(session: ClientSession, graphql_instance: object, **kwargs: object) -> list:\n request_results = []\n sub_request_kwargs = kwargs\n req_num = sub_request_kwargs.pop(\"req_num\", 0)\n\n sub_request_kwargs[\"category\"] = graphql_instance.__class__.__name__\n sub_request_kwargs[\"req_num\"] = f\"{req_num}\"\n\n http_response = await asyncio.create_task(send_request(session, **sub_request_kwargs))\n\n if not http_response:\n cur_devices = sub_request_kwargs[\"json\"][\"variables\"][\"productIds\"]\n if len(cur_devices) > 1:\n parts = ([\n cur_devices[:len(cur_devices) // 2],\n cur_devices[len(cur_devices) // 2:]\n ])\n for i, part in enumerate(parts):\n sub_request_kwargs_copy = copy.deepcopy(sub_request_kwargs)\n sub_request_kwargs_copy[\"json\"][\"variables\"][\"productIds\"] = part\n sub_request_kwargs_copy[\"req_num\"] = f\"{req_num}.{i}\"\n request_results.append(await get_device_info_w_slice(\n session,\n graphql_instance,\n **sub_request_kwargs_copy\n ))\n else:\n request_results.append(http_response)\n return request_results", "def retrieve(self, request, pk=None):\n try:\n order_product = Order_Products.objects.get(pk=pk)\n serializer = Order_Products_Serializer(\n order_product, context={'request': request}\n )\n return Response(serializer.data)\n except Exception as ex:\n return HttpResponseServerError(ex)", "def test_product_search(self):\n\n flag = \"user\"\n api = \"product.product.add\"\n current_page = 1\n search_info = json.dumps({\n 'name': \"可爱的小蓝牙呀\"\n })\n print('start------------------------>add')\n result = self.access_api(flag = flag, api = api, current_page = current_page, product_info = 
search_info)", "def get_products(classification):\n call = build_call('attr', classification)\n return request_data(call)", "def products_view(request, product_id):\n if not product_id:\n return JsonResponse({\"error\": \"Product id is not provided\"}, 400)\n if request.method == \"GET\":\n response, status_code = get_products(request, product_id)\n if status_code != 200:\n return JsonResponse(response, status=status_code)\n else:\n serialize_data = ProductSerializer(response, many=False).data\n return JsonResponse(serialize_data, status=200, safe=False)\n else:\n response, status_code = update_product(request, product_id)\n if status_code != 204:\n return JsonResponse(response, status=status_code)\n serialize_data = ProductSerializer(response, many=False).data\n return JsonResponse(serialize_data, status=status_code, safe=False)", "def request():\n return face_client.face.detect_with_stream(image=open(\"frame.png\", 'rb'),\n return_face_attributes=[emotion_attribute],\n recognition_model='recognition_02')", "def fetch(self, data: Any, *args: Any, **kwargs: Any):\n product = None\n next_args = (data, *args)\n next_kwargs = kwargs\n for name, method, outlet, description in self.steps:\n product, new_args, next_kwargs = method(*next_args, **next_kwargs)\n next_args = (product, *new_args)\n if isinstance(product, self.outlet):\n return product\n else:\n raise RuntimeError(\"Process was not completed according to specification.\")", "def run(self, bundle_fqids: typing.List[str], bundle_fqids_url: str, format: str, genus_species: str):\n logger.debug(f\"Driver running with parameters: bundle_fqids={bundle_fqids}, \"\n f\"bundle_fqids_url={bundle_fqids_url}, format={format}, \"\n f\"genus_species={genus_species}, \"\n f\"bundles_per_worker={self.bundles_per_worker}\")\n\n # 10/17/19: Bundle UUIDs will be used in favor of FQIDs in v0 in order to loosen\n # the data parity restriction with Data Browser, enabling project availability\n # while DCP components respond to simple updates at different frequencies.\n if bundle_fqids_url:\n response = self._get_bundle_manifest(bundle_fqids_url)\n resolved_bundle_uuids = self._parse_download_manifest(response.text)\n if len(resolved_bundle_uuids) == 0:\n error_msg = \"no bundles found in the supplied bundle manifest\"\n logger.info(error_msg)\n self.request_tracker.log_error(error_msg)\n return\n else:\n resolved_bundle_uuids = [fqid.split(\".\", 1)[0] for fqid in bundle_fqids]\n logger.debug(f\"resolved bundle uuids: {resolved_bundle_uuids}\")\n\n self.dynamo_handler.set_table_field_with_value(DynamoTable.REQUEST_TABLE,\n self.request_id,\n RequestTableField.NUM_BUNDLES,\n len(resolved_bundle_uuids))\n s3_obj_keys = self._format_and_store_queries_in_s3(resolved_bundle_uuids, genus_species)\n\n analysis_table_bundle_count = self._fetch_bundle_count_from_analysis_table(resolved_bundle_uuids)\n if analysis_table_bundle_count != len(resolved_bundle_uuids):\n error_msg = \"resolved bundles in request do not match bundles available in matrix service\"\n logger.info(error_msg)\n self.request_tracker.log_error(error_msg)\n return\n\n for key in s3_obj_keys:\n self._add_request_query_to_sqs(key, s3_obj_keys[key])\n\n self.request_tracker.complete_subtask_execution(Subtask.DRIVER)", "async def request(self, multiplier: Optional[int]=None):\n # TODO: validate the multiplier\n message = Message(self.name_path, multiplier)\n await self.issue_command(Command(message))", "def api_all():\r\n\tconn = sqlite3.connect('Shopify_products.db')\r\n\tconn.row_factory = 
dict_factory\r\n\tcur = conn.cursor()\r\n\tall_products = cur.execute('SELECT * FROM products WHERE inventory_count>0;').fetchall()\r\n\treturn jsonify(all_products)", "def openProduct(self, product_path):\r\n\r\n product = None\r\n variables = self.readVariables(product)\r\n attributes = self.readAttributes(product)\r\n return product, variables, attributes", "def pull(self, where=\"\", parameters={}, verify_hash=False):\n queue = self.search(where=where, parameters=parameters)\n for product in queue:\n if 'archive_path' in product.core:\n raise Error(\"cannot pull local products\")\n if 'remote_url' not in product.core:\n raise Error(\"cannot pull products that have no remote_url\")\n\n plugin = self.product_type_plugin(product.core.product_type)\n product.core.archive_path = plugin.archive_path(product)\n\n # set archive_path and deactivate while we pull it in\n metadata = {'active': False, 'archive_path': product.core.archive_path}\n self.update_properties(Struct({'core': metadata}), product.core.uuid)\n\n # pull product\n try:\n remote.pull(self, product)\n except:\n # reset active/archive_path values\n metadata = {'active': True, 'archive_path': None}\n self.update_properties(Struct({'core': metadata}), product.core.uuid)\n raise\n\n # reactivate and update size\n size = util.product_size(self._product_path(product))\n metadata = {'active': True, 'archive_date': self._backend.server_time_utc(), 'size': size}\n self.update_properties(Struct({'core': metadata}), product.core.uuid)\n\n # verify product hash.\n if verify_hash and 'hash' in product.core:\n if self.verify_hash(\"uuid == @uuid\", {\"uuid\": product.core.uuid}):\n raise Error(\"pulled product '%s' (%s) has incorrect hash\" %\n (product.core.product_name, product.core.uuid))\n\n # Run the post pull hook (if defined by the product type plug-in).\n if hasattr(plugin, \"post_pull_hook\"):\n plugin.post_pull_hook(self, product)\n\n return len(queue)", "def GetBlock(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def _subpage_crawler(self):\n while True:\n try:\n \tfamily = self.product_families.pop()\n except IndexError:\n\t\t\t\tbreak\n\n with closing(urlopen(self.base_url + family)) as product_list_page:\n product_list_soup = BeautifulSoup(product_list_page, 'html.parser')\n product_list = product_list_soup.find_all(self.product_list_tag[\"name\"], self.product_list_tag[\"attrs\"])\n for product in product_list:\n \tproduct_url = product.find(self.product_url_tag[\"name\"]).get('href')\n \tself.product_links[product_url] = family\n \t#print product_url", "def get_influencer_products_v2(influencer_ids,\n parameters,\n page_size,\n db_only=False,\n limit=10):\n from debra.models import ProductModelShelfMap\n\n # updating influencers to list and moving it to parameters for ES query builder\n if type(influencer_ids) != list:\n influencer_ids = [influencer_ids, ]\n\n # storing influencer's id in parameters for query builder\n if influencer_ids:\n parameters['influencer_ids'] = influencer_ids\n\n # Retrieving page number for ES query\n try:\n page = int(parameters.get('page', 1))\n page -= 1\n except TypeError:\n page = 0\n\n if db_only:\n db_params = {}\n highlighted_product_ids, total = [], None\n else:\n # Getting list of post ids from ES depending on search parameters\n product_ids, highlighted_product_ids, total = es_product_query_runner_v2(parameters,\n page,\n page_size,\n 
highlighted_first=True)\n db_params = dict(product_model__id__in=product_ids,)\n if settings.DEBUG:\n print('* Item IDS: %s' % product_ids)\n print('* Highlighted item IDs: %s' % highlighted_product_ids)\n print('* Total items: %s' % total)\n\n items_from_db = ProductModelShelfMap.objects.filter(\n influencer__id__in=influencer_ids,\n img_url_feed_view__isnull=False,\n **db_params\n ).prefetch_related('product_model') # .order_by('-product_model__insert_date')\n\n # list of ids for ProductModelShelfMap, not for product_model get from ES\n highlighted_items_ids = []\n\n if db_only:\n items = items_from_db[:limit]\n else:\n # sorting posts by list of ids from ES\n items = dict()\n for item in items_from_db:\n items[item.product_model.id] = item\n if item.product_model.id in highlighted_product_ids:\n highlighted_items_ids.append(item.id)\n\n items = [items[product_id] for product_id in product_ids if product_id in items]\n\n items_data = serialize_items_data_v2(items, highlighted_items_ids)\n\n return items_data, len(items_data)", "def get_product_info_from_url(url):\n try:\n # Step 1: Sending a HTTP request to a index_url\n reqs = requests.get(url)\n except Exception as Ex:\n print('report url execption')\n with open('error_log_file.json', 'a') as outfile:\n json.dump({\"exception_text\": str(Ex), \"url\": str(url), \"methode\": \"get_product_info_from_url\"}, outfile)\n outfile.write(\"\\n\")\n return {}\n # Step 2: Parse the html content\n soup = BeautifulSoup(reqs.text, 'lxml')\n product_info = {}\n try:\n # Step 4: Analyze the HTML tag to extract product infos\n\n for tag in soup.find_all('div', attrs={'class': \"row no-padding FicheArticleRspv\"}):\n title=tag.find('div', attrs={'class': \"col-sm-12\"}).find('h1').text\n t= tag.find('div', attrs={'class': \"row no-padding fa_infos\"}).find('div', attrs={'class': \"col-sm-7 description-produit\"})\n marque=t.find('div', attrs={'class': \"art_marque\"}).text\n r = tag.find('div', attrs={'class': \"row no-padding fa_infos\"}).find('div', attrs={'class': \"col-sm-5\"}).find('div', attrs={'class': \"promobri bri_rj\"})\n promo = r.text if r else \"N/A\"\n info = tag.find('div', attrs={'class': \"row no-padding fa_infos\"}).find('div', attrs={'class': \"col-sm-7 description-produit\"}).text\n info = \" \".join(info.split(\"\\n\"))\n price = tag.find('div', attrs={'class': \"row no-padding fa_infos\"}).find('div', attrs={'class': \"col-sm-5\"}).find('div', attrs={'class': \"art_prix\"}).text\n volume_price = tag.find('div', attrs={'class': \"row no-padding fa_infos\"}).find('div', attrs={'class': \"col-sm-5\"}).find('div', attrs={'class': \"art_prix_volume\"}).text\n product_info = dict(Product=marque,Title=title,product_info=info,promo=promo,price=price,volume_price=volume_price)\n except Exception as ex:\n print('report url execption')\n with open('error_log_file.json', 'a') as outfile:\n json.dump({\"exception_text\": str(ex), \"url\": str(url), \"methode\": \"get_product_info_from_url\"}, outfile)\n outfile.write(\"\\n\")\n pass\n return product_info", "async def get_inventory(request: web.Request, ) -> web.Response:\n return web.Response(status=200)", "def flip_details(bsoup, product, header):\n\n container = bsoup.find_all(\"div\", class_=\"_1AtVbE col-12-12\")\n prod_links = []\n base_url = \"https://www.flipkart.com\"\n for i in container[1:]:\n try:\n link = i.find(\"a\")[\"href\"]\n except Exception as e:\n continue\n if link.find(\"pid\") == -1:\n continue\n prod_links.append(base_url+link)\n\n total_results = len(prod_links)*10\n 
req = requests.get(prod_links[0], headers=header).text\n soup = bs(req, \"lxml\")\n try:\n name = soup.find(\"div\", class_=\"aMaAEs\")\n product_name = name.div.h1.span.text\n except:\n product_name = \" \".join(product).title()\n try:\n rating = name.find(\"div\", class_=\"_3LWZlK\")\n product_rating = rating.text + \" out of 5 stars\"\n except Exception as e:\n product_rating = \"Not Available\"\n try:\n price = name.find(\"div\", class_=\"_30jeq3 _16Jk6d\")\n product_price = price.text + \".00\"\n except Exception as e:\n product_price = \"Sold by Third Party. Please visit the website for more info.\"\n\n return [[\"Product Name\", product_name],\n [\"Product Price\", product_price],\n [\"Product Rating\", product_rating],\n [\"Total Fetched Results\", total_results]]", "def products_list(driver, login_action, open_products_page, products_page, logger):\n try:\n return products_page.all_products_list()\n except logger.on_exception(exception, driver):\n print(exception)", "def get(self):\n\n return {\n 'product': self.request.matchdict['product_name'],\n 'version': self.request.matchdict['product_version'],\n }", "def show_available_products(): # {{{\n products_available = {}\n try:\n with MONGO:\n product_collection = MONGO.connection.assignment_07[\"product\"].find(\n )\n\n for product in product_collection:\n if int(product[\"quantity_available\"]) > 0:\n products_available[product[\"product_id\"]] = {\n \"description\": product[\"description\"],\n \"product_type\": product[\"product_type\"],\n \"quantity_available\": product[\"quantity_available\"],\n }\n except TypeError as excep:\n LOGGER.warning(\"Error looking up available products\")\n LOGGER.warning(excep)\n else:\n if not products_available:\n LOGGER.info('No products found')\n else:\n LOGGER.info(\"Available products retrieved successfully.\")\n return products_available # }}}" ]
[ "0.62623674", "0.61581105", "0.55546266", "0.55507904", "0.5484503", "0.5441134", "0.5407963", "0.5333419", "0.5302095", "0.5175288", "0.5174018", "0.5140395", "0.5135881", "0.51053953", "0.5032891", "0.50136805", "0.50059825", "0.49878338", "0.4946766", "0.49369532", "0.49003386", "0.48848337", "0.48820823", "0.48459545", "0.4839109", "0.48346123", "0.48236543", "0.48001984", "0.47829416", "0.47597197", "0.4758047", "0.47391766", "0.4714977", "0.47119173", "0.47037905", "0.47025117", "0.47023964", "0.4698723", "0.46787244", "0.46710077", "0.46707305", "0.46679646", "0.46610105", "0.4658855", "0.4654967", "0.46430653", "0.46300435", "0.46298474", "0.4628519", "0.462553", "0.46166253", "0.4614536", "0.45983112", "0.45955545", "0.45890626", "0.45874503", "0.4585168", "0.4584029", "0.4582883", "0.45753312", "0.45747715", "0.45746574", "0.45709953", "0.45694825", "0.4560708", "0.4559543", "0.45580828", "0.45538226", "0.45518896", "0.4550101", "0.4545891", "0.4543381", "0.45425108", "0.45367125", "0.45298156", "0.45103788", "0.45098588", "0.45017943", "0.4499601", "0.44993377", "0.4492562", "0.44899046", "0.44868585", "0.44820592", "0.44809985", "0.4479813", "0.44778085", "0.44748777", "0.44701222", "0.44698566", "0.44693804", "0.44659442", "0.4460914", "0.44602978", "0.44591177", "0.44554776", "0.44551605", "0.44524205", "0.44499558", "0.44465783" ]
0.5573586
2
Requests frames for a product.
def find(cls, forecast_id, start=None, end=None, limit=None, sort=None, reruns=None, **kwargs):
    return super(ForecastFrame, cls).find(forecast_id=forecast_id, start=start, end=end,
                                          limit=limit, sort=sort, reruns=reruns, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _request_frame(self):\n self._send_command('GET_FRAME')", "def GetProduct(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def find(cls, product_id, start=None, end=None, limit=None, sort=None, reruns=None, **kwargs):\n return super(ProductFrame, cls).find(product_id=product_id, start=start, end=end,\n limit=limit, sort=sort, reruns=reruns, **kwargs)", "def products(self, start=None, limit=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/products'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json", "def ListProducts(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def getProducts():\n return \"http://api.tcgplayer.com/catalog/products\"", "def all_frames(request):\n frames = Product.objects.filter(department=\"2\")\n\n context = {\n 'frames': frames,\n }\n\n return render(request, 'products/frames.html', context)", "def get_product_web_page(product = None):\n \n products_list = get_product_list()\n modis_url_dict = {prod: '{}v006'.format(prod.lower()) for prod in \n products_list if prod[0] == 'M'}\n viirs_url_dict = {prod: '{}v001'.format(prod.lower()) \n for prod in products_list if prod[:3] == 'VNP'}\n modis_url_dict.update(viirs_url_dict)\n base_addr = ('https://lpdaac.usgs.gov/products/{0}')\n if product is None or not product in modis_url_dict.keys():\n print 'Product not found... redirecting to data discovery page'\n addr = ('https://lpdaac.usgs.gov')\n else:\n addr = base_addr.format(modis_url_dict[product])\n webbrowser.open(addr)", "def BuyProduct(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def WatchProduct(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def processProductsRequest(self):\n\n\t\t# Use the simple page renderer to create the body content\n\t\treturn self.render_simple_page('Products')", "def test_products_get(mocker, data):\n mocker.patch(\"sps.request.fetch\", autospec=True)\n request.fetch.return_value = data\n assert products.get(None, \"fake-file-name\", False, False)\n request.fetch.assert_called_with(\"https://scc.suse.com/api/package_search/products\")", "def stream(self) -> None:\n\n # Get the current frame and scan it for barcodes.\n products: Any # TODO: actual type\n (frame, products) = self.barcode_scanner.get_and_scan_current_frame()\n\n # Display frame.\n self.image.update_image(frame)\n\n if not products:\n # Continue streaming video.\n self.image.after(self.stream_delay, self.stream)\n self.confirm_button.config(state=tk.DISABLED)\n else:\n self.confirm_button.config(state=tk.NORMAL)\n\n # Show product information in GUI.\n for product in products:\n self.barcode_text.set(product.barcode)\n\n if type(product) == prdct.OFFProduct:\n self.brand_text.set(product.brands)\n self.name_text.set(product.name)\n else:\n self.name_text.set(\"Product not found.\")", "def openproducts(self):\n\n print \"Open products\"\n self.combo_product_list.setEnabled(True)\n frame=self.combo_area_list.currentText()\n self.combo_product_list.clear()\n self.combo_dataset_list.clear()\n 
self.combo_variable_list.clear()\n print str(frame)\n list_glo=[]\n if str(frame) == \"GLOBAL\":\n for key in self.dict_prod.keys():\n if str(frame) in key :\n list_glo.append(str(key))\n ind=0\n #print \"Frame %s \" %(frame)\n for key in self.dict_prod.keys():\n if str(frame) == \"BAL\":\n frame1=\"_BAL_\"\n frame2=\"-BAL-\"\n if frame1 in key or frame2 in key :\n self.combo_product_list.addItem(str(key))\n elif str(frame) == \"NWS\":\n frame1=\"NORTHWESTSHELF_\"\n frame2=\"NWS\"\n if frame1 in key or frame2 in key :\n self.combo_product_list.addItem(str(key))\n elif str(frame) == \"GLOBAL\":\n if str(frame) in key :\n if ind == 0 :\n self.combo_product_list.addItem(list_glo[5])\n elif ind == 5 : \n self.combo_product_list.addItem(list_glo[0])\n else : \n self.combo_product_list.addItem(list_glo[ind])\n ind+=1\n else :\n if str(frame) in key :\n self.combo_product_list.addItem(str(key))\n self.combo_dataset_list.setEnabled(True)", "def product_card(driver, open_login_page):\n return ProductPage(driver)", "def get_product(name):\n\n products = get_products({'producer': name})\n\n return response(\n 'Successfully retreived all the products for company ' + name,\n 200,\n { 'data':\n { 'products': products }\n }\n )", "def get_product_info(self, product):\n\n product_link = self.url + product.a['href']\n product_page = self.get_response(product_link)\n product_soup = BeautifulSoup(product_page.content, 'html.parser')\n\n # get product details\n product_brand = product_soup.find('h2').text.strip()\n product_name = product_soup.find('h1').text.strip()\n\n product_details = product_soup.find('div', id='z-pdp-detailsSection')\n\n product_attributes = []\n for detail_section in product_details.find_all('div', class_='h-container h-flex-no-shrink h-tabs__panel h-align-left'):\n for tag in detail_section.find_all('p'):\n product_attributes.append(tag.text.strip())\n\n # get product image\n product_img_thumbs = product_soup.find('div', id='z-pdp-topSection')\n product_img_thumbs = product_img_thumbs.find(\n 'div', class_='h-container h-carousel h-carousel-thumbnail vertical h-align-left')\n\n img_links = []\n product_img_link = ''\n for img_thumb in product_img_thumbs.find_all('picture'):\n img_link = img_thumb.find('img')['src'].replace('thumb', 'zoom')\n if 'packshot' in img_link:\n product_img_link = img_link\n else:\n img_links.append(img_link)\n\n # product_img_link = 'https:' + product_img.split('\"')[1].split('?')[0]\n product_img_id = product_img_link.split('/')[-1].split('@')[0]\n\n return {'name': product_name,\n 'brand': product_brand,\n 'id': product_img_id,\n 'img_url': product_img_link,\n 'model_img_urls': ', '.join(img_links),\n 'attributes': ', '.join(product_attributes)}", "def apiquery(self, product_url, params={}):\n requesturl = self.config['host'] + product_url\n timestamp = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')\n signature = hmac.new(self.config['key'],\n ''.join([self.config['username'], timestamp, product_url]),\n digestmod=hashlib.sha1).hexdigest()\n params['timestamp'] = timestamp\n params['signature'] = signature\n params['api_username'] = self.config['username']\n if 'format' not in params.keys():\n params['format'] = self.config['dataformat']\n req = requests.get(requesturl, params=params)\n if req.status_code != requests.codes.ok:\n try:\n json_response = req.json()\n self.raise_best_exception(json_response)\n except KeyError:\n raise UnexpectedError(req.status_code, req.text)\n return req", "def on_get(self, req, resp):\n sample = load_data()\n\n 
print(sample.base.describe())\n resp.status = falcon.HTTP_200\n\n frame_str = str(sample.base.describe()) + \"\\n\"\n resp.body = frame_str\n # resp.body = sample.base.describe()", "def printProductsFromServer():\n server_response = getRequest(getProducts())\n\n print('SERVER RESPONSE')\n # pprint(server_response) # prints the JSON from the server\n\n # Print out all the products and corresponding IDs\n for item in server_response[\"results\"]:\n id = item[\"productId\"]\n name = item[\"productName\"]\n print('Product Name' , name , \" | Product ID: \", id)", "async def run(product_ids):\n url = \"http://challenge-api.luizalabs.com/api/product/{}\"\n tasks = []\n\n # Fetch all responses within one Client session,\n # keep connection alive for all requests.\n async with ClientSession() as session:\n for product_id in product_ids:\n task = asyncio.ensure_future(utils.fetch(url.format(product_id), session))\n tasks.append(task)\n\n self.responses = await asyncio.gather(*tasks)", "def products():\n username = session['username']\n api_key = session['api_key']\n url = 'https://consumernotebook.com/api/v1/products/?username={0}&apikey={1}'.format(username, api_key)\n r = requests.get(url)\n products = []\n if r.status_code != 200:\n error = \"{0} error. Are you sure you entered a valid API key?\".format(r.status_code)\n return render_template('products.html', error=error)\n else:\n products_json = json.loads(r.content)\n for product in products_json[u'objects']:\n products.append(product[u'title'])\n return render_template('products.html', products=products)", "def api_id():\n if 'productName' in request.args:\n productName = request.args['productName']\n if utils.specialCharCheck(productName):\n return jsonify({ \"status\": \"200\",\"data\" : \"Special Character is not allowed in the search!\" })\n else:\n return jsonify({ \"status\": \"200\",\"data\" : \"Please specify product name\" })\n\n itemList = []\n try :\n # below path needs to come from the configuration file\n path='E:/Project/Bar Raiser/11 July - Scrapping Hathagon/chromedriver.exe'\n url='https://www.amazon.in/s?k='+productName\n browser= utils.getDriverInfo(path)\n htmlSourceSoup=utils.getHtmlSource(url, browser)\n linkDiv = htmlSourceSoup.find_all('div', {'class' : 'sg-col-inner'})\n itemList=utils.getJsonFromHtml(linkDiv)\n\n if not itemList:\n result = {\n \"status\" : \"200\",\n \"productList\" : \"Product not found!\"\n }\n else:\n result = { \n \"status\" : \"200\",\n \"productList\" : itemList\n }\n\n return jsonify(result)\n except Exception as ex:\n return jsonify({ \"status\": \"500\",\"data\" : \"Server error while processing the request\", \"error\":ex })", "def get_products_from_page(url):\n\n def get_data_from_book(book):\n \"\"\"Return data from one book.\"\"\"\n src_img = book.find(\"img\").get(\"src\")\n src_img = src_img.replace(\"../\", \"\")\n image = \"http://books.toscrape.com/\" + src_img\n\n in_stock = False\n in_stock_or_not = book.find(\"p\", {\"class\", \"instock\"}).text\n if \"In stock\" in in_stock_or_not:\n in_stock = True\n\n name = book.find(\"h3\").find(\"a\").text\n\n price = book.find(\"p\", {\"class\", \"price_color\"}).text\n price = price.replace(\"Â\", \"\")\n\n rating = book.find(\"p\", {\"class\", \"star-rating\"}).get(\"class\")[1]\n rating = w2n.word_to_num(rating)\n\n return {\n \"image\": image,\n \"in_stock\": in_stock,\n \"name\": name,\n \"price\": price,\n \"rating\": rating,\n }\n\n r = requests.get(url)\n soup = BeautifulSoup(r.text, \"html.parser\")\n books = 
soup.find_all(\"article\", {\"class\", \"product_pod\"})\n\n result = list(map(get_data_from_book, books))\n return result", "def get_product(self, page_size=10, pages_number=1):\n products = []\n params = self.params.copy()\n params[\"page_size\"] = page_size\n\n try:\n response = requests.get(self.url, params=params, timeout=3)\n response.json()\n except requests.ConnectionError:\n print(\"Error when fetching the API\")\n for i in range(pages_number):\n params[\"page\"] = i + 1\n response = requests.get(self.url, params=params)\n if response.status_code == 200:\n products.extend(response.json()[\"products\"])\n return products", "def get_open(product=None, limit=100):\n\n params = {\n 'limit': limit,\n 'product_id': product,\n }\n\n log.info('getting all OPEN ORDERS')\n\n resp = httpapi.get(\n common.api_url + 'orders',\n params=params,\n auth=common.auth,\n )\n\n return resp.json(), resp", "def RegisterProduct(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def test_get_product(self):\n # get the id of a product\n test_product = self._create_products(1)[0]\n resp = self.app.get(\n \"/products/{}\".format(test_product.id), content_type=\"application/json\"\n )\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = resp.get_json()\n self.assertEqual(data[\"name\"], test_product.name)\n \n # print the repr of a product\n rep = \"%s\" % test_product", "def get_product(params, freezer_id, token):\n # Avoid SQL injection before doing requests\n # with the token and check the validity of it\n token = MySQLdb.escape_string(token)\n if not validator_db.valid_token(token):\n return custom_response(400, responseMessage.BAD_TOKEN)\n\n if params == 'all':\n if freezer_id == 0:\n return jsonify(query_db.get_query_db(mysqlRequests.generate_product_query('all'),\n (token,),\n header=True))\n else:\n return jsonify(query_db.get_query_db(mysqlRequests.generate_product_query('all-one'),\n (token,\n freezer_id,),\n header=True))\n\n if params == 'inside':\n if freezer_id == 0:\n return jsonify(query_db.get_query_db(mysqlRequests.generate_product_query('inside'),\n (token,),\n header=True))\n else:\n return jsonify(query_db.get_query_db(mysqlRequests.generate_product_query('inside-one'),\n (token,\n freezer_id,),\n header=True))\n\n if params == 'outside':\n if validator_db.valid_token(token):\n if freezer_id == 0:\n return jsonify(query_db.get_query_db(mysqlRequests.generate_product_query('outside'),\n (token,),\n header=True))\n else:\n return jsonify(query_db.get_query_db(mysqlRequests.generate_product_query('outside-one'),\n (token,\n freezer_id,),\n header=True))\n\n return custom_response(400, responseMessage.BAD_PARAMETER)", "def do_features_request(module_type=None, version=None, software=None):\n\n #  connect to database\n cur_db = connect_db(\"172.20.38.50\", \"mvelay\", \"user\", \"sandbox\")\n cursor = cur_db.cursor()\n\n # build whole query\n cur_query = \"\"\" SELECT feature, supported FROM t_feature\n WHERE module=\"%s\" AND version=\"%s\" AND sw=\"%s\";\"\"\" \\\n % (module_type[0], version[0], software[0])\n\n print cur_query\n cursor.execute(cur_query)\n results = cursor.fetchall()\n cursor.close()\n\n if results:\n results = results[:1000] # Limit to first 1000 results\n else:\n results = None\n\n return module_type[0], version[0], software[0], results", "def test_list_available_product(self):\n view = AvailableProductListView.as_view({'get': 
'list'})\n uri = reverse('products:list-available-products')\n request = self.factory.get(uri, HTTP_AUTHORIZATION='Token {}'.format(self.token_user.key))\n request.user = self.user['user']\n response = view(request)\n self.assertEqual(response.status_code, 200,\n f'Expected Response Code 200, received {response.status_code} instead.')", "def retrieve_product_infos(self):\n\n # PRODUCT NAME\n try:\n product_name = self.product['product_name'].capitalize()\n except KeyError:\n product_name = None\n\n # PRODUCT CODE\n try:\n product_code = self.product['code'].capitalize()\n except KeyError:\n product_code = None\n\n # URL\n try:\n product_url = self.product['url'].lower()\n except KeyError:\n product_url = None\n\n # IMAGE URL\n try:\n image_url = self.product['image_url'].lower()\n except KeyError:\n image_url = None\n\n # QUANTITY\n try:\n quantity = self.product['quantity'].capitalize()\n except KeyError:\n quantity = None\n\n # INGREDIENTS\n try:\n ingredients = self.product['ingredients_text_fr'].capitalize()\n except KeyError:\n ingredients = None\n\n # BRAND\n brands = []\n try:\n for brand in self.product['brands'].split(','):\n brand = brand.strip().capitalize()\n if (\n brand != ''\n and brand not in brands\n ):\n brands.append(brand)\n except KeyError:\n pass\n\n # STORES\n stores = []\n try:\n for store in self.product['stores'].split(','):\n store = store.strip().capitalize()\n if (\n store != ''\n and store not in stores\n ):\n stores.append(store)\n except KeyError:\n pass\n\n # COUNTRY\n try:\n countries = self.product['countries'].capitalize()\n except KeyError:\n countries = None\n if 'France' in countries:\n countries = 'France'\n else:\n countries = None\n\n # COMPARE TO CATEGORY\n try:\n compare_to = self.product['compared_to_category'].capitalize().split(':')[1]\n except KeyError:\n compare_to = None\n try:\n Categories.objects.get(\n name=compare_to\n )\n except Categories.DoesNotExist:\n compare_to = None\n except:\n importable = False\n\n # CATEGORIES HIERARCHY\n try:\n categories_hierarchy = [\n category.split(':')[1] for category in self.product['categories_hierarchy']\n ]\n except KeyError:\n categories_hierarchy = None\n\n # NUTRISCORE GRADE\n nutriscore_labels = [\n 'nutrition_grade_fr',\n 'nutriscore_grade'\n ]\n nutriscore = 'F'\n i = 0\n while (\n i < len(nutriscore_labels)\n and nutriscore == 'F'\n ):\n try:\n nutriscore = self.product[nutriscore_labels[i]].upper()\n except KeyError:\n i += 1\n\n product_infos = {\n 'product_name': product_name,\n 'product_code': product_code,\n 'product_url': product_url,\n 'image_url': image_url,\n 'quantity': quantity,\n 'ingredients': ingredients,\n 'brands': brands,\n 'stores': stores,\n 'countries': countries,\n 'compare_to': compare_to,\n 'categories_hierarchy': categories_hierarchy,\n 'nutriscore': nutriscore\n }\n\n nutriments = self.product['nutriments']\n for nutriment in self.list_nutriments:\n try:\n product_infos[nutriment] = float(nutriments[nutriment])\n except KeyError:\n product_infos[nutriment] = 0\n\n return product_infos", "def test_get_product(self):\n self._require_login(self.user1)\n response = self.client.get('/api/1.0/products/1/')\n self.assertEqual(response.data['name'], 'Producto 1')\n self.assertEqual(response.data['description'], 'Descripcion producto 1')\n self.assertEqual(response.data['selling'], True)\n self.assertEqual(response.data['price'], '24.0')\n self.assertEqual(response.data['seller']['user']['username'], 'testuser1')\n self.assertEqual(response.data['category']['name'], 
'general')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n response = self.client.get('/api/1.0/products/2/')\n self.assertEqual(response.data['name'], 'Producto 2')\n self.assertEqual(response.data['description'], 'Descripcion producto 2')\n self.assertEqual(response.data['selling'], False)\n self.assertEqual(response.data['price'], '312.0')\n self.assertEqual(response.data['seller']['user']['username'], 'testuser2')\n self.assertEqual(response.data['category']['name'], 'deportes')\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def Get(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def fusion_api_get_firmware_driver(self, uri=None, api=None, headers=None, param=''):\n return self.driver.get(uri, api, headers, param)", "def test_with_request_factory(self):\n\n request = self.factory.get(\"/watch\")\n engine = import_module(settings.SESSION_ENGINE)\n request.session = engine.SessionStore()\n response = product_all(request)\n html = response.content.decode(\"utf8\")\n self.assertInHTML(\"<title>Book Store</title>\", html)\n self.assertEqual(response.status_code, 200)", "def view_products():\n min_id = (Product.select().order_by(Product.product_id.asc()).get()).product_id\n max_id = (Product.select().order_by(Product.product_id.desc()).get()).product_id\n print(f\"\\nPlease select id between {min_id} & {max_id}\")\n id = int(input(\"Select product id: \"))\n while id not in range(min_id, max_id+1):\n print(\"Your selection must be between {} and {}\".format(min_id, max_id))\n id = int(input(\"Select product id: \"))\n print(f\"\"\"\\n-Product: {Product.get_by_id(id).product_name}\n-Quantity: {Product.get_by_id(id).product_quantity}\n-Price: {Product.get_by_id(id).product_price} cents\n-Date updated: {Product.get_by_id(id).date_updated}\\n\"\"\")\n input(\"\\nPress ENTER to continue\")\n clear()", "def fetch(self):\n try:\n self.genre = 'Review'\n log.debug(self.log_msg(\"Fetching the prouct page url %s\"%self.currenturi))\n res=self._getHTML(self.currenturi) # Assuming self.currenturi is at the product page\n self.rawpage=res['result']\n self._setCurrentPage()\n try:\n self.parent_page_title = stripHtml(self.soup.find('h1',{'id':'pgTitleDetail'}).renderContents())\n except:\n self.parent_page_title =''\n try:\n self.__product_price = self.soup.find('tbody',{'class':'prices'}).td.renderContents().replace('$','')\n except:\n log.exception(\"Error in fetching product_price\")\n self.__product_price = None\n\n parent_page_url = self.task.instance_data['uri']\n review_first_page_url = self.soup.find('a',text=\"Show All Customer Reviews &#187; \").parent['href']\n review_url_order = \"&sortReviewsBy=DateDescending\"\n self.currenturi = self.base_url + review_first_page_url + review_url_order\n log.info(self.log_msg('current_uri :: %s'%(self.currenturi)))\n self._getParentPage()\n self.next_url_links=[]\n self.fetch_next_link = True\n while self.fetch_next_link:\n self._iterateReviewPages(parent_page_url)\n return True\n except Exception,e:\n log.exception(self.log_msg(\"Exception occured in fetch()\"))\n return False", "def subproduct_add_case(request):\n session = request.session.get('new_product', {})\n if request.GET.get('gtin', None):\n session['gtin'] = request.GET.get('gtin')\n if request.GET.get('package_level', None):\n session['package_level'] = request.GET.get('package_level')\n if request.GET.get('package_type', None):\n session['package_type'] = request.GET.get('package_type')\n request.session['new_product'] = 
session\n\n if not session:\n raise Http404()\n gtin = session.get('gtin', 0)\n prefix = prefix_service.find_item(\n user=request.user,\n starting_from=str(gtin)\n )\n if not prefix:\n raise Http404()\n pl = session.get('package_level', None)\n if not pl:\n flash(request, 'Choose a package level', 'danger')\n return redirect(reverse('products:add_product'))\n\n try:\n page = int(request.GET.get('page', '1'))\n except (ValueError, TypeError):\n page = 1\n\n settings_per_page = getattr(settings, 'PRODUCTS_PER_PAGE', 10)\n try:\n per_page = int(request.GET.get('per_page'))\n except (ValueError, TypeError):\n per_page = None\n if per_page:\n session['per_page'] = per_page\n else:\n per_page = session.get('per_page', settings_per_page)\n\n prefixes = prefix_service.all(user=request.user)\n package_level = package_level_service.get(pl)\n products = Product.service.get_available_subproducts(\n owner=request.user,\n package_level=package_level\n )\n\n target_markets = gtin_target_market_service.get_by_products_list(products)\n target_market_choices = [['', '']]\n for target_market in target_markets:\n try:\n if target_market_choices[-1][0] == target_market.target_market.code:\n continue\n except Exception:\n pass\n target_market_choices.append([\n target_market.target_market.code,\n target_market.target_market.market\n ])\n\n filter_data = {}\n form = SubProductsForm()\n filterform = FilterForm()\n if request.method == 'POST':\n form = SubProductsForm(request.POST)\n if form.is_valid():\n if request.POST.get('filtersubmit'):\n filterform = FilterForm(request.POST)\n if filterform.is_valid():\n products = product_helper.filter_list(products, filterform)\n session['adding_filter'] = filter_data\n else:\n # form = forms.SubProductsForm(request.form)\n # we no longer use data from form but from session\n sub_products = session.get('sub_products', [])\n sub_products.sort()\n\n if len(sub_products) > 0:\n sub_products_data = Product.service.check_subproducts(\n sub_product_gtins=sub_products,\n package_level=package_level,\n owner=request.user\n )\n if sub_products_data['is_valid']:\n # we have subproducts, we move to the next step\n session['sub_products'] = sub_products\n # return redirect(reverse('products:subproduct_add_case_details'))\n return redirect('/products/js-add/#/details?package_level=%s&package_type=%s' % (session['package_level'], session['package_type']))\n else:\n # we have incorrect subproducts\n flash(request, sub_products_data['error'], 'danger')\n return redirect(reverse('products:subproduct_add_case'))\n else:\n # we do not have subproducts - we reselect\n flash(request, 'You must choose products before proceeding to next form', 'danger')\n return redirect(reverse('products:subproduct_add_case'))\n else:\n session['sub_products'] = []\n\n if request.GET.get('clear_filter'):\n if session.get('adding_filter'):\n del session['adding_filter']\n if session.get('adding_filter'):\n filter_data = session['adding_filter']\n else:\n filterform = FilterForm()\n filterform.initial['pallet'] = False\n if package_level.id >= PackageLevel.CASE:\n filterform.initial['case'] = False\n if package_level.id >= PackageLevel.PACK:\n filterform.initial['pack'] = False\n\n # products = ProductFilter(filter_data, queryset=products).qs\n filterform.set_prefixes(prefixes)\n\n if products:\n paginator = Paginator(products, per_page)\n try:\n paginator_page = paginator.page(page)\n except InvalidPage:\n paginator_page = paginator.page(1)\n object_list = paginator_page.object_list\n else:\n paginator_page = 
None\n object_list = None\n\n assoc_products = get_assoc_products_by_products_list(object_list)\n\n filterform.fields['target_market'].choices = target_market_choices\n filterform.base_fields['target_market'].choices = target_market_choices\n filterform.declared_fields['target_market'].choices = target_market_choices\n context = {\n 'products': object_list,\n 'assoc_products': assoc_products,\n 'prefix': prefix,\n 'form': form,\n 'filterform': filterform,\n 'pagination': paginator_page,\n 'per_page': per_page,\n 'ppp': settings_per_page,\n 'enable_leading': True # user.profile.enable_leading\n }\n\n return render(request, 'products/subproduct_add_case.html', context=context)", "def GetCart(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def parse_product(self, response):\n item = ProductItem()\n item['url'] = response.url\n item['vendor'] = parse_url(response.url).netloc\n\n item['product_name'] = response.css('h1::text').get()\n item['density'] = response.css('#description strong::text').get()\n item['available'] = not response.css('.msgSoldOut')\n\n\n for selector in response.css('.fpBktParam'):\n item['raw_string'] = selector.css('span::text').get()\n item['price'] = selector.css('div::text').getall()[1]\n yield item", "def __init__(self):\n\n self.session = requests.session()\n self.current_user_agent_index = 0\n self.headers = {\n 'Host': 'www.amazon.com',\n 'User-Agent': _USER_AGENT_LIST[0],\n 'Accept': 'text/html,application/xhtml+xml,\\\n application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n }\n self.product_dict_list = []", "def products(self):\n response = requests.get(self._url(self._PRODUCTS_PATH), headers=self._headers)\n return response.json()", "def page1(self):\n result = request101.GET('/Cars_Sample_App/supercars.do', None,\n ( NVPair('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'), ))\n self.token_query = \\\n httpUtilities.valueFromBodyURI('query') # 'manu'\n # 15 different values for token_mid found in response, using the first one.\n self.token_mid = \\\n httpUtilities.valueFromBodyURI('mid') # '3'\n\n grinder.sleep(124)\n request102.GET('/Cars_Sample_App/images/enquire_but.gif')\n\n request103.GET('/Cars_Sample_App/images/line.gif')\n\n request104.GET('/Cars_Sample_App/images/manufacturers/Bmw.gif')\n\n request105.GET('/Cars_Sample_App/images/manufacturers/AstonMartin.gif')\n\n request106.GET('/Cars_Sample_App/images/manufacturers/Ferrari.gif')\n\n request107.GET('/Cars_Sample_App/images/insurance_but.gif')\n\n grinder.sleep(90)\n request108.GET('/Cars_Sample_App/images/manufacturers/Porsche.gif')\n\n request109.GET('/Cars_Sample_App/images/manufacturers/Jaguar.gif')\n\n request110.GET('/Cars_Sample_App/images/pipe.gif')\n\n request111.GET('/Cars_Sample_App/images/manufacturers/Lotus.gif')\n\n return result", "def products():\n\n\treturn render_template(\"products.html\")", "def get(self, request, product_id=None, page_no=None):\n if product_id is None:\n return Response(\"product_id cannot be null\", status=status.HTTP_400_BAD_REQUEST)\n \n if page_no is None:\n page_no = 1\n if page_no <= 0:\n return Response(\"Page Number must be >= 1\", status=status.HTTP_400_BAD_REQUEST)\n\n queryset = Qanda.objects.using('scraped').filter(product_id=product_id)\n if queryset.count() == 0:\n return Response(f\"No QandA exists for this product - {product_id}\", status=status.HTTP_404_NOT_FOUND)\n \n ITEMS_PER_PAGE = 
10\n queryset = queryset[(page_no - 1) * ITEMS_PER_PAGE : (page_no) * ITEMS_PER_PAGE]\n \n serializer = QandASerializer(queryset, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)", "def sync_product_with_gmc(self, products):\n def get_names(cat):\n \"\"\" Return the list [cat.name, cat.parent_id.name, ...] \"\"\"\n res = []\n while cat:\n res.append(cat.name)\n cat = cat.parent_id\n return res\n\n currency = self.env.user.company_id.currency_id.name\n\n service = self.gmc_flow()\n\n count = 1\n for product in products:\n if product.google_mcid:\n offerId = product.google_mcid\n product.write({'google_sync_date': fields.Date.today()})\n else:\n offerId = 'CM%s' % get_unique_id()\n product.write({'google_mcid': offerId, 'google_sync_date': fields.Date.today()})\n #Display ads id\n if product.google_display_ads_id:\n displayAdsId = product.google_display_ads_id\n else:\n displayAdsId = 'ADS%s' % get_unique_id()\n product.write({'google_display_ads_id': displayAdsId})\n product_data = {\n 'offerId': offerId,\n 'displayAdsId': displayAdsId,\n 'title': product.name,\n 'description': product.description_sale,\n #Use product template url as variants are not shown sepratly.\n 'link': product.google_merchant_center_id.website + \"/shop/product/%s\" % (product.product_tmpl_id.id,),\n 'imageLink': product.google_merchant_center_id.website + '/web/image/%s/%s/%s/image.jpg' % ('product.template', product.product_tmpl_id.id, 'image'),\n #Note: Instead of passing website url passsed backend URl because Store not accept image without type\n 'contentLanguage': product.google_content_language,\n 'targetCountry': product.google_target_country,\n 'channel': product.google_channel,\n 'availability': product.google_availability,\n 'condition': product.google_condition,\n 'googleProductCategory': \" > \".join(reversed(get_names(product.google_product_category_id))),\n 'productType': \" > \".join(reversed(get_names(product.categ_id))),\n 'brand': product.google_product_brand_id and product.google_product_brand_id.name or '',\n 'price': {\n 'value': product.list_price,\n 'currency': currency},\n 'shipping': [{\n 'country': product.google_target_country,\n 'service': product.google_shipping,\n 'price': {'value': product.google_shipping_amount,\n 'currency': currency}\n }],\n 'taxes': [\n {\n 'rate': product.google_tax_rate,\n 'country': product.google_target_country,\n }],\n 'shippingWeight': {\n 'value': product.weight * 1000, \n 'unit': 'grams'\n },\n }\n\n #Check if identifierExists than only add mpn\n if product.google_identifier_exists:\n product_data.update({'mpn': product.default_code})\n if not product.google_barcode_as_gtin and product.google_gtin:\n product_data.update({'gtin': product.google_gtin})\n elif product.google_barcode_as_gtin and product.barcode:\n product_data.update({'gtin': product.barcode})\n else:\n product_data.update({'identifierExists': 'no'})\n\n #add some optional attributes\n if product.google_gender:\n product_data.update({'gender': product.google_gender})\n if product.google_age_group:\n product_data.update({'ageGroup': product.google_age_group})\n if product.google_product_size_id:\n product_data.update({'sizes': [product.google_product_size_id and product.google_product_size_id.name or '']})\n if product.google_product_color_id:\n product_data.update({'color': product.google_product_color_id and product.google_product_color_id.name or '',})\n if product.google_expiration_date:\n #pass date in perticular formate\n expiration_date = 
product.google_expiration_date.strftime('%Y-%m-%d')\n product_data.update({'expirationDate': expiration_date})\n\n #Optionla Attributes for Remarketing\n if product.google_display_ads_similar_ids:\n product_data.update({'displayAdsSimilarIds': [prod.google_display_ads_id for prod in product.google_display_ads_similar_ids]})\n if product.google_display_ads_title:\n product_data.update({'displayAdsTitle': product.google_display_ads_title})\n if product.google_display_ads_link:\n product_data.update({'displayAdsLink': product.google_display_ads_link})\n if product.google_display_ads_value:\n product_data.update({'displayAdsValue': product.google_display_ads_value})\n if product.google_excluded_destination:\n product_data.update({'destinations': {\n 'destinationName': 'DisplayAds', \n 'intention': 'excluded'}\n })\n\n # Add product.\n request = service.products().insert(merchantId=product.google_merchant_center_id.name, body=product_data)\n\n try:\n result = request.execute()\n _logger.info('Count: %s------- Product: %s', count, product)\n count += 1\n self.env.cr.commit()\n except errors.HttpError as e:\n error = simplejson.loads(e.content.decode('utf-8'))\n raise UserError(_(\"%s when syncronizing %s.\") % (error['error'].get('message'),product.name))", "def sli_get(obj, product_name, name):\n client = get_client(obj)\n\n product = client.product_list(name=product_name)\n if not product:\n fatal_error('Product {} does not exist'.format(product_name))\n\n slis = client.sli_list(product[0], name=name)\n if not slis:\n fatal_error('SLI {} does not exist'.format(name))\n\n print(json.dumps(slis[0], indent=4))", "def enumerate_products(self, dataframe, context_col, fragment_col):\n # create dict\n self.enumerated_products_smi = {}\n for (ctx, frag) in zip(dataframe[context_col].tolist(), dataframe[fragment_col].tolist()):\n self.enumerated_products_smi[(ctx, frag)] = None\n\n # function to return value from this dict:\n def get_product(row):\n return self.enumerated_products_smi[(row[context_col], row[fragment_col])]\n\n # function to return value for novelty\n def get_novelty(row):\n if row['ENUMERATED_PRODUCT'] in self.mol_smi_dict:\n return self.mol_smi_dict[row['ENUMERATED_PRODUCT']]\n else:\n return True\n\n # write rxn files\n enum_object = enum_mols.MMPEnumerateNewMols(self.logger)\n enum_object.write_rxn_files()\n enum_object.write_reactants_simple_dict(self.enumerated_products_smi)\n enum_object.do_reactions()\n\n for cut_type, rtn_ctx, rtn_frag, new_mol in enum_object.yield_products_simple_dict_input():\n if (rtn_ctx, rtn_frag) in self.enumerated_products_smi:\n self.enumerated_products_smi[(rtn_ctx, rtn_frag)] = new_mol\n else:\n self.logger.debug(\"got a return I was not expecting: %s, %s -> %s\" % (rtn_ctx, rtn_frag, new_mol))\n\n # standardise\n temp_smifi = tempfile.NamedTemporaryFile(suffix=\".smi\", delete=False, encoding='utf-8', mode='wt')\n std_smi_lookup = {}\n # take the product smi and store as product_smi => None\n arbitary_id = 0\n for smi_no_std in list(self.enumerated_products_smi.values()):\n arbitary_id += 1\n std_smi_lookup[smi_no_std] = arbitary_id\n std_smi_lookup[arbitary_id] = smi_no_std\n temp_smifi.write(smi_no_std + \" \" + str(arbitary_id) + \"\\n\")\n temp_smifi.close()\n\n #\n # send in a dict of old_smi => old_smi\n # should turn this into old_smi => new_smi\n self.logger.debug(\"Requested standardisation on %s\" % temp_smifi.name)\n new_smi_dict = self.generate_std_smiles(temp_smifi.name, smi_id_map='id_smi')\n std_smi_lookup.update(new_smi_dict)\n\n #\n 
for key, value in list(self.enumerated_products_smi.items()):\n # print value, \" >> \", std_smi_lookup[std_smi_lookup[value]]\n self.enumerated_products_smi[key] = std_smi_lookup[std_smi_lookup[value]]\n\n # add the return enumerated mols to the df\n dataframe['ENUMERATED_PRODUCT'] = dataframe.apply(lambda row: get_product(row), axis=1)\n dataframe['NOVEL'] = dataframe.apply(lambda row: get_novelty(row), axis=1)\n #\n del enum_object\n #\n return dataframe", "def parse(self, response):\n product_urls = response.css(\n '.product-li .product-image a::attr(href)'\n ).getall()\n for product_url in product_urls:\n yield scrapy.Request(response.urljoin(product_url), self.parse_product)\n\n\n next_page_number = 2\n if '?' in response.url:\n return\n while next_page_number < 37:\n # import logging\n # logging.log(logging.WARNING, f\"This is a warning {len(product_urls)} : {product_urls[0]}\")\n next_page_url = f'{response.url}?p={next_page_number}'\n yield scrapy.Request(response.urljoin(next_page_url))\n next_page_number += 1", "def get_all_product():\r\n\r\n with mysql.db_session() as session:\r\n product = session.query(Product).all()\r\n\r\n if not product:\r\n return response.create_not_found_response()\r\n\r\n response_data = [each.to_dict() for each in product]\r\n\r\n return response.Response(message=response_data)", "def product_list(request):\n error = {\n 'status': False,\n 'name': None,\n 'text': None,\n 'level': None,\n 'debug': None\n }\n\n limit, error = get_limit(request, error)\n\n serializer = FreshSerializer()\n queryset = Product.objects.all()[:limit]\n\n if not queryset:\n error = {\n \"status\": True,\n \"name\": \"No Products\",\n \"text\": \"No Products found\",\n \"level\": \"Information\",\n \"debug\": \"\"\n }\n\n data = {\n \"products\": json.loads(serializer.serialize(queryset)),\n \"error\": error\n }\n\n return HttpResponse(json.dumps(data), content_type=\"application/json\")", "def request_pack_stock(proxy, headers):\n query = \"\"\"\n query SearchPackListings($input: SearchPackListingsInput!) {\n searchPackListings(input: $input) {\n data {\n searchSummary {\n data {\n ... 
on PackListings {\n data {\n id\n price\n title\n remaining\n totalPackCount\n expiryDate\n preorder\n images {\n type\n url\n __typename\n }\n __typename\n }\n __typename\n }\n __typename\n }\n __typename\n }\n __typename\n }\n __typename\n }\n }\n \"\"\"\n variables = {'input': {'searchInput': {'pagination': {'cursor': \"\", 'direction': \"RIGHT\", 'limit': '100'}}}}\n\n url = 'https://api.nbatopshot.com/marketplace/graphql?SearchPackListings'\n r = requests.post(url, json={'query': query, 'variables': variables})\n packs = json.loads(r.text)\n #df_data = packs['data']['searchPackListings']['data']['searchSummary']['data']['data']\n #df = pd.DataFrame(df_data)\n return packs", "def FetchLicense(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def request(self, http_method, url, payload=None, querystring=None,\r\n all_pages=None):\r\n\r\n try:\r\n\r\n response = self.fetch_response(\r\n http_method, url, payload=payload, querystring=querystring)\r\n try:\r\n if self.is_valid_response(response):\r\n response_json = None\r\n if response.status_code != 204:\r\n if response.status_code == 201 and response.content == b'':\r\n pass\r\n else:\r\n response_json = response.json()\r\n # check 'all_pages' required, response received is\r\n # partial(code 206) and contains info about total size of\r\n # the collection\r\n content_range = response.headers.get('content-range')\r\n if all_pages and response.status_code == 206 and\\\r\n content_range:\r\n # 'content-range': '0-99/789'\r\n total_size = self.get_total_size_from_content_range(\r\n content_range)\r\n myranges = [\r\n \"{0}-{1}\".format(i, i + constants.MAX_LIMIT)\r\n for i in range(constants.OFFSET, total_size,\r\n constants.MAX_LIMIT)]\r\n for myrange in myranges:\r\n response = self.fetch_response(\r\n http_method, url, payload=payload,\r\n querystring=querystring, myrange=myrange)\r\n if self.is_valid_response(response):\r\n response_json.extend(response.json())\r\n else:\r\n self.raise_http_exception(response)\r\n\r\n return response_json\r\n else:\r\n self.raise_http_exception(response)\r\n\r\n except ValueError as ex:\r\n # its low-level or response level error caused by\r\n # response.json() and not in requests.exceptions\r\n error_msg = \"ValueError: '{0}' for Method: '{1}' URL: '{2}'\"\\\r\n \" PayLoad: '{3}' QueryString: '{4}'\".format(\r\n str(ex), http_method, url, payload, querystring)\r\n LOG.error(error_msg)\r\n raise PowerStoreException(PowerStoreException.VALUE_ERROR,\r\n error_msg)\r\n except socket.error as exception:\r\n LOG.error(str(exception))\r\n raise PowerStoreException(PowerStoreException.SOCKET_ERR,\r\n str(exception))\r\n except SSLError as exception:\r\n LOG.error(str(exception))\r\n raise PowerStoreException(PowerStoreException.SSL_ERROR,\r\n str(exception))\r\n except ConnectionError as exception:\r\n LOG.error(str(exception))\r\n raise PowerStoreException(PowerStoreException.CONNECTION_ERROR,\r\n str(exception))\r\n except TooManyRedirects as exception:\r\n LOG.error(str(exception))\r\n raise PowerStoreException(\r\n PowerStoreException.TOO_MANY_REDIRECTS_ERROR, str(exception))\r\n except Timeout as exception:\r\n LOG.error(str(exception))\r\n raise PowerStoreException(PowerStoreException.TIMEOUT_ERROR,\r\n str(exception))", "def get_product_details(product_url: str) -> dict:\n def get_available_sizes(postID, sizeStr, product_mainID):\n \"\"\"\n List size haye mojood va tamoom 
shode ro ba API mide\n POST: https://store.lining.com/ajax/goods_details.htm\n \"\"\"\n api_url = 'https://store.lining.com/ajax/goods_details.html'\n data = {\n 'postID': postID,\n 'sizeStr': sizeStr,\n 'product_mainID': product_mainID\n }\n r = get_json(api_url, data=data)\n onsale_sizes = r['data']['onSale']\n logging.debug('Onsale Sizes: ' + repr(onsale_sizes))\n return onsale_sizes\n\n def get_pid_from_url(url):\n \"\"\" ID mahsool ro az URL darmiare \"\"\"\n return re.findall(r'store.lining.com/shop/goods-(\\w+).html\\w*', url)[0]\n\n def translate_keyword(keyword):\n \"\"\" tarjome key marboot be desctioption \"\"\"\n define = {\n '运动类型': 'Sport Type',\n '性别': 'Sex',\n '颜色': 'Color',\n '鞋透气指数': 'Shoes breathability index',\n '鞋软硬指数': 'Shoe soft and hard index',\n }\n if keyword in define:\n return define[keyword]\n else:\n return keyword\n ###########################################################\n\n details = dict()\n soup = get_soup(product_url)\n\n # product ID\n pid = get_pid_from_url(product_url)\n logging.debug('PID: ' + pid)\n details['pid'] = pid\n\n # product name\n name = soup.find('h1', {'id': 'product_name'}).text.strip()\n logging.debug('Name: ' + name)\n details['name'] = name\n\n # part number\n sku = soup.find('span', {'id': 'partNumber'}).find('span', {'class': 'v'}).text.strip()\n part_number = sku[0:sku.find('-')]\n logging.debug('Part Number: ' + part_number)\n details['sku'] = sku\n details['part_number'] = part_number\n\n # price\n price = soup.find('span', {'id': 'listPrice'}).find('span', {'class': 'v'}).text.strip().replace('¥', '')\n price_offer = soup.find('span', {'id': 'offerPrice'}).find('span', {'class': 'v'}).text.strip().replace('¥', '')\n logging.debug('Price: %s [offer]-> %s' % (price, price_offer))\n details['price'] = price\n details['price_offer'] = price_offer\n\n # all sizes\n all_sizes = list()\n for tag in soup.find('div', {'id': 'sizelist'}).find_all('div', 'size-layer'):\n tag = tag.find('input')\n # all_size -> [(id, size, status), ...]\n all_sizes.append(\n (\n tag.get('id').replace('size_list_', ''),\n tag.get('value'),\n None,\n )\n )\n available_sizes = get_available_sizes(\n postID=pid,\n product_mainID=part_number,\n # first element of all_sizes list\n # all_size -> [(id, size, status), ...]\n sizeStr=','.join([s[0] for s in all_sizes]),\n )\n # update all sizes status\n for i in range(len(all_sizes)):\n if all_sizes[i][1] in available_sizes:\n all_sizes[i] = (\n all_sizes[i][0],\n all_sizes[i][1],\n 'onsale',\n )\n else:\n all_sizes[i] = (\n all_sizes[i][0],\n all_sizes[i][1],\n 'stockout',\n )\n logging.debug('All Sizes: %s' % repr(all_sizes))\n details['all_sizes'] = all_sizes\n\n # description images\n description_images = list()\n desc = soup.find('div', {'id': 'PD_desc_picture'})\n for img in desc.find_all('img'):\n img = img.get('orginalsrc')\n logging.debug('description_images[]: ' + img)\n description_images.append(img)\n details['description_images'] = description_images\n\n # description key/value\n description = dict()\n for li in soup.find('ul', {'id': 'p_spec'}).find_all('li'):\n key = li.find('span', {'class': 't'}).text.replace(':', '').strip()\n key = translate_keyword(key)\n value = li.find('span', {'class': 'v'}).text.strip()\n description[key] = value\n logging.debug('%s -> %s' % (key, value))\n details['description'] = description\n\n # slider images\n slider_images = list()\n for li in soup.find('div', {'class': 'box'}).find_all('li'):\n img = li.find('img').get('big')\n logging.debug('slider_images[]: ' + 
img)\n slider_images.append(img)\n details['slider_images'] = slider_images\n\n # related products\n related_products_id = list()\n for li in soup.find('div', {'id': 'f_litimg'}).find_all('li'):\n url = li.find('a').get('href')\n url = 'store.lining.com' + url\n pid = get_pid_from_url(url)\n logging.debug('related_products_id[]: %s -> %s' % (pid, url))\n related_products_id.append(pid)\n details['related_products_id'] = related_products_id\n\n return details", "def request(cls, mws_access_key, mws_secret_key, mws_account_id,\n mws_marketplace_id, id_type=None, ids=(), mws_auth_token=None):\n products_api = mws.Products(mws_access_key, mws_secret_key, mws_account_id, auth_token=mws_auth_token)\n response = products_api.get_matching_product_for_id(mws_marketplace_id, id_type, ids)\n return cls.load(response.original)", "def products():\n try:\n return jsonify(get_product_caching_service().jsonofied_product_map)\n except Exception as exception:\n return jsonify({'Something went wrong: ': exception})", "def test_get_product_list(self):\n self._create_products(5)\n resp = self.app.get(\"/products\")\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = resp.get_json()\n self.assertEqual(len(data), 5)", "def get(self, request, page_no=None):\n queryset = Productlisting.objects.using('scraped').all()\n if page_no is None:\n page_no = 1\n if page_no <= 0:\n return Response(\"Page Number must be >= 1\", status=status.HTTP_400_BAD_REQUEST)\n \n ITEMS_PER_PAGE = 10\n queryset = queryset[(page_no - 1) * ITEMS_PER_PAGE : (page_no) * ITEMS_PER_PAGE]\n \n serializer = ProductListingSerializer(queryset, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)", "def products_for_influencers(parameters, page=0, page_size=50):\n product_ids, _, total_hits = elastic_search_helpers.es_product_query_runner_v2(\n parameters, page, page_size)\n\n num_pages = int(math.ceil(float(total_hits) / page_size))\n\n return product_ids, total_hits, num_pages", "def get_products(self):\n con = dbcon()\n cur = con.cursor()\n cur.execute(\"SELECT * FROM products;\")\n res = cur.fetchall()\n if res:\n prdcts=[]\n for prodct_item in res:\n picked_prdct = {\n 'product_id':prodct_item[0],\n 'product_name':prodct_item[1],\n 'price':prodct_item[2],\n 'quantity':prodct_item[3]\n }\n prdcts.append(picked_prdct)\n return jsonify({\"Products\": prdcts}), 200\n return jsonify({\"message\":\"No products in store\"})", "def go_product_page(self, driver, product_id, website):\n link = self.product_url(website, product_id)\n self.go_and_assert(driver, link, website)", "def start(self, module=None, app=None, delay=None):\n with (app or flask.current_app).test_request_context():\n path = self.url(delay=delay)\n\n url = 'https://{module}-dot-{hostname}{path}'.format(\n module=module or self.module_name,\n hostname=app_identity.get_default_version_hostname(),\n path=path)\n\n urlfetch.fetch(url)", "async def limited_req(path, session, semaphore, **kwargs):\n async with semaphore:\n async with session.get('https://iceprod2-api.icecube.wisc.edu'+path, params=kwargs) as response:\n return await response.json()", "def get_download_info_API(request):\n update_metrics(request)\n\n session_id = request.session.session_key\n\n product_types = request.GET.get('types', 'none')\n product_types = product_types.split(',')\n\n previews = request.GET.get('previews', 'none')\n previews = previews.split(',')\n\n # since we are assuming this is coming from user interaction\n # if no filters exist then none of this product type is 
wanted\n if product_types == ['none'] and previews == ['none']:\n # ie this happens when all product types are unchecked in the interface\n return HttpResponse(json.dumps({'size':'0', 'count':'0'}), content_type='application/json')\n\n if previews == ['all']:\n previews = [i[0] for i in settings.image_sizes]\n\n # now get the files and download size / count for this cart\n urls = []\n from results.views import *\n files = getFiles(collection=True, session_id=session_id, fmt=\"raw\", loc_type=\"url\", product_types=product_types, previews=previews)\n download_size, count = get_download_info(files)\n\n # make pretty size string\n download_size = nice_file_size(download_size)\n\n return HttpResponse(json.dumps({'size':download_size, 'count':count}), content_type='application/json')", "def get_filled(product='all', limit=100):\n\n params = {\n 'limit': limit,\n 'product_id': product,\n }\n\n log.info('getting all FILLED ORDERS')\n\n resp = httpapi.get(\n common.api_url + 'fills',\n params=params,\n auth=common.auth,\n )\n\n return resp.json(), resp", "def render_get(self, request):\n try:\n remote = request.remote[0]\n except TypeError:\n remote = request.remote.sockaddr[0]\n\n logger.debug(\"CoAP GET received from {}\".format(remote))\n\n firmware_binary = self._controller.\\\n get_latest_firmware_binary(self._appid, self._slot)\n\n return Message(code=CONTENT, payload=firmware_binary)", "def get_request(req_context, uri):\n headers = { 'Accept': \"application/json\", \n 'User-Agent': \"testApp\"\n }\n if config.ENVIRONMENT == \"Sandbox\":\n base_url = \"https://sandbox-quickbooks.api.intuit.com/v3/company/\"\n else:\n base_url = \"https://quickbooks.api.intuit.com/v3/company/\"\n url = base_url + req_context.realm_id + uri\n print(url)\n if config.AUTH_TYPE == \"OAuth2\":\n headers['Authorization'] = \"Bearer \" + req_context.access_token\n req = requests.get(url, headers=headers)\n else:\n auth = OAuth1(req_context.consumer_key, req_context.consumer_secret, req_context.access_key, req_context.access_secret)\n req = requests.get(url, auth=auth, headers=headers)\n return req", "def API_request(self, search_term, search_type):\n url = \"\"\n if search_type == \"product\":\n url = self.url_product.format(search_term)\n elif search_type == \"substitute\":\n url = self.url_substitute.format(search_term)\n r = requests.get(url)\n response = r.json()\n return response[\"products\"]", "async def buy(self, product_id: int):\n ee = self.request.request(\n url=f'https://economy.roblox.com/v2/user-products/{product_id}/purchase',\n method='post')\n return ee", "def test_get_rate_plan_by_product(self):\n pass", "def product(request, product_id):\n\n u = request.user\n try:\n p = Product.objects.get(id=product_id)\n request.POST['sku'] = p.sku\n result = item(u, p.sku)\n except Product.DoesNotExist:\n result = {'result':'0'}\n return JSONHttpResponse( result )", "def product_view(request, product):\n product = Products.objects.get(product=product)\n\n context = {\n \"product\": product,\n }\n\n return render(request, \"products/product_detail.html\", context)", "def get_frame(request, tid, frame):\n\n try:\n # Follow symbol links if the frame is a link on a real image otherwise\n # mimetype detection inside sendfile will work incorrectly.\n path = os.path.realpath(task.get_frame_path(tid, frame))\n return sendfile(request, path)\n except Exception as e:\n slogger.task[tid].error(\"cannot get frame #{}\".format(frame), exc_info=True)\n return HttpResponseBadRequest(str(e))", "def one_click(self, adi):\r\n 
products = self.get_products(adi)\r\n self.sc.stack = [adi]\r\n self.sc.check(products, {\"$ref\" : \"ResponseBankingProductList\"})\r\n for product in products[\"data\"][\"products\"]:\r\n self.sc.stack = [adi, product[\"name\"]]\r\n self.check_product_detail_schema(adi, product[\"productId\"], product[\"name\"])", "def list_products(self):\n return self._make_get_request(self._urls['products'])", "def dispatch_frame(self, frame):", "def parse_PRODUCT(self, response: Union[SplashJsonResponse, ScrapyHttpResponse]) -> None:\n\n if meta_information := response.meta.get(\"meta_data\"):\n meta_information |= response.meta.get(\"request_meta_information\", {})\n else:\n meta_information = response.meta.get(\"request_meta_information\")\n\n meta_information[\"original_URL\"] = response.meta.get(\"original_URL\", None)\n\n scraped_page = ScrapedPage(\n timestamp=self.timestamp,\n source=self.source,\n merchant=self.merchant,\n country=self.country,\n url=response.url,\n html=response.body.decode(\"utf-8\"),\n page_type=PageType.PRODUCT.value,\n category=response.meta.get(\"category\"),\n gender=response.meta.get(\"gender\"),\n consumer_lifestage=response.meta.get(\"consumer_lifestage\"),\n meta_information=meta_information,\n )\n\n self.message_queue.add_scraping(table_name=self.table_name, scraped_page=scraped_page)", "def _getSymbols(self):\n path = '/products'\n\n try:\n r = requests.get(settings['abucoins_exporter']['url'] + path, verify=True) # Doesn't need authentication\n except (\n requests.exceptions.ConnectionError,\n requests.exceptions.ReadTimeout,\n requests.packages.urllib3.exceptions.ReadTimeoutError\n ) as e:\n log.warning(e)\n r = False\n if r and r.status_code == 200:\n for symbol in r.json():\n if symbol['id'] not in self.symbols:\n self.symbols.append(symbol['id'])\n\n log.debug('Found the following symbols: {}'.format(self.symbols))", "async def get_device_info_w_slice(session: ClientSession, graphql_instance: object, **kwargs: object) -> list:\n request_results = []\n sub_request_kwargs = kwargs\n req_num = sub_request_kwargs.pop(\"req_num\", 0)\n\n sub_request_kwargs[\"category\"] = graphql_instance.__class__.__name__\n sub_request_kwargs[\"req_num\"] = f\"{req_num}\"\n\n http_response = await asyncio.create_task(send_request(session, **sub_request_kwargs))\n\n if not http_response:\n cur_devices = sub_request_kwargs[\"json\"][\"variables\"][\"productIds\"]\n if len(cur_devices) > 1:\n parts = ([\n cur_devices[:len(cur_devices) // 2],\n cur_devices[len(cur_devices) // 2:]\n ])\n for i, part in enumerate(parts):\n sub_request_kwargs_copy = copy.deepcopy(sub_request_kwargs)\n sub_request_kwargs_copy[\"json\"][\"variables\"][\"productIds\"] = part\n sub_request_kwargs_copy[\"req_num\"] = f\"{req_num}.{i}\"\n request_results.append(await get_device_info_w_slice(\n session,\n graphql_instance,\n **sub_request_kwargs_copy\n ))\n else:\n request_results.append(http_response)\n return request_results", "def retrieve(self, request, pk=None):\n try:\n order_product = Order_Products.objects.get(pk=pk)\n serializer = Order_Products_Serializer(\n order_product, context={'request': request}\n )\n return Response(serializer.data)\n except Exception as ex:\n return HttpResponseServerError(ex)", "def test_product_search(self):\n\n flag = \"user\"\n api = \"product.product.add\"\n current_page = 1\n search_info = json.dumps({\n 'name': \"可爱的小蓝牙呀\"\n })\n print('start------------------------>add')\n result = self.access_api(flag = flag, api = api, current_page = current_page, product_info = 
search_info)", "def get_products(classification):\n call = build_call('attr', classification)\n return request_data(call)", "def products_view(request, product_id):\n if not product_id:\n return JsonResponse({\"error\": \"Product id is not provided\"}, 400)\n if request.method == \"GET\":\n response, status_code = get_products(request, product_id)\n if status_code != 200:\n return JsonResponse(response, status=status_code)\n else:\n serialize_data = ProductSerializer(response, many=False).data\n return JsonResponse(serialize_data, status=200, safe=False)\n else:\n response, status_code = update_product(request, product_id)\n if status_code != 204:\n return JsonResponse(response, status=status_code)\n serialize_data = ProductSerializer(response, many=False).data\n return JsonResponse(serialize_data, status=status_code, safe=False)", "def request():\n return face_client.face.detect_with_stream(image=open(\"frame.png\", 'rb'),\n return_face_attributes=[emotion_attribute],\n recognition_model='recognition_02')", "def fetch(self, data: Any, *args: Any, **kwargs: Any):\n product = None\n next_args = (data, *args)\n next_kwargs = kwargs\n for name, method, outlet, description in self.steps:\n product, new_args, next_kwargs = method(*next_args, **next_kwargs)\n next_args = (product, *new_args)\n if isinstance(product, self.outlet):\n return product\n else:\n raise RuntimeError(\"Process was not completed according to specification.\")", "def run(self, bundle_fqids: typing.List[str], bundle_fqids_url: str, format: str, genus_species: str):\n logger.debug(f\"Driver running with parameters: bundle_fqids={bundle_fqids}, \"\n f\"bundle_fqids_url={bundle_fqids_url}, format={format}, \"\n f\"genus_species={genus_species}, \"\n f\"bundles_per_worker={self.bundles_per_worker}\")\n\n # 10/17/19: Bundle UUIDs will be used in favor of FQIDs in v0 in order to loosen\n # the data parity restriction with Data Browser, enabling project availability\n # while DCP components respond to simple updates at different frequencies.\n if bundle_fqids_url:\n response = self._get_bundle_manifest(bundle_fqids_url)\n resolved_bundle_uuids = self._parse_download_manifest(response.text)\n if len(resolved_bundle_uuids) == 0:\n error_msg = \"no bundles found in the supplied bundle manifest\"\n logger.info(error_msg)\n self.request_tracker.log_error(error_msg)\n return\n else:\n resolved_bundle_uuids = [fqid.split(\".\", 1)[0] for fqid in bundle_fqids]\n logger.debug(f\"resolved bundle uuids: {resolved_bundle_uuids}\")\n\n self.dynamo_handler.set_table_field_with_value(DynamoTable.REQUEST_TABLE,\n self.request_id,\n RequestTableField.NUM_BUNDLES,\n len(resolved_bundle_uuids))\n s3_obj_keys = self._format_and_store_queries_in_s3(resolved_bundle_uuids, genus_species)\n\n analysis_table_bundle_count = self._fetch_bundle_count_from_analysis_table(resolved_bundle_uuids)\n if analysis_table_bundle_count != len(resolved_bundle_uuids):\n error_msg = \"resolved bundles in request do not match bundles available in matrix service\"\n logger.info(error_msg)\n self.request_tracker.log_error(error_msg)\n return\n\n for key in s3_obj_keys:\n self._add_request_query_to_sqs(key, s3_obj_keys[key])\n\n self.request_tracker.complete_subtask_execution(Subtask.DRIVER)", "async def request(self, multiplier: Optional[int]=None):\n # TODO: validate the multiplier\n message = Message(self.name_path, multiplier)\n await self.issue_command(Command(message))", "def api_all():\r\n\tconn = sqlite3.connect('Shopify_products.db')\r\n\tconn.row_factory = 
dict_factory\r\n\tcur = conn.cursor()\r\n\tall_products = cur.execute('SELECT * FROM products WHERE inventory_count>0;').fetchall()\r\n\treturn jsonify(all_products)", "def openProduct(self, product_path):\r\n\r\n product = None\r\n variables = self.readVariables(product)\r\n attributes = self.readAttributes(product)\r\n return product, variables, attributes", "def pull(self, where=\"\", parameters={}, verify_hash=False):\n queue = self.search(where=where, parameters=parameters)\n for product in queue:\n if 'archive_path' in product.core:\n raise Error(\"cannot pull local products\")\n if 'remote_url' not in product.core:\n raise Error(\"cannot pull products that have no remote_url\")\n\n plugin = self.product_type_plugin(product.core.product_type)\n product.core.archive_path = plugin.archive_path(product)\n\n # set archive_path and deactivate while we pull it in\n metadata = {'active': False, 'archive_path': product.core.archive_path}\n self.update_properties(Struct({'core': metadata}), product.core.uuid)\n\n # pull product\n try:\n remote.pull(self, product)\n except:\n # reset active/archive_path values\n metadata = {'active': True, 'archive_path': None}\n self.update_properties(Struct({'core': metadata}), product.core.uuid)\n raise\n\n # reactivate and update size\n size = util.product_size(self._product_path(product))\n metadata = {'active': True, 'archive_date': self._backend.server_time_utc(), 'size': size}\n self.update_properties(Struct({'core': metadata}), product.core.uuid)\n\n # verify product hash.\n if verify_hash and 'hash' in product.core:\n if self.verify_hash(\"uuid == @uuid\", {\"uuid\": product.core.uuid}):\n raise Error(\"pulled product '%s' (%s) has incorrect hash\" %\n (product.core.product_name, product.core.uuid))\n\n # Run the post pull hook (if defined by the product type plug-in).\n if hasattr(plugin, \"post_pull_hook\"):\n plugin.post_pull_hook(self, product)\n\n return len(queue)", "def GetBlock(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def _subpage_crawler(self):\n while True:\n try:\n \tfamily = self.product_families.pop()\n except IndexError:\n\t\t\t\tbreak\n\n with closing(urlopen(self.base_url + family)) as product_list_page:\n product_list_soup = BeautifulSoup(product_list_page, 'html.parser')\n product_list = product_list_soup.find_all(self.product_list_tag[\"name\"], self.product_list_tag[\"attrs\"])\n for product in product_list:\n \tproduct_url = product.find(self.product_url_tag[\"name\"]).get('href')\n \tself.product_links[product_url] = family\n \t#print product_url", "def get_influencer_products_v2(influencer_ids,\n parameters,\n page_size,\n db_only=False,\n limit=10):\n from debra.models import ProductModelShelfMap\n\n # updating influencers to list and moving it to parameters for ES query builder\n if type(influencer_ids) != list:\n influencer_ids = [influencer_ids, ]\n\n # storing influencer's id in parameters for query builder\n if influencer_ids:\n parameters['influencer_ids'] = influencer_ids\n\n # Retrieving page number for ES query\n try:\n page = int(parameters.get('page', 1))\n page -= 1\n except TypeError:\n page = 0\n\n if db_only:\n db_params = {}\n highlighted_product_ids, total = [], None\n else:\n # Getting list of post ids from ES depending on search parameters\n product_ids, highlighted_product_ids, total = es_product_query_runner_v2(parameters,\n page,\n page_size,\n 
highlighted_first=True)\n db_params = dict(product_model__id__in=product_ids,)\n if settings.DEBUG:\n print('* Item IDS: %s' % product_ids)\n print('* Highlighted item IDs: %s' % highlighted_product_ids)\n print('* Total items: %s' % total)\n\n items_from_db = ProductModelShelfMap.objects.filter(\n influencer__id__in=influencer_ids,\n img_url_feed_view__isnull=False,\n **db_params\n ).prefetch_related('product_model') # .order_by('-product_model__insert_date')\n\n # list of ids for ProductModelShelfMap, not for product_model get from ES\n highlighted_items_ids = []\n\n if db_only:\n items = items_from_db[:limit]\n else:\n # sorting posts by list of ids from ES\n items = dict()\n for item in items_from_db:\n items[item.product_model.id] = item\n if item.product_model.id in highlighted_product_ids:\n highlighted_items_ids.append(item.id)\n\n items = [items[product_id] for product_id in product_ids if product_id in items]\n\n items_data = serialize_items_data_v2(items, highlighted_items_ids)\n\n return items_data, len(items_data)", "def get_product_info_from_url(url):\n try:\n # Step 1: Sending a HTTP request to a index_url\n reqs = requests.get(url)\n except Exception as Ex:\n print('report url execption')\n with open('error_log_file.json', 'a') as outfile:\n json.dump({\"exception_text\": str(Ex), \"url\": str(url), \"methode\": \"get_product_info_from_url\"}, outfile)\n outfile.write(\"\\n\")\n return {}\n # Step 2: Parse the html content\n soup = BeautifulSoup(reqs.text, 'lxml')\n product_info = {}\n try:\n # Step 4: Analyze the HTML tag to extract product infos\n\n for tag in soup.find_all('div', attrs={'class': \"row no-padding FicheArticleRspv\"}):\n title=tag.find('div', attrs={'class': \"col-sm-12\"}).find('h1').text\n t= tag.find('div', attrs={'class': \"row no-padding fa_infos\"}).find('div', attrs={'class': \"col-sm-7 description-produit\"})\n marque=t.find('div', attrs={'class': \"art_marque\"}).text\n r = tag.find('div', attrs={'class': \"row no-padding fa_infos\"}).find('div', attrs={'class': \"col-sm-5\"}).find('div', attrs={'class': \"promobri bri_rj\"})\n promo = r.text if r else \"N/A\"\n info = tag.find('div', attrs={'class': \"row no-padding fa_infos\"}).find('div', attrs={'class': \"col-sm-7 description-produit\"}).text\n info = \" \".join(info.split(\"\\n\"))\n price = tag.find('div', attrs={'class': \"row no-padding fa_infos\"}).find('div', attrs={'class': \"col-sm-5\"}).find('div', attrs={'class': \"art_prix\"}).text\n volume_price = tag.find('div', attrs={'class': \"row no-padding fa_infos\"}).find('div', attrs={'class': \"col-sm-5\"}).find('div', attrs={'class': \"art_prix_volume\"}).text\n product_info = dict(Product=marque,Title=title,product_info=info,promo=promo,price=price,volume_price=volume_price)\n except Exception as ex:\n print('report url execption')\n with open('error_log_file.json', 'a') as outfile:\n json.dump({\"exception_text\": str(ex), \"url\": str(url), \"methode\": \"get_product_info_from_url\"}, outfile)\n outfile.write(\"\\n\")\n pass\n return product_info", "async def get_inventory(request: web.Request, ) -> web.Response:\n return web.Response(status=200)", "def flip_details(bsoup, product, header):\n\n container = bsoup.find_all(\"div\", class_=\"_1AtVbE col-12-12\")\n prod_links = []\n base_url = \"https://www.flipkart.com\"\n for i in container[1:]:\n try:\n link = i.find(\"a\")[\"href\"]\n except Exception as e:\n continue\n if link.find(\"pid\") == -1:\n continue\n prod_links.append(base_url+link)\n\n total_results = len(prod_links)*10\n 
req = requests.get(prod_links[0], headers=header).text\n soup = bs(req, \"lxml\")\n try:\n name = soup.find(\"div\", class_=\"aMaAEs\")\n product_name = name.div.h1.span.text\n except:\n product_name = \" \".join(product).title()\n try:\n rating = name.find(\"div\", class_=\"_3LWZlK\")\n product_rating = rating.text + \" out of 5 stars\"\n except Exception as e:\n product_rating = \"Not Available\"\n try:\n price = name.find(\"div\", class_=\"_30jeq3 _16Jk6d\")\n product_price = price.text + \".00\"\n except Exception as e:\n product_price = \"Sold by Third Party. Please visit the website for more info.\"\n\n return [[\"Product Name\", product_name],\n [\"Product Price\", product_price],\n [\"Product Rating\", product_rating],\n [\"Total Fetched Results\", total_results]]", "def products_list(driver, login_action, open_products_page, products_page, logger):\n try:\n return products_page.all_products_list()\n except logger.on_exception(exception, driver):\n print(exception)", "def get(self):\n\n return {\n 'product': self.request.matchdict['product_name'],\n 'version': self.request.matchdict['product_version'],\n }", "def show_available_products(): # {{{\n products_available = {}\n try:\n with MONGO:\n product_collection = MONGO.connection.assignment_07[\"product\"].find(\n )\n\n for product in product_collection:\n if int(product[\"quantity_available\"]) > 0:\n products_available[product[\"product_id\"]] = {\n \"description\": product[\"description\"],\n \"product_type\": product[\"product_type\"],\n \"quantity_available\": product[\"quantity_available\"],\n }\n except TypeError as excep:\n LOGGER.warning(\"Error looking up available products\")\n LOGGER.warning(excep)\n else:\n if not products_available:\n LOGGER.info('No products found')\n else:\n LOGGER.info(\"Available products retrieved successfully.\")\n return products_available # }}}" ]
[ "0.62623674", "0.61581105", "0.5573586", "0.55546266", "0.55507904", "0.5484503", "0.5441134", "0.5407963", "0.5333419", "0.5302095", "0.5175288", "0.5174018", "0.5140395", "0.5135881", "0.51053953", "0.5032891", "0.50136805", "0.50059825", "0.49878338", "0.4946766", "0.49369532", "0.49003386", "0.48848337", "0.48820823", "0.48459545", "0.4839109", "0.48346123", "0.48236543", "0.48001984", "0.47829416", "0.47597197", "0.4758047", "0.47391766", "0.4714977", "0.47119173", "0.47037905", "0.47025117", "0.47023964", "0.4698723", "0.46787244", "0.46710077", "0.46707305", "0.46679646", "0.46610105", "0.4658855", "0.4654967", "0.46430653", "0.46300435", "0.46298474", "0.4628519", "0.462553", "0.46166253", "0.4614536", "0.45983112", "0.45955545", "0.45890626", "0.45874503", "0.4585168", "0.4584029", "0.4582883", "0.45753312", "0.45747715", "0.45746574", "0.45709953", "0.45694825", "0.4560708", "0.4559543", "0.45580828", "0.45538226", "0.45518896", "0.4550101", "0.4545891", "0.4543381", "0.45425108", "0.45367125", "0.45298156", "0.45103788", "0.45098588", "0.45017943", "0.4499601", "0.44993377", "0.4492562", "0.44899046", "0.44868585", "0.44820592", "0.44809985", "0.4479813", "0.44778085", "0.44748777", "0.44701222", "0.44698566", "0.44693804", "0.44659442", "0.4460914", "0.44602978", "0.44591177", "0.44554776", "0.44551605", "0.44524205", "0.44499558", "0.44465783" ]
0.0
-1
calculate total residual for fits to several data sets held in a 2D array, and modeled by Gaussian functions
def objective(self, params, x, data): # make residual per data set ndata, nx = data.shape resid = 0.0*data[:] resid[0, :] = data[0, :] - self.thermo(params, x) resid[1, :] = data[1, :] - self.density(params, x) # now flatten this to a 1D array, as minimize() needs return resid.flatten()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def residual(pars, data= None):\n\n\t\t\tresid = np.array([])\n\n\n\t\t\t# make residual per data set\n\n\t\t\tfor N in range(n_annulus):\n\n\t\t\t\tmdl_ev = 0\n\t\t\t\tr_space_k = rings_pos[N+1] - rings_pos[N] \n\t\t\t\tmask = np.where( (r_n >= rings_pos[N] ) & (r_n < rings_pos[N+1]) )\n\t\t\t\tx,y = XY_mesh[0][mask], XY_mesh[1][mask] \n\t\t\t\tXY = (x,y)\n\n\n\n\n\t\t\t\tfor kk in range(2):\n\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, N+kk, XY, r_0 = rings_pos[N], r_space = r_space_k)\n\n\t\t\t\t\tmdl_ev = mdl_ev + Vxy[kk]\n\n\n\t\t\t\t\tif N == 0 and kk == 0:\n\t\t\t\t\t\t\n\t\t\t\t\t\tmask1 = np.where( (r_n < rings_pos[0] ) )\n\t\t\t\t\t\tx1,y1 = XY_mesh[0][mask1], XY_mesh[1][mask1] \n\t\t\t\t\t\tXY1 = (x1,y1)\n\n\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\t# inner interpolation\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\n\t\t\t\t\t\t#(a) velocity rise linearly from zero\n\n\t\t\t\t\t\tr_space_0 = rings_pos[0]\n\t\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, 0, XY1, r_0 = 0, r_space = r_space_0)\n\t\t\t\t\t\n\t\t\t\t\t\tinterp_model[mask1] = Vxy[1] + Vsys\n\n\t\t\t\tinterp_model[mask] = mdl_ev + Vsys\n\n\n\n\n\t\t\t\n\t\t\tsigma = np.sqrt(e_vel_map**2 + e_ISM**2)\n\n\n\n\t\t\tconvolved = 0\n\n\t\t\tif convolved == True:\n\n\t\t\t\tmy_beam = Beam(2.5*u.arcsec, 2.5*u.arcsec, 0*u.deg)\n\t\t\t\tpix_scale = pixel_scale * u.arcsec\n\t\t\t\tgauss_kern = my_beam.as_kernel(pix_scale, x_size = nx, y_size = ny)\n\n\n\t\t\t\textend = np.zeros((3*ny,3*nx))\n\t\t\t\textend[ny:2*ny,nx:2*nx] = interp_model\n\t\t\t\tconvolve_extend = convolve_fft(extend, gauss_kern, mask = extend == 0 )\n\t\t\t\tinterp_model_conv = convolve_extend[ny:2*ny,nx:2*nx]\n\t\t\t\tinterp_model_conv[interp_model == 0] = 0\n\n\n\t\t\telse:\n\t\t\t\tinterp_model_conv = interp_model\n\n\n\t\t\tinterp_model_conv[interp_model_conv == 0] = np.nan\n\t\t\tres = vel_map - interp_model_conv\n\t\t\tResid = res/sigma\n\n\t\t\treturn Resid.flatten()", "def residual(pars, data= None):\n\n\t\t\tresid = np.array([])\n\n\n\t\t\t# make residual per data set\n\n\t\t\tfor N in range(n_annulus):\n\n\t\t\t\tmdl_ev = 0\n\t\t\t\tr_space_k = rings_pos[N+1] - rings_pos[N] \n\t\t\t\tmask = np.where( (r_n >= rings_pos[N] ) & (r_n < rings_pos[N+1]) )\n\t\t\t\tx,y = XY_mesh[0][mask], XY_mesh[1][mask] \n\t\t\t\tXY = (x,y)\n\n\n\n\n\t\t\t\tfor kk in range(2):\n\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, N+kk, XY, r_0 = rings_pos[N], r_space = r_space_k)\n\n\t\t\t\t\tmdl_ev = mdl_ev + Vxy[kk]\n\n\n\t\t\t\t\tif N == 0 and kk == 0:\n\t\t\t\t\t\t\n\t\t\t\t\t\tmask1 = np.where( (r_n < rings_pos[0] ) )\n\t\t\t\t\t\tx1,y1 = XY_mesh[0][mask1], XY_mesh[1][mask1] \n\t\t\t\t\t\tXY1 = (x1,y1)\n\n\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\t# inner interpolation\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\n\t\t\t\t\t\t#(a) velocity rise linearly from zero\n\n\t\t\t\t\t\tr_space_0 = rings_pos[0]\n\t\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, 0, XY1, r_0 = 0, r_space = r_space_0)\n\t\t\t\t\t\n\t\t\t\t\t\tinterp_model[mask1] = Vxy[1] + Vsys\n\n\t\t\t\tinterp_model[mask] = mdl_ev + Vsys\n\n\n\n\n\t\t\t\n\t\t\tsigma = np.sqrt(e_vel_map**2 + e_ISM**2)\n\n\n\n\t\t\tconvolved = 0\n\n\t\t\tif convolved == True:\n\n\t\t\t\tmy_beam = Beam(2.5*u.arcsec, 2.5*u.arcsec, 0*u.deg)\n\t\t\t\tpix_scale = pixel_scale * u.arcsec\n\t\t\t\tgauss_kern = my_beam.as_kernel(pix_scale, x_size = nx, y_size = ny)\n\n\n\t\t\t\textend = np.zeros((3*ny,3*nx))\n\t\t\t\textend[ny:2*ny,nx:2*nx] = interp_model\n\t\t\t\tconvolve_extend = convolve_fft(extend, gauss_kern, mask = extend == 0 )\n\t\t\t\tinterp_model_conv = 
convolve_extend[ny:2*ny,nx:2*nx]\n\t\t\t\tinterp_model_conv[interp_model == 0] = 0\n\n\n\t\t\telse:\n\t\t\t\tinterp_model_conv = interp_model\n\n\t\t\tinterp_model[interp_model == 0] = np.nan\n\t\t\tres = vel_map - interp_model_conv\n\t\t\tResid = res/sigma\n\n\n\n\t\t\treturn Resid.flatten()", "def residual(pars, data= None):\n\n\t\t\tresid = np.array([])\n\n\n\t\t\t# make residual per data set\n\n\t\t\tfor N in range(n_annulus):\n\n\t\t\t\tmdl_ev = 0\n\t\t\t\tr_space_k = rings_pos[N+1] - rings_pos[N] \n\t\t\t\tmask = np.where( (r_n >= rings_pos[N] ) & (r_n < rings_pos[N+1]) )\n\t\t\t\tx,y = XY_mesh[0][mask], XY_mesh[1][mask] \n\t\t\t\tXY = (x,y)\n\n\n\n\n\t\t\t\tfor kk in range(2):\n\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, N+kk, XY, r_0 = rings_pos[N], r_space = r_space_k)\n\n\t\t\t\t\tmdl_ev = mdl_ev + Vxy[kk]\n\n\n\t\t\t\t\tif N == 0 and kk == 0:\n\t\t\t\t\t\t\n\t\t\t\t\t\tmask1 = np.where( (r_n < rings_pos[0] ) )\n\t\t\t\t\t\tx1,y1 = XY_mesh[0][mask1], XY_mesh[1][mask1] \n\t\t\t\t\t\tXY1 = (x1,y1)\n\n\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\t# inner interpolation\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\n\t\t\t\t\t\t#(a) velocity rise linearly from zero\n\n\t\t\t\t\t\tr_space_0 = rings_pos[0]\n\t\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, 0, XY1, r_0 = 0, r_space = r_space_0)\n\t\t\t\t\t\n\t\t\t\t\t\tinterp_model[mask1] = Vxy[1] + Vsys\n\n\t\t\t\tinterp_model[mask] = mdl_ev + Vsys\n\n\n\n\n\t\t\t\n\t\t\tsigma = np.sqrt(e_vel_map**2 + e_ISM**2)\n\n\n\n\t\t\tconvolved = 0\n\n\t\t\tif convolved == True:\n\n\t\t\t\tmy_beam = Beam(2.5*u.arcsec, 2.5*u.arcsec, 0*u.deg)\n\t\t\t\tpix_scale = pixel_scale * u.arcsec\n\t\t\t\tgauss_kern = my_beam.as_kernel(pix_scale, x_size = nx, y_size = ny)\n\n\n\t\t\t\textend = np.zeros((3*ny,3*nx))\n\t\t\t\textend[ny:2*ny,nx:2*nx] = interp_model\n\t\t\t\tconvolve_extend = convolve_fft(extend, gauss_kern, mask = extend == 0 )\n\t\t\t\tinterp_model_conv = convolve_extend[ny:2*ny,nx:2*nx]\n\t\t\t\tinterp_model_conv[interp_model == 0] = 0\n\n\n\t\t\telse:\n\t\t\t\tinterp_model_conv = interp_model\n\n\n\t\t\tinterp_model_conv[interp_model_conv == 0] = np.nan\n\t\t\tres = vel_map - interp_model_conv\n\t\t\tResid = res/sigma\n\n\n\t\t\treturn Resid.flatten()", "def residual(pars, data= None):\n\n\t\t\tresid = np.array([])\n\n\n\t\t\t# make residual per data set\n\n\t\t\tfor N in range(n_annulus):\n\n\t\t\t\tmdl_ev = 0\n\t\t\t\tr_space_k = rings_pos[N+1] - rings_pos[N] \n\t\t\t\tmask = np.where( (r_n >= rings_pos[N] ) & (r_n < rings_pos[N+1]) )\n\t\t\t\tx,y = XY_mesh[0][mask], XY_mesh[1][mask] \n\t\t\t\tXY = (x,y)\n\n\n\n\n\t\t\t\tfor kk in range(2):\n\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, N+kk, XY, r_0 = rings_pos[N], r_space = r_space_k )\n\n\t\t\t\t\tmdl_ev = mdl_ev + Vxy[kk]\n\n\n\t\t\t\t\tif N == 0 and kk == 0:\n\t\t\t\t\t\t\n\t\t\t\t\t\tmask1 = np.where( (r_n < rings_pos[0] ) )\n\t\t\t\t\t\tx1,y1 = XY_mesh[0][mask1], XY_mesh[1][mask1] \n\t\t\t\t\t\tXY1 = (x1,y1)\n\n\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\t# inner interpolation\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\n\t\t\t\t\t\t#(a) velocity rise linearly from zero\n\n\t\t\t\t\t\tr_space_0 = rings_pos[0]\n\t\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, 0, XY1, r_0 = 0, r_space = r_space_0)\n\t\t\t\t\t\n\t\t\t\t\t\tinterp_model[mask1] = Vxy[1] + Vsys\n\n\t\t\t\tinterp_model[mask] = mdl_ev + Vsys\n\n\n\n\n\t\t\t\n\t\t\tsigma = np.sqrt(e_vel_map**2 + e_ISM**2)\n\t\t\t#interp_model[interp_model == 0] = np.nan\n\n\n\t\t\tconvolved = 0\n\n\t\t\tif convolved == True:\n\n\t\t\t\tmy_beam = Beam(2.5*u.arcsec, 
2.5*u.arcsec, 0*u.deg)\n\t\t\t\tpix_scale = pixel_scale * u.arcsec\n\t\t\t\tgauss_kern = my_beam.as_kernel(pix_scale, x_size = nx, y_size = ny)\n\n\n\t\t\t\textend = np.zeros((3*ny,3*nx))\n\t\t\t\textend[ny:2*ny,nx:2*nx] = interp_model\n\t\t\t\tconvolve_extend = convolve_fft(extend, gauss_kern, mask = extend == 0 )\n\t\t\t\tinterp_model_conv = convolve_extend[ny:2*ny,nx:2*nx]\n\t\t\t\tinterp_model_conv[interp_model == 0] = 0\n\n\n\n\n\t\t\telse:\n\t\t\t\tinterp_model_conv = interp_model\n\n\t\t\tinterp_model_conv[interp_model_conv == 0] = np.nan\n\t\t\tres = vel_map - interp_model_conv\n\t\t\tResid = res/sigma\n\n\n\t\t\treturn Resid.flatten()", "def postfit_residuals(self) -> NONEARRAY:\n pass", "def fitgaussian(data):\n params = moments(data)\n errorfunction = lambda p: np.ravel(gaussian(*p)(*np.indices(data.shape)) -\n data)\n #p, success = optimize.leastsq(errorfunction, params)\n bnds = (0,30)\n p = optimize.least_squares(errorfunction, params, bounds = bnds).x\n #least square fitting(minimizes raw data and fit)\n return p", "def fitgaussian(self, data):\n params = self.moments(data)\n errorfunction = lambda p: ravel(self.Gauss(*p)(*indices(data.shape)) - data)\n p, success = optimize.leastsq(errorfunction, params)\n return p", "def residuals(self) -> npt.NDArray[np.float64]:\n return self.data - self.theory", "def fitgaussian(data):\n params = moments(data)\n errorfunction = lambda p: ravel(gaussian(*p)(*indices(data.shape)) -\n data)\n p, success = optimize.leastsq(errorfunction, params)\n return p", "def fitgaussian(data):\n params = moments(data)\n errorfunction = lambda p: np.ravel(gaussian(*p)(*np.indices(data.shape)) -\n data)\n p, success = optimize.leastsq(errorfunction, params)\n return p", "def fitgaussian(data):\n params = moments(data)\n errorfunction = lambda p: np.ravel(gaussian(*p)(*np.indices(data.shape)) -\n data)\n p, success = optimize.leastsq(errorfunction, params)\n return p", "def residuals(self, p, data, X):\n err = data - self.fitfunc(X,p)\n return err", "def fitgaussian(data):\n params = moments(data)\n errorfunction = lambda p: np.ravel(gaussian(*p)(*np.indices(data.shape)) -\n data)\n p, success = leastsq(errorfunction, params)\n return p", "def residualsG(p, data):\n \n x, y, err = data\n return (y-funcG(p,x)) / err", "def fit_gaussian_rl(img_dataset,image_data_variable_to_fit='PSF',beam_set_name='RESTORE_PARMS',fit_method='rm_fit',npix_window=[21,21],sampling=[401,401],cutoff=0.5,cutoff_sensitivity=0.003):\n import xarray as xr\n import matplotlib.pyplot as plt\n import dask.array as da\n from scipy.interpolate import interpn\n \n sampling = np.array(sampling)\n npix_window = np.array(npix_window)\n delta = np.array(img_dataset.incr[0:2])*3600*180/np.pi\n chunks = img_dataset[image_data_variable_to_fit].data.chunks[2:] + (3,)\n ellipse_parms = da.map_blocks(rm_fit,img_dataset[image_data_variable_to_fit].data,npix_window,sampling,cutoff,cutoff_sensitivity,delta,dtype=np.double,drop_axis=[0,1],new_axis=[2],chunks=chunks)\n\n img_dataset[beam_set_name] = xr.DataArray(ellipse_parms,dims=['chan','pol','elps_index'])\n return img_dataset", "def residual ( self , dataset , **kwargs ) :\n hdata = self.make_histo ( **kwargs )\n dataset.project ( hdata , ( self.yvar.name , self.xvar.name ) )\n return self.residual_histo ( hdata )", "def gaussian_fit(self):\r\n\r\n self.df5 = pd.DataFrame(columns=['Slit Number', 'Centre', 'Centre_err', 'Sigma', 'Sigma_err', 'FWHM', 'FWHM_err', 'Height', 'Height_err'])\r\n QDot_slits = self.QDot_detection()\r\n\r\n if len(QDot_slits) 
> 0: \r\n self.plot_data = pd.DataFrame(columns=[f\"{QDot_slits[0]}\"], index=self.energies)\r\n else:\r\n self.plot_data = pd.DataFrame(index=self.energies)\r\n\r\n for slit_number in QDot_slits:\r\n sel = self.df4[f'{slit_number}']\r\n self.plot_data[f'{slit_number}'] = sel\r\n \r\n # Makes a good first guess for the fit values of the gaussian\r\n max_intensity = max(sel)\r\n central_energy = sel[sel==max_intensity].index.values\r\n central_energy = central_energy[0]\r\n\r\n # Fits a gaussian model to the selected data and shows the output\r\n gauss = models.GaussianModel()\r\n fit = gauss.fit(sel, x=self.energies, weights=1 / np.sqrt(sel), center = central_energy, amplitude = max_intensity, sigma = 1, nan_policy= 'omit')\r\n \r\n self.plot_data[f'{slit_number} best fit'] = fit.best_fit\r\n\r\n # Appends the fit data for the variables to a new dataframe and shows the fit results with errors\r\n fit_variables = [slit_number]\r\n for key in fit.params:\r\n if key in ['center', 'sigma', 'fwhm', 'height']:\r\n fit_variables.append(fit.params[key].value)\r\n fit_variables.append(fit.params[key].stderr)\r\n \r\n self.df5 = self.df5.append({'Slit Number': fit_variables[0], 'Centre': fit_variables[1], 'Centre_err': fit_variables[2], 'Sigma': fit_variables[3], 'Sigma_err': fit_variables[4], 'FWHM': fit_variables[5], 'FWHM_err': fit_variables[6], 'Height': fit_variables[7], 'Height_err': fit_variables[8]}, ignore_index=True)\r\n \r\n return self.plot_data, self.df5", "def _residuals(params: List[float], xs: np.ndarray, ys: np.ndarray) -> float:\n return _model(params=params, xs=xs) - ys", "def residLike(self):\n\n # --------------------------------------------------------------------------------------------- #\n # Compute the residuals\n if self.csys == 'GAL':\n # Redo some file computations with this coordinate system\n self.outbinexp = os.path.join(self.workpath, 'BinExpMapGAL'+self.suffix+'.fits')\n self.outbincub = os.path.join(self.workpath, 'BinCubeGAL'+self.suffix+'.fits')\n self.outsrcmap = os.path.join(self.workpath, 'SrcMapsGAL'+self.suffix+'.fits')\n self.outresid = os.path.join(self.workpath, 'ResidGAL'+self.suffix+'.fits')\n self.outresig = os.path.join(self.workpath, 'ResSigmaGAL'+self.suffix+'.fits')\n\n self._gtExpmap()\n self._gtBincube()\n self._gtSrcmap()\n else:\n # Nothing to add\n pass\n \n self._gtBinmap()\n self._gtModel()\n # Create the residual count map (count_map - model_map)\n if not os.path.isfile(self.outresid):\n os.popen(\"farith {} {} {} ops=SUB\".format(self.outbinmap, self.outgtmod,\n self.outresid))\n # Create the sigma-residual map (residual_map/sqrt(model_map))\n if not os.path.isfile(self.outresig):\n os.popen(\"ftpixcalc {} '(a-b)/sqrt(b)' a={} b={}\".format(self.outresig,\n self.outbinmap, self.outgtmod))\n\n # --------------------------------------------------------------------------------------------- #\n # Get the sources to overplot\n srcs = self.getSrc()\n srcs = srcs[(srcs['Separation'] <= 3.) 
& ([not i.endswith('c') for i in srcs['Name']])]\n # Plot the residuals\n resplt1 = FermiMap()\n resplt1.savepath = self.workpath\n resplt1.image = self.outresig\n resplt1.figname = 'ResSigma.pdf'\n dmin, dmax = np.abs(resplt1.datamin), resplt1.datamax\n resplt1.datamin = - min(dmin, dmax)\n resplt1.datamax = + min(dmin, dmax)\n resplt1.cbarlabel = r'Residual $\\sigma$/pixel'\n resplt1.mapSky()\n resplt1.srcSky(srcs['RA'], srcs['DEC'], srcs['Name'])\n resplt1.save()\n print(\"\\t=== Figure '{}' created ===\".format( os.path.join(resplt1.savepath, resplt1.figname) ))\n\n resplt2 = FermiMap()\n resplt2.savepath = self.workpath\n resplt2.image = self.outresid\n resplt2.figname = 'Residuals.pdf'\n dmin, dmax = np.abs(resplt2.datamin), resplt2.datamax\n resplt2.datamin = - min(dmin, dmax)\n resplt2.datamax = + min(dmin, dmax)\n resplt2.cbarlabel = r'Residual counts/pixel'\n resplt2.mapSky()\n resplt2.srcSky(srcs['RA'], srcs['DEC'], srcs['Name'])\n resplt2.save()\n print(\"\\t=== Figure '{}' created ===\".format( os.path.join(resplt2.savepath, resplt2.figname) ))\n return", "def run_many_fits(spectrum,rms,guesses,nruns):\n tk_fit = []\n tex_fit = []\n ntot_fit = []\n width_fit = []\n for i in range(nruns):\n noisy_spectrum = add_noise(spectrum,rms)\n noisy_spectrum.specfit(fittype='cold_ammonia',guesses=guesses,fixed=[F,F,F,F,F,T])\n parcopy = copy.deepcopy(noisy_spectrum.specfit.parinfo)\n tk_fit = np.append(tk_fit,parcopy[0].value)\n tex_fit = np.append(tex_fit,parcopy[1].value)\n ntot_fit = np.append(ntot_fit,parcopy[2].value)\n width_fit = np.append(width_fit,parcopy[3].value)\n return tk_fit,tex_fit,ntot_fit,width_fit", "def plotAllResiduals():\n #Joint fits\n files = g.glob('results/J*.fits')\n individuals = [file for file in files if 'datafit' in file]\n for file in individuals:\n id = file.replace('results/', '').replace('datafit.fits', '')\n print 'processing:', id\n _plotModelResiduals(id=id, folder='results/', out='results/%sResidual.pdf' % id)\n\n #Individual fits\n files = g.glob('results/I*.fits')\n individuals = [file for file in files if 'model' in file]\n for file in individuals:\n id = file.replace('results/', '').replace('model.fits', '')\n print 'processing:', id\n _plotModelResiduals(id=id, folder='results/', out='results/%sResidual.pdf' % id, individual=True)", "def fitgaussian(data):\n params = moments(data)\n errorfunction = lambda p: np.ravel(gaussian_norot(*p)(*np.indices(data.shape)) -\n data)\n #scipy rihjt\n # Levenberg-Marquadt algorithm -> leastsq\n #bnds = None\n height, x, y, width_x, width_y, he1, x1,y1, wi1, wi2 = params\n #p, success = optimize.leastsq(errorfunction, params)\n bnds = (0,30)\n p = optimize.least_squares(errorfunction, params, bounds = bnds).x\n \n #least square fitting(minimizes raw data and fit)\n\n if(p[0] < 1 and p[5] < 1 and p[1] > 0 and p[1] < 30 and p[2] > 0 and p[2] < 30 and p[6] > 0 and p[6] < 30 and p[7] > 0 and p[7] < 30):\n #print(\"pass case\")\n return(p)\n else:\n print(\"failed case\")\n print(\"height1\", p[0],\"height2\", p[5], \"X\", p[1],\"Y\", p[2],\"Y1\", p[6], \"Y2\", p[7])\n print(\"bounding error\" + str(numero)) \n\n return p", "def _residual_edp(self, params):\n data = self.F**2\n model = np.absolute(self._model())**2\n sigma = self.sigma\n return (data[self.mask]-model[self.mask]) / sigma[self.mask] \n \n # The following three lines do not reproduce Sun's results, which proves\n # that the fits were done through intensity, not form factor.\n #data = self.F\n #model = np.absolute(self._model())\n #return (data - model) 
", "def getResiduals(self):\n X = np.zeros((self.N*self.P,self.n_fixed_effs))\n ip = 0\n for i in range(self.n_terms):\n Ki = self.A[i].shape[0]*self.F[i].shape[1]\n X[:,ip:ip+Ki] = np.kron(self.A[i].T,self.F[i])\n ip += Ki\n y = np.reshape(self.Y,(self.Y.size,1),order='F')\n RV = regressOut(y,X)\n RV = np.reshape(RV,self.Y.shape,order='F')\n return RV", "def fit_gaussian(self, mask=None):\n data = self.data\n mask = numpy.logical_or(mask, numpy.ma.getmaskarray(data))\n fdata = data[~mask].data\n xdata = numpy.asarray([cm[~mask]\n for cm in self.bset.cmesh]).transpose()\n scale, mean, cov = fit_ndgaussian(xdata, fdata)\n return scale, mean, cov", "def postfit_residuals(self) -> NONEARRAY:\n if self._successful:\n return self._postfit_residuals\n else:\n return None", "def rls_fit0(xdata: np.ndarray,\n ydata: np.ndarray | ma.MaskedArray) -> tuple:\n if xdata.size < 2:\n raise RuntimeError('too few points for a fit')\n if xdata.size != ydata.shape[-1]:\n raise RuntimeError('number of samples not equal for xdata, ydata')\n\n # perform all computations on 2 dimensional arrays\n img_shape = ydata.shape[:-1]\n yy1 = ydata.reshape(-1, xdata.size)\n\n # calculate weights\n if ma.isMaskedArray(ydata):\n wghts = calc_ma_weights(xdata, ma.getmaskarray(yy1))\n else:\n buff = np.concatenate(([2 * (xdata[1] - xdata[0])],\n xdata[2:] - xdata[0:-2],\n [2 * (xdata[-1] - xdata[-2])]))\n wghts = np.repeat([buff], yy1.shape[0], axis=0)\n wx1 = wghts / xdata\n wx2 = wghts / xdata ** 2\n\n # calculate the Q elements\n q00 = wghts.sum(axis=1)\n q11 = (wx1 * yy1).sum(axis=1)\n q22 = (wx2 * yy1 ** 2).sum(axis=1)\n\n # calculate fit parameter and its variance\n num = yy1.count(axis=1) if ma.isMaskedArray(ydata) else len(xdata)\n cc1 = q11 / q00\n if ma.isMaskedArray(ydata):\n cc1[num < 1] = ma.masked\n chi2 = ma.abs(q22 - q00 * cc1 ** 2) / np.clip(num - 1, 1, None)\n chi2[num <= 1] = ma.masked\n sc1 = ma.sqrt(chi2 / q00)\n return (cc1.reshape(img_shape).filled(np.nan),\n sc1.reshape(img_shape).filled(np.nan))\n\n # using only non-MaskedArray functions\n cc1[num < 1] = np.nan\n chi2 = np.abs(q22 - q00 * cc1 ** 2) / np.clip(num - 1, 1, None)\n chi2[num <= 1] = np.nan\n sc1 = np.sqrt(chi2 / q00)\n return cc1.reshape(img_shape), sc1.reshape(img_shape)", "def fit_gaussian(x, y, z):\n\n def sym_gaussian(p):\n \"\"\"\n Returns a Gaussian function:\n a**2 * exp(-((x - x_0)**2 + (y - y_0)**2) / (2 * sigma**2))\n p = [a, x_0, y_0, sigma]\n \"\"\"\n a, x_0, y_0, sigma = p\n return a**2 \\\n * np.exp(-((x - x_0)**2 + (y - y_0)**2) / (2.0 * sigma**2))\n\n def sym_gaussian_resids(p):\n \"\"\"Residuals to be sent into leastsq\"\"\"\n return z - sym_gaussian(p)\n\n def guess_fit_gaussian():\n \"\"\"\n return a, x_0, y_0, and sigma based on computing moments of data\n \"\"\"\n a = z.max()\n\n # Compute moments\n total = z.sum()\n x_0 = np.dot(x, z) / total\n y_0 = np.dot(y, z) / total\n\n # Approximate sigmas\n sigma_x = np.dot(x**2, z) / total\n sigma_y = np.dot(y**2, z) / total\n sigma = np.sqrt(sigma_x * sigma_y)\n\n # Return guess\n return (a, x_0, y_0, sigma)\n\n # Get guess\n p0 = guess_fit_gaussian()\n\n # Perform optimization using nonlinear least squares\n popt, junk_output, info_dict, mesg, ier = \\\n scipy.optimize.leastsq(sym_gaussian_resids, p0, full_output=True)\n\n # Check to make sure leastsq was successful. 
If not, return centroid\n # estimate.\n if ier in (1, 2, 3, 4):\n return (popt[0]**2, popt[1], popt[2], popt[3])\n else:\n return p0", "def amasslinregress(*args):\r\n TINY = 1.0e-20\r\n if len(args) == 1: # more than 1D array?\r\n args = args[0]\r\n if len(args) == 2:\r\n x = N.ravel(args[0])\r\n y = args[1]\r\n else:\r\n x = N.ravel(args[:,0])\r\n y = args[:,1]\r\n else:\r\n x = args[0]\r\n y = args[1]\r\n x = x.astype(N.float_)\r\n y = y.astype(N.float_)\r\n n = len(x)\r\n xmean = amean(x)\r\n ymean = amean(y,0)\r\n shp = N.ones(len(y.shape))\r\n shp[0] = len(x)\r\n x.shape = shp\r\n print x.shape, y.shape\r\n r_num = n*(N.add.reduce(x*y,0)) - N.add.reduce(x)*N.add.reduce(y,0)\r\n r_den = N.sqrt((n*ass(x) - asquare_of_sums(x))*(n*ass(y,0)-asquare_of_sums(y,0)))\r\n zerodivproblem = N.equal(r_den,0)\r\n r_den = N.where(zerodivproblem,1,r_den) # avoid zero-division in 1st place\r\n r = r_num / r_den # need to do this nicely for matrix division\r\n r = N.where(zerodivproblem,0.0,r)\r\n z = 0.5*N.log((1.0+r+TINY)/(1.0-r+TINY))\r\n df = n-2\r\n t = r*N.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))\r\n prob = abetai(0.5*df,0.5,df/(df+t*t))\r\n\r\n ss = float(n)*ass(x)-asquare_of_sums(x)\r\n s_den = N.where(ss==0,1,ss) # avoid zero-division in 1st place\r\n slope = r_num / s_den\r\n intercept = ymean - slope*xmean\r\n sterrest = N.sqrt(1-r*r)*asamplestdev(y,0)\r\n return slope, intercept, r, prob, sterrest, n", "def fit_gaussian(array):\n\n shape = array.shape\n xmean, ymean = numpy.array(shape) / 2.\n\n xx, yy = numpy.mgrid[:shape[0], :shape[1]]\n\n g_init = astropy.modeling.models.Gaussian2D(amplitude=1., x_mean=xmean, y_mean=ymean,\n x_stddev=1., y_stddev=1.)\n\n f2 = astropy.modeling.fitting.LevMarLSQFitter()\n\n gg = f2(g_init, xx, yy, array)\n\n return gg", "def rls_fit(xdata: np.ndarray,\n ydata: np.ndarray | ma.MaskedArray) -> tuple:\n # pylint: disable=too-many-locals\n if xdata.size < 2:\n raise RuntimeError('too few sample points for a fit')\n if xdata.size != ydata.shape[-1]:\n raise RuntimeError('number of samples not equal for xdata, ydata')\n\n # perform all computations on 2 dimensional arrays\n img_shape = ydata.shape[:-1]\n yy1 = ydata.reshape(-1, xdata.size)\n\n # calculate weights\n if ma.isMaskedArray(ydata):\n wghts = calc_ma_weights(xdata, ma.getmaskarray(yy1))\n else:\n buff = np.concatenate(([2 * (xdata[1] - xdata[0])],\n xdata[2:] - xdata[0:-2],\n [2 * (xdata[-1] - xdata[-2])]))\n wghts = np.repeat([buff], yy1.shape[0], axis=0)\n wx1 = wghts / xdata\n wx2 = wghts / xdata ** 2 # is wx1 / xdata faster?\n\n # calculate the Q elements\n q00 = wghts.sum(axis=1)\n q01 = wx1.sum(axis=1)\n q02 = wx2.sum(axis=1)\n\n q11 = (wx1 * yy1).sum(axis=1)\n q12 = (wx2 * yy1).sum(axis=1)\n q22 = (wx2 * yy1 ** 2).sum(axis=1)\n\n # calculate the Z elements\n zz1 = q00 * q02 - q01 ** 2\n zz2 = q00 * q12 - q01 * q11\n zz3 = q02 * q11 - q01 * q12\n\n # calculate fit parameters and their uncertainties\n num = yy1.count(axis=1) if ma.isMaskedArray(ydata) else len(xdata)\n cc0 = zz2 / zz1\n cc1 = zz3 / zz1\n if ma.isMaskedArray(ydata):\n chi2 = ma.abs(q22 - q12 * cc0 - q11 * cc1) / np.clip(num - 2, 1, None)\n chi2[num <= 2] = 0\n sc0 = ma.sqrt(q00 * chi2 / zz1)\n sc1 = ma.sqrt(q02 * chi2 / zz1)\n\n return (cc0.reshape(img_shape).filled(np.nan),\n cc1.reshape(img_shape).filled(np.nan),\n sc0.reshape(img_shape).filled(np.nan),\n sc1.reshape(img_shape).filled(np.nan))\n\n # using only non-MaskedArray functions\n chi2 = np.abs(q22 - q12 * cc0 - q11 * cc1) / np.clip(num - 2, 1, None)\n chi2[num <= 2] = 0\n 
sc0 = np.sqrt(q00 * chi2 / zz1)\n sc1 = np.sqrt(q02 * chi2 / zz1)\n\n return (cc0.reshape(img_shape), cc1.reshape(img_shape),\n sc0.reshape(img_shape), sc1.reshape(img_shape))", "def _fit_err(x_data, y_data, formula_function, coefficient_vector):\n\tsum = 0;\n\tm = 1.0 / len(y_data)\n\tfor n in range(0,len(x_data)):\n\t\tt = x_data[n]\n\t\tobs = y_data[n]\n\t\tsim = formula_function(*( t, coefficient_vector ) )\n\t\ter = sim - obs\n\t\tsum += er * er * m\n\treturn sum", "def test_regress_residuals(self):\r\n x = [1.0, 2.0, 3.0, 4.0, 5.0]\r\n y = [2.1, 4.2, 5.9, 8.4, 9.6]\r\n result = regress_residuals(x, y)\r\n self.assertFloatEqual(result, [-0.1, 0.08, -0.14, 0.44, -0.28])", "def fit(self, X, Y):\n\n # copy since this will contains the residuals (deflated) matrices\n check_consistent_length(X, Y)\n X = check_array(X, dtype=np.float64, copy=True)\n Y = check_array(Y, dtype=np.float64, copy=True, ensure_2d=False)\n if Y.ndim == 1:\n Y = Y.reshape(-1, 1)\n\n n = X.shape[0]\n p = X.shape[1]\n q = Y.shape[1]\n\n if self.n_components < 1 or self.n_components > p:\n raise ValueError('Invalid number of components: %d' %\n self.n_components)\n # Scale (in place)\n X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ = (\n _center_scale_xy(X, Y, self.scale))\n # Residuals (deflated) matrices\n Xk = X.copy()\n Yk = Y.copy()\n# STEP 1\n self.x_params = params_initialize(kind=self.x_kind)\n self.y_params = params_initialize(kind=self.y_kind)\n # Results matrices\n# STEP 2\n self.x_scores_ = np.zeros((n, self.n_components))\n self.y_scores_ = np.zeros((n, self.n_components))\n self.x_weights_ = np.zeros((p, self.n_components))\n self.y_weights_ = np.zeros((q, self.n_components))\n self.x_loadings_ = np.zeros((p, self.n_components))\n self.y_loadings_ = np.zeros((q, self.n_components))\n self.n_iter_ = []\n\n # NIPALS algo: outer loop, over components\n# STEP 3\n for k in range(self.n_components):\n if np.all(np.dot(Yk.T, Yk) < np.finfo(np.double).eps):\n # Yk constant\n warnings.warn('Y residual constant at iteration %s' % k)\n break\n # 1) weights estimation (inner loop)\n # -----------------------------------\n# STEP 17\n x_weights, y_weights, n_iter_ = \\\n _nipals_twoblocks_inner_loop(\n X=Xk, Y=Yk, max_iter=self.max_iter,\n tol=self.tol, x_kind=self.x_kind, y_kind=self.y_kind,\n x_params=self.x_params, y_params=self.y_params, flag_first_iter=(k == 0),\n learning_rate=self.learning_rate)\n self.n_iter_.append(n_iter_)\n # Forces sign stability of x_weights and y_weights\n # Sign undeterminacy issue from svd if algorithm == \"svd\"\n # and from platform dependent computation if algorithm == 'nipals'\n x_weights, y_weights = svd_flip(x_weights, y_weights.T)\n y_weights = y_weights.T\n # compute scores\n \n Xk_hat = f(Xk, kind=self.x_kind, params=self.x_params)\n Yk_hat = f(Yk, kind=self.y_kind, params=self.y_params)\n \n x_scores = np.dot(Xk_hat, x_weights)\n y_ss = np.dot(y_weights.T, y_weights)\n y_scores = np.dot(Yk_hat, y_weights) / y_ss\n # test for null variance\n if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:\n warnings.warn('X scores are null at iteration %s' % k)\n break\n # 2) Deflation (in place)\n # ----------------------\n # Possible memory footprint reduction may done here: in order to\n # avoid the allocation of a data chunk for the rank-one\n # approximations matrix which is then subtracted to Xk, we suggest\n # to perform a column-wise deflation.\n #\n# STEP 19\n x_loadings = np.dot(Xk_hat.T, x_scores) / np.dot(x_scores.T, x_scores)\n y_loadings = (np.dot(Yk_hat.T, 
x_scores)\n / np.dot(x_scores.T, x_scores))\n # - regress Xk's on x_score\n # - subtract rank-one approximations to obtain remainder matrix\n# STEP 22\n Xk_hat -= np.dot(x_scores, x_loadings.T)\n # - regress Yk's on x_score, then subtract rank-one approx.\n# STEP 23\n Yk_hat -= np.dot(x_scores, y_loadings.T)\n# STEP 24\n Xk = finv(Xk_hat, kind=self.x_kind, params=self.x_params)\n Yk = finv(Yk_hat, kind=self.y_kind, params=self.y_params)\n # 3) Store weights, scores and loadings # Notation:\n self.x_scores_[:, k] = x_scores.ravel() # T\n self.y_scores_[:, k] = y_scores.ravel() # U\n self.x_weights_[:, k] = x_weights.ravel() # W\n self.y_weights_[:, k] = y_weights.ravel() # C\n self.x_loadings_[:, k] = x_loadings.ravel() # P\n self.y_loadings_[:, k] = y_loadings.ravel() # Q\n # Such that: X = TP' + Err and Y = UQ' + Err\n\n # 4) rotations from input space to transformed space (scores)\n # T = X W(P'W)^-1 = XW* (W* : p x k matrix)\n # U = Y C(Q'C)^-1 = YC* (C* : q x k matrix)\n self.x_rotations_ = np.dot(\n self.x_weights_,\n pinv2(np.dot(self.x_loadings_.T, self.x_weights_),\n check_finite=False))\n if Y.shape[1] > 1:\n self.y_rotations_ = np.dot(\n self.y_weights_,\n pinv2(np.dot(self.y_loadings_.T, self.y_weights_),\n check_finite=False))\n else:\n self.y_rotations_ = np.ones(1)\n\n # Estimate regression coefficient\n # Regress Y on T\n # Y = TQ' + Err,\n # Then express in function of X\n # Y = X W(P'W)^-1Q' + Err = XB + Err\n # => B = W*Q' (p x q)\n self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)\n # self.coef_ = self.coef_ * self.y_std_\n return self", "def residuals(x, y, filename):\n empirical_data = y\n #print(x)\n # call convert function\n\n ans = (empirical_data - run_model(x, filename))/empirical_data * 100\n #print(ans)\n return ans", "def residuals(data: DataVector, theory: TheoryVector) -> npt.NDArray[np.float64]:\n assert isinstance(data, DataVector)\n assert isinstance(theory, TheoryVector)\n return (data - theory).view(np.ndarray)", "def residuals_Arr(self, p, data, x):\n err = data - self.Arr(x,p)\n return err", "def residual_G1D(pars, x, data=None, eps=None):\n\tparvals = pars.valuesdict() # a Parameters() object is passed as \"pars\"\n\tintensity_max = parvals[\"I_zero\"]\n\tcenterposition = parvals[\"r_zero\"]\n\tbeamwidth = parvals[\"omega_zero\"]\n\tbgr = parvals[\"backgr\"]\n\tmodel = intensity_max*np.exp(-2*np.power(x-centerposition,2)/beamwidth**2) + bgr\n\tif data is None:\n\t\treturn np.array(model)\n\tif eps is None:\n\t\treturn np.array(model - data)\n\treturn np.array((model - data)/eps)", "def fitting_residual(x: np.ndarray, y: np.ndarray, func, args, mask_min: float = None,\n standardized: bool = False) -> np.ndarray:\n y_predict = func(x, *args)\n res = np.subtract(y, y_predict)\n norm = np.std(res)\n if mask_min is not None:\n res[np.where(abs(res) < mask_min)] = 0\n if standardized and norm != 0:\n res /= norm\n return res", "def fit(self, X):", "def _generate_residuals(exog, endog, bandwidth=0.05):\n # Turn input data into np.ndarrays.\n exog = np.array(exog)\n endog = np.array(endog)\n\n # Determine number of observations and number of columns of the\n # outcome variable.\n n = endog.shape[0]\n\n # *y* is a column vector\n if endog.ndim == 1:\n y_fit = loess(exog, endog, span=bandwidth, degree=1)\n y_fit.fit()\n res = y_fit.outputs.fitted_residuals\n\n else:\n columns = endog.shape[1]\n res = np.zeros([n, columns])\n\n for col in range(columns):\n y_fit = loess(exog, endog[:, col], span=bandwidth, degree=1)\n y_fit.fit()\n res[:, col] = 
y_fit.outputs.fitted_residuals\n\n return res", "def residuals(self):\r\n return self.__residuals", "def fit(x_array, y_array, function, A_start):\n param = (x_array, y_array, function)\n\n A_final, cov_x, infodict, mesg, ier = leastsq(minimize, A_start, args=param, full_output=True)#, warning=True)\n \n return A_final", "def _compute_residuals(self):\n residuls = self.I - self.E\n return residuls", "def error_MSE(resid):\n if resid.ndim == 2:\n return (norm(np.asarray(resid).ravel())**2)/float(resid.shape[1])\n elif resid.ndim == 1:\n return (norm(np.asarray(resid).ravel())**2)\n else:\n raise Exception(\"array passed to error_MSE has incorrect shape\")", "def reg2():\n data2 = np.load(\"./data/measure4_1.npy\")[2:]\n\n x2 = np.arange(0,len(data2),1)\n\n fit = True \n redistribute = True \n\n #x2 = 1.3149710372035508*x2 -22.617788714272098\n c2 = np.where(x2 < 135)\n\n data = data2[c2] \n x = x2[c2]\n print(\"datapoints:\",len(data))\n\n mass = 79/251/6080*52658\n if redistribute == True:\n\n # conserving the mass\n total_mass = mass * len(data)\n remaining = (data > 0)\n\n while True:\n print(\"new redistributing ...\")\n print(\"total mass:\",total_mass)\n # find those which are smaller\n q = (data[remaining] <= mass)\n remaining = ~q\n if len(np.nonzero(q)[0]) == 0:\n data[remaining] -= mass\n break\n print(\"number of smaller values:\",len(np.nonzero(q)[0]),\"\\n\")\n # subtract the mass of this data\n total_mass -= np.sum(data[q])\n mass = total_mass / len(np.nonzero(~remaining)[0]) \n data[q] = 0\n\n # redistribute total remaining mass to single channels\n print(\"number of nonzero:\",len(np.nonzero(data)[0]))\n\n c = np.nonzero(data) \n data = data[c]\n x = x[c]\n\n #scaling to time units\n x = 6.3149710372035508*x -22.617788714272098\n c = (x>0)\n x = x[c]\n data = data[c]\n\n x = x[::-1] - min(x)\n\n\n error = np.sqrt(data) \n # only fit for x < 135\n fig = plt.figure()\n ax = plt.subplot(111)\n plt.grid(True)\n\n if fit==True:\n\n def func(x, *p):\n a,b,c = p\n return a + b * c**x\n\n # p0 is the initial guess for the fitting coefficients \n p0 = [1., 1., 1.]\n\n p, cov = curve_fit(func, x, data, p0=p0, sigma = error)\n p_uc = uc.correlated_values(p, cov)\n c = p_uc[2]\n\n T12_lit = 98 \n lamb_lit = -(np.log(2)/T12_lit)\n print(\"lit\",lamb_lit)\n \n\n lamb = umath.log(c)\n print(lamb)\n T12 = -np.log(2) /lamb \n print(\"t12=\",T12)\n\n x_fit = np.linspace(min(x),max(x))\n\n data_fit = func(x_fit,*p) \n pmin = (p - np.sqrt(np.diag(cov)))\n pmax = (p + np.sqrt(np.diag(cov)))\n\n data_fit_min = func(x_fit, *pmin)\n data_fit_max = func(x_fit, *pmax)\n\n plt.plot(x_fit,data_fit)\n plt.plot(x_fit,90*np.exp(x_fit * lamb_lit))\n plt.fill_between(x_fit, data_fit_min , data_fit_max,facecolor=\"r\", color=\"b\", alpha=0.3 )\n\n # place a text box in upper left in axes coords\n props = dict(boxstyle='round', facecolor='white', alpha=0.5)\n textstr = '$a + b \\cdot c^x$ with\\n$a=%.2f$\\n$b=%.2f$\\n$c=%.2f$'%(p[0], p[1],p[2])\n ax.text(0.6, 0.85, textstr, transform=ax.transAxes, fontsize=18, va='top', bbox=props)\n\n ax.xaxis.set_tick_params(labelsize = 14)\n ax.yaxis.set_tick_params(labelsize = 14)\n\n ax.add_patch(plt.Rectangle((0,0.1),155,100,alpha = 0.2))\n\n plt.errorbar(x,data, yerr=error,fmt=\"x\")\n #plt.scatter(x,data,c=\"blue\",alpha = 0.9,s=100, marker=\"x\")\n plt.ylim(min(data)*0.8,max(data))\n #plt.yscale(\"log\")\n plt.xlim(min(x)*0.8,max(x))\n plt.xlabel(\"time in $ns$\", fontsize = 14)\n plt.ylabel(\"counts\", fontsize = 14)\n make_fig(fig,1,1,name=\"plot4_1_reg\")", "def 
calcResiduals(self, params)->np.ndarray:\r\n if self._selectedIdxs is None:\r\n self._updateSelectedIdxs()\r\n dataArr = ModelFitterCore.runSimulationNumpy(parameters=params,\r\n modelSpecification=self.roadrunnerModel,\r\n startTime=self.observedTS.start,\r\n endTime=self.endTime,\r\n numPoint=self.numPoint,\r\n selectedColumns=self.selectedColumns,\r\n _logger=self.logger,\r\n _loggerPrefix=self._loggerPrefix)\r\n if dataArr is None:\r\n residualsArr = np.repeat(LARGE_RESIDUAL, len(self._observedArr))\r\n else:\r\n truncatedArr = dataArr[self._selectedIdxs, 1:]\r\n truncatedArr = truncatedArr.flatten()\r\n residualsArr = self._observedArr - truncatedArr\r\n if self._isObservedNan:\r\n residualsArr = np.nan_to_num(residualsArr)\r\n return residualsArr", "def residuals_(self):\n return self._residuals", "def gauss_fit(seld, data=''):\n mean, std = norm.fit(data)\n return mean, std", "def _fit_ridge_alpha(trn_fs,trn_data,val_fs,val_data,alphas=DEFAULT_ALPHAS,\n chunk_sz=5000,is_efficient=True,dtype=np.single, is_verbose=False, pthr=0.005,\n square_alpha=False,return_resids=False): \n n_tps,n_voxels = trn_data.shape\n n_chunks = np.ceil(n_voxels/np.float(chunk_sz)).astype(np.int32)\n cc = np.zeros((n_voxels,len(alphas)),dtype=dtype)\n if return_resids:\n resids = np.zeros((n_tps,n_voxels,len(alphas)),dtype=dtype)\n pred_A = []\n if is_efficient:\n # Efficient Ridge regression from A. Huth, Part (1):\n # Full multiplication for validation (here, random split of\n # training data) prediction is: \n # pred = (Xval*Vx) * Dx * (pinv(Ux)*Ychunk) # NOTE: pinv(Ux) = Ux'\n # We will pre-compute the first and third terms in parentheses:\n # pred = XvalVx * Dx * UxYchunk\n if is_verbose: \n print('->Doing SVD of stimulus design matrix')\n t0 = time.time()\n #time.sleep(.01); # To ensure printing?\n m,n = trn_fs.shape\n if m>n:\n Ux,Sx,Vx = _utils._svd(trn_fs,full_matrices=False)\n else:\n Vx,Sx,Ux = _utils._svd(trn_fs.T,full_matrices=False)\n # Switcheroo of Vx and Ux due to transpose of input matrix\n Ux = Ux.T\n Vx = Vx.T\n\n if is_verbose:\n t1 = time.time()\n print('->Done with SVD in %0.2f sec'%(t0-t1))\n # For more efficient computation:\n #k = len(Sx) \n ## OR: \n ## singcutoff = (XX);\n ## k = sum(sx > singcutoff);\n ## sx = sx(1:k);\n XvalVx = val_fs.dot(Vx.T) # NOTE: IN MATLAB, No Vx', because Matlab leaves V in transposed form!\n else:\n raise NotImplementedError(\"Sorry, not done yet!\")\n\n for iChunk in range(n_chunks):\n print('Running chunk %d of %d...\\n'%(iChunk+1,n_chunks))\n ChIdx = np.arange(chunk_sz) + chunk_sz*iChunk\n ChIdx = ChIdx[ChIdx<n_voxels] # clip extra voxels in last run.\n Ychunk = trn_data[:,ChIdx]\n\n # Fit model with all lambdas (for subset of voxels)\n if not is_efficient:\n raise Exception('LAME! no slow reliable ridge implemented.')\n #[Wt L] = ridgemulti(X,Ychunk,params.lambdas);\n else:\n # Efficient Ridge regression from A. Huth, part (2)\n # NOTE: weights are never explicitly computed!\n UxYchunk = Ux.T.dot(Ychunk)\n \n if is_verbose:\n print('Checking model predictions...')\n for iA,A in enumerate(alphas):\n if not is_efficient:\n pred = np.cast(np.single)[Xval.dot(Wt[:,:,iA])]\n else:\n # Efficient Ridge regression from A. Huth, part (3)\n # Normalize lambda by Frobenius norm for stim matrix\n aX = A # * norm(X,'fro'); # ... 
or not\n # Need to decide for final whether aX**2 or not\n if square_alpha:\n Dx = Sx/(Sx**2 + aX**2) \n else:\n Dx = Sx/(Sx**2 + aX) \n # Compute predicitons (XvalVx and UxYchunk computed above)\n # (mult diag is slightly faster than matrix multiplication in timing tests)\n pred = _utils.mult_diag(Dx, XvalVx, left=False).dot(UxYchunk) \n # Compute prediction accuracy (correlations)\n cc[ChIdx,iA]=_sutils.column_corr(pred,val_data[:,ChIdx])\n if return_resids:\n resids[:,ChIdx,iA] = val_data[:,ChIdx]-pred\n if return_resids:\n return cc,resids\n else:\n return cc", "def _residuals(self):\n if self.model_fit is None:\n self._uvts_cls_logger.error(\"No model has been fitted, residuals cannot be computed!\")\n sys.exit(\"STOP\")\n\n try:\n # use fittedvalues to fill in the model dictionary\n self.residuals = pd.Series(np.asarray(self._train_dt['y']) - np.asarray(self.fittedvalues).flatten(),\n index=self._train_dt['y'].index)\n self.upper_whisker_res = self.residuals.mean() + 1.5 * (\n self.residuals.quantile(0.75) - self.residuals.quantile(0.25))\n except (KeyError, AttributeError):\n self._uvts_cls_logger.exception(\"Exception occurred: Model was not fitted or ts has other structure\")\n\n return self", "def residmapComparison():\n srcmap001 = fits.open('dataFiles/6gev_srcmap_001.fits')\n srcmap03 = fits.open('dataFiles/6gev_srcmap_03.fits')\n\n image_data = fits.getdata('dataFiles/6gev_image.fits')\n filename = get_pkg_data_filename('dataFiles/6gev_image.fits')\n hdu = fits.open(filename)[0]\n wcs = WCS(hdu.header)\n\n #Given the results of the fit, calculate the model\n modelData001 = np.zeros(srcmap001[0].shape)\n modelData03 = np.zeros(srcmap03[0].shape)\n\n file = open('plotsData/fitResults001.pk1','rb')\n fit001 = pickle.load(file)\n file.close()\n\n file = open('plotsData/fitResults03.pk1','rb')\n fit03 = pickle.load(file)\n file.close()\n\n\n for source in fit001:\n the_index = srcmap001.index_of(source)\n\n modelData001 += fit001[source][:, None, None]*srcmap001[the_index].data[:-1, :, :]/np.sum(np.sum(srcmap001[the_index].data, axis=2), axis=1)[:-1, None, None]\n for source in fit03:\n the_index = srcmap03.index_of(source)\n modelData03 += fit03[source][:, None, None]*srcmap03[the_index].data[:-1, :, :]/np.sum(np.sum(srcmap03[the_index].data, axis=2), axis=1)[:-1, None, None]\n\n fig = plt.figure(figsize=[12, 4.5])\n\n vmin = -25.0\n vmax = 25.0\n cbStep = 5.0\n ax = fig.add_subplot(121, projection=wcs)\n ax=plt.gca()\n ax.tick_params(direction='in')\n c = Wedge((gc_l, gc_b), 1.0, theta1=0.0, theta2=360.0, width=14.0, edgecolor='black', facecolor='#474747', transform=ax.get_transform('galactic'))\n ax.add_patch(c)\n mappable=plt.imshow((image_data-np.sum(modelData001,axis=0)),cmap='seismic',origin='lower',vmin=vmin, vmax=vmax, interpolation='gaussian')#\n plt.xlabel('Galactic Longitude')\n plt.ylabel('Galactic Latitude')\n plt.title('GC Point Source ($>6$ GeV)')\n cb = plt.colorbar(mappable, label='Residual counts per pixel', pad=0.01,ticks=np.arange(vmin, vmax+cbStep, cbStep))\n cb.ax.tick_params(width=0)\n\n\n ax2=fig.add_subplot(122, projection=wcs)\n ax2 = plt.gca()\n c2 = Wedge((gc_l, gc_b), 1.0, theta1=0.0, theta2=360.0, width=14.0, edgecolor='black', facecolor='#474747', transform=ax2.get_transform('galactic'))\n ax2.add_patch(c2)\n mappable2 = plt.imshow((image_data-np.sum(modelData03,axis=0)), cmap='seismic',origin='lower',vmin=vmin, vmax=vmax, interpolation='gaussian')\n plt.xlabel('Galactic Longitude')\n plt.ylabel('Galactic Latitude')\n plt.title('GC Extended Source 
($>6$ GeV)')\n cb2 = plt.colorbar(mappable2, label='Residual counts per pixel', pad=0.01, ticks=np.arange(vmin, vmax+cbStep, cbStep))\n cb2.ax.tick_params(width=0)\n fig.tight_layout()\n plt.subplots_adjust(wspace = 0.13, left=0.04, bottom=0.13, top=0.92)\n #plt.savefig('plots/residComparison.pdf',bbox_inches='tight')\n plt.show()", "def fitULD(tholds):\n \n # array to hold fit parameters\n \n mevs = numarray.zeros((3, calConstant.NUM_TEM, calConstant.NUM_ROW, calConstant.NUM_END,\n calConstant.NUM_FE, 3), numarray.Float32)\n \n for erng in range(3):\n for tem in range(calConstant.NUM_TEM):\n for row in range(calConstant.NUM_ROW):\n for end in range(calConstant.NUM_END):\n for fe in range(calConstant.NUM_FE):\n \n # remove saturation plateau before fit\n \n th = tholds[erng,tem,row,end,fe,:] \n sat = th[-1]\n q = th < sat\n a = numarray.compress(q, th)\n d = numarray.compress(q, D0) \n \n try:\n import ROOTFit\n import ROOT\n (fitParms, fitErrs, chisq) = ROOTFit.ROOTFit(ROOT.TF1(\"p1\",\"pol1\"),\n d,\n a,\n P0)\n \n except ValueError, e:\n log.error(\"mpfit excep on %s,T%d,%s%s,%d: %s,%s,%s\", calConstant.CRNG[erng],\n tem, calConstant.CROW[row], calConstant.CPM[end], fe, e, d, a)\n \n mevs[erng,tem,row,end,fe,0] = fitParms[1]\n mevs[erng,tem,row,end,fe,1] = fitParms[0] \n mevs[erng,tem,row,end,fe,2] = sat\n \n return mevs", "def hessian_full(self, params):\n\n if type(params) is not MixedLMParams:\n params = MixedLMParams.from_packed(params, self.k_fe,\n self.use_sqrt)\n\n fe_params = params.get_fe_params()\n cov_re = params.get_cov_re()\n try:\n cov_re_inv = np.linalg.inv(cov_re)\n except np.linalg.LinAlgError:\n cov_re_inv = None\n\n # Blocks for the fixed and random effects parameters.\n hess_fe = 0.\n hess_re = np.zeros((self.k_re2, self.k_re2), dtype=np.float64)\n hess_fere = np.zeros((self.k_re2, self.k_fe),\n dtype=np.float64)\n\n fac = self.n_totobs\n if self.reml:\n fac -= self.exog.shape[1]\n\n rvir = 0.\n xtvix = 0.\n xtax = [0.,] * self.k_re2\n B = np.zeros(self.k_re2, dtype=np.float64)\n D = np.zeros((self.k_re2, self.k_re2), dtype=np.float64)\n F = [[0.,]*self.k_re2 for k in range(self.k_re2)]\n for k in range(self.n_groups):\n\n exog = self.exog_li[k]\n ex_r = self.exog_re_li[k]\n ex2_r = self.exog_re2_li[k]\n\n # The residuals\n expval = np.dot(exog, fe_params)\n resid = self.endog_li[k] - expval\n\n viexog = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n exog)\n xtvix += np.dot(exog.T, viexog)\n vir = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n resid)\n rvir += np.dot(resid, vir)\n\n for jj1,mat1 in self._gen_dV_dPsi(ex_r):\n\n hess_fere[jj1,:] += np.dot(viexog.T,\n np.dot(mat1, vir))\n if self.reml:\n xtax[jj1] += np.dot(viexog.T, np.dot(mat1, viexog))\n\n B[jj1] += np.dot(vir, np.dot(mat1, vir))\n E = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n mat1)\n\n for jj2,mat2 in self._gen_dV_dPsi(ex_r, jj1):\n Q = np.dot(mat2, E)\n Q1 = Q + Q.T\n vt = np.dot(vir, np.dot(Q1, vir))\n D[jj1, jj2] += vt\n if jj1 != jj2:\n D[jj2, jj1] += vt\n R = _smw_solve(1., ex_r, ex2_r, cov_re,\n cov_re_inv, Q)\n rt = np.trace(R) / 2\n hess_re[jj1, jj2] += rt\n if jj1 != jj2:\n hess_re[jj2, jj1] += rt\n if self.reml:\n F[jj1][jj2] += np.dot(viexog.T,\n np.dot(Q, viexog))\n\n hess_fe -= fac * xtvix / rvir\n\n hess_re = hess_re - 0.5 * fac * (D/rvir - np.outer(B, B) / rvir**2)\n\n hess_fere = -fac * hess_fere / rvir\n\n if self.reml:\n for j1 in range(self.k_re2):\n Q1 = np.linalg.solve(xtvix, xtax[j1])\n for j2 in range(j1 + 1):\n Q2 = np.linalg.solve(xtvix, xtax[j2])\n a = 
np.trace(np.dot(Q1, Q2))\n a -= np.trace(np.linalg.solve(xtvix, F[j1][j2]))\n a *= 0.5\n hess_re[j1, j2] += a\n if j1 > j2:\n hess_re[j2, j1] += a\n\n # Put the blocks together to get the Hessian.\n m = self.k_fe + self.k_re2\n hess = np.zeros((m, m), dtype=np.float64)\n hess[0:self.k_fe, 0:self.k_fe] = hess_fe\n hess[0:self.k_fe, self.k_fe:] = hess_fere.T\n hess[self.k_fe:, 0:self.k_fe] = hess_fere\n hess[self.k_fe:, self.k_fe:] = hess_re\n\n return hess", "def fit_gaussian(arr):\n\tif isinstance(arr, ac.kernels.Kernel):\n\t\tarr = arr.array\n\telif isinstance(arr, np.ndarray):\n\t\tpass\n\telse: \n\t\traise Exception(\"[psfmatch] input needs to be a kernel or array\")\n\n\tnx, ny = arr.shape\n\tx, y = get_xy_grid(nx, ny)\n\n\tmodel_init = am.functional_models.Gaussian2D(amplitude=arr.max(), x_mean=0., y_mean=0., x_stddev=5., y_stddev=5., theta=0.)\n\tfitter = am.fitting.LevMarLSQFitter()\n\n\t# with warnings.catch_warnings():\n\t\t# warnings.simplefilter('ignore')\n\tmodel_best = fitter(model_init, x, y, arr)\n\n\treturn model_best", "def fit_ndgaussian(xdata, fdata):\n m, n = xdata.shape\n n2 = 2 * n\n fsuminv = 1 / numpy.sum(fdata)\n\n # Estimate initial parameters\n mean = fsuminv * numpy.sum(fdata * xdata.transpose(), axis=1)\n dx = (xdata - mean).transpose()\n cov = fsuminv * (fdata * dx).dot(dx.transpose())\n\n evals, evecs = linalg.eigh(cov)\n covdet = numpy.prod(evals)\n\n scale = fdata.max() * numpy.sqrt(covdet * (2 * numpy.pi) ** n)\n\n # Make sure the matrix of eigenvectors is orthogonal and proper (det +1)\n if linalg.det(evecs) < 0:\n evecs[:, 0] = -evecs[:, 0]\n\n ## Use the Cayley transform to extract n(n - 1) / 2 independent parameters\n ## from the orthogonal eigenvector matrix\n #eye = numpy.eye(n)\n #evecs_c = (eye - evecs).dot(linalg.inv(eye + evecs))\n #upper = numpy.triu_indices(n, k=1)\n\n # Use the parametrization in orthogonal_matrix()\n angles = angles_from_orthogonal_matrix(evecs)\n\n # Make a list with the minimal number of parameters to specify a Gaussian\n #params = numpy.hstack((scale, mean, numpy.sqrt(evals), evecs_c[upper]))\n params = numpy.hstack((scale, mean, numpy.sqrt(evals), angles))\n #params = numpy.hstack((numpy.sqrt(scale), mean, numpy.sqrt(evals),\n # angles))\n #params = numpy.hstack((scale, mean, evals, angles))\n\n def params_to_scale_mean_cov(params_):\n \"\"\"\n Extract the scale, mean and covariance matrix from the minimal\n parameter array\n\n \"\"\"\n # Extract scale and mean\n #scale_sqrt_ = params_[0]\n #scale_ = scale_sqrt_ * scale_sqrt_\n scale_ = params_[0]\n\n mean_ = params_[1:n + 1]\n\n # Get eigenvalues\n evals_sqrt_ = numpy.array(params_[n + 1:n2 + 1])\n evals_ = evals_sqrt_ * evals_sqrt_\n #evals_ = numpy.array(params_[n + 1:n2 + 1])\n\n ## Reconstruct the transformed eigenvector matrix\n #cov_c_ = numpy.zeros((n, n))\n #cov_c_[upper] = params_[n2 + 1:]\n #cov_c_.transpose()[upper] = -cov_c_[upper]\n #\n ## Use an inverse Cayley transform to get the true eigenvector matrix\n #evecs_ = (eye - cov_c_).dot(linalg.inv(eye + cov_c_))\n\n # Get eigenvector matrix from orthogonal_matrix()\n evecs_ = orthogonal_matrix_from_angles(n, params_[n2 + 1:])\n\n # Get the covariance matrix from the eigenvectors and eigenvalues\n cov_ = evecs_.dot(numpy.diag(evals_).dot(evecs_.transpose()))\n\n return scale_, mean_, cov_\n\n def param_gauss(xdata_, *params_):\n \"\"\"\n Define a Gaussian function specified by a minimal number of parameters\n\n \"\"\"\n scale_, mean_, cov_ = params_to_scale_mean_cov(params_)\n return scale_ * gaussian(xdata_, 
mean=mean_, cov=cov_)\n\n def error(params_):\n eps = fdata - param_gauss(xdata, *params_)\n return numpy.sum(eps * eps)\n\n # Find the parameter array that solves the least-squares fitting problem\n #params, __ = optimize.curve_fit(param_gauss, xdata, fdata, p0=params)\n l = n * (n - 1) // 2\n bounds = ([(0.0, None)] + # Scale must be positive\n [(None, None)] * n + # Means for each axis -- any value\n [(None, None)] * n + # Square roots of evals -- any value\n [(0.0, 2 * numpy.pi)] * l) # Angles constrained to one cycle\n params = optimize.minimize(error, params, bounds=bounds).x\n\n scale, mean, cov = params_to_scale_mean_cov(params)\n\n return scale, mean, cov", "def convergence_gmres_A():\n global conv_residuals\n def compute_residuals(r):\n \"\"\"Helper function to retrieve residual + steps to convergence for\n GMRES operation in Scipy. Used as a callback function for\n scipy.sparse.linalg.gmres\n \"\"\"\n global conv_residuals\n conv_residuals.append(r)\n return\n\n n_search = np.array([20, 40, 60, 80, 100, 120, 140, 160, 180])\n steps_till_conv_n = np.zeros(n_search.size)\n\n for i, n in enumerate(n_search):\n A = construct_matrix_A(n)\n # To average, we loop over 10 times\n for j in range(10):\n b = np.random.randn(n**2)\n conv_residuals = []\n x = scipy.sparse.linalg.gmres(A, b, callback=compute_residuals)\n steps_till_conv_n[i] += len(conv_residuals)\n\n # Divide by 10 to take the average:\n steps_till_conv_n /= 10\n\n fig220 = plt.figure(figsize=(13, 8))\n plt.plot(n_search, steps_till_conv_n)\n plt.xlabel(\"N\")\n plt.ylabel(\"Steps Taken to Converge\")\n plt.title(\"Figure 220 - Steps Taken for GMRES to Converge for Varying N\",\n fontsize=13)\n plt.grid()\n plt.savefig(\"figures/figure220.png\")\n plt.show()\n\n n_search = np.array([10, 50, 100, 150])\n\n fig221 = plt.figure(figsize=(13, 8))\n for i, n in enumerate(n_search):\n A = construct_matrix_A(n)\n b = np.random.randn(n**2)\n conv_residuals = []\n x = scipy.sparse.linalg.gmres(A, b, callback=compute_residuals)\n plt.semilogy(range(len(conv_residuals)), conv_residuals, label=f\"N = {n}\")\n\n plt.xlabel(\"Step Taken to Convergence\")\n plt.ylabel(\"Residuals\")\n plt.title(\"Figure 221 - GMRES Residuals for Varying N\", fontsize=13)\n plt.legend()\n plt.grid()\n plt.savefig(\"figures/figure221.png\")\n plt.show()\n return", "def residual1(params, x, data):\n #get the value of the params from a dict\n parvals = params.valuesdict()\n a = parvals['a']\n b = parvals['b']\n c = parvals['c']\n d = parvals['d']\n model = a + b*x + c*x**2 + d*x**3\n return data - model", "def residual(params, model_func, x, data, min_x_param=None, max_x_param=None,\n eps=None):\n # Crop the X data according to a fit parameter\n if min_x_param is not None or max_x_param is not None:\n min_x = params.get(min_x_param, None)\n max_x = params.get(max_x_param, None)\n x, data = crop_x_y(x, data, min_x=min_x, max_x=max_x,\n include_bounds=False)\n\n # Calculate data according to the model function\n model = model_func(x, **params)\n\n # Calculate the residuals of the model and the given data\n if eps is None:\n return model - data\n return (model - data) / eps", "def residual(self, x, y, num_targets):\n \n x = x/sum(x) # normalize weights\n\n # RUN IM-SRG(2)\n ref = self._refs.T.dot(x)\n main(self._n_holes,self._n_particles, \n g=self._g_val, \n pb=self._pb_val, \n ref=ref, \n verbose=0, \n generator=self._generator,\n output_root = self._coeffs_root)\n\n # LOAD EVOLVED COEFFICIENTS\n H0B, H1B, H2B, eta1B_vac, eta2B_vac = 
pickle.load(open(self._coeffs_root+'/vac_coeffs_evolved.p', 'rb'))\n\n # PERFORM FULL CI AND GET EIGENVALUES\n hme = pyci.matrix(self._n_holes,self._n_particles, H0B, H1B, H2B, H2B, imsrg=True)\n ev_eigs = np.linalg.eigvalsh(hme)\n\n #return np.sqrt(np.mean((ev_eigs-y)**2))\n #return abs(ev_eigs[0:num_targets] - y[0:num_targets])\n #return abs(ev_eigs[1] - y[1])\n #return abs(ev_eigs[0] - y[0])\n return np.sqrt(0.80*(ev_eigs[0]-y[0])**2 + 0.20/35*((ev_eigs[1::]-y[1::]).T.dot(ev_eigs[1::]-y[1::])))", "def residual(t, x, xdot, result):\n result[0] = x[2]-xdot[0]\n result[1] = x[3]-xdot[1]\n result[2] = -xdot[2]+x[4]*x[0]/m\n result[3] = -xdot[3]+x[4]*x[1]/m-g\n result[4] = x[2]**2 + x[3]**2 \\\n + (x[0]**2 + x[1]**2)/m*x[4] - x[1] * g\n print(result)", "def apply_gaussian_resolution(self,params,data,fwhm=1,dE=0.01,E_max=100):\n print('\\n################### CONVOLUTION #####################\\n')\n print(f'\\n\\tConvolution with Gaussian function, FWHM = {fwhm} meV\\n')\n\n data.fwhm = fwhm\n c = fwhm/2.35482\n\n data.dE = dE\n data.E_max = E_max\n data.spectra_E = np.arange(0,data.E_max+data.dE,data.dE)\n data.spectra_num_E = len(data.spectra_E)\n data.spectra = np.zeros((data.spectra_num_E,params.num_Qpoints))\n data.smooth_spectra = np.zeros((data.spectra_num_E,params.num_Qpoints))\n structure_factors = []\n energies = []\n\n ### sum intensity of degenerate bands\n if params.sum_degenerate_bands == True:\n print('\\n\\tSumming degenerate bands before convolution (using convolution dE as tolerance)\\n')\n for q in range(params.num_Qpoints):\n sfac = data.structure_factors[:,q]\n energy = data.frequencies[f'{q}']\n reduced_energies = []\n summed_sfac = []\n while True:\n if len(energy) == 0:\n break\n test_energy = energy[0]\n reduced_energies.append(test_energy)\n indicies = np.intersect1d(np.argwhere(energy <= (test_energy+data.dE)),\n np.argwhere(energy > (test_energy-data.dE)))\n summed_sfac.append(sfac[indicies].sum())\n sfac = np.delete(sfac,indicies)\n energy = np.delete(energy,indicies)\n energies.append(reduced_energies)\n structure_factors.append(summed_sfac)\n else:\n print('\\n\\tWARNING: You should definitely sum degenerate bands!!!\\n')\n for q in range(params.num_Qpoints):\n energies.append(data.frequencies[f'{q}'])\n structure_factors.append(data.structure_factors[:,q])\n\n ### populate array for heatmap\n ### try statement takes care of negative energies\n for q in range(params.num_Qpoints):\n for b in range(len(structure_factors[q][:])):\n try: # if there are negative modes, argwhere returns an empty vector and the slice crashes\n data.spectra[np.argwhere(data.spectra_E <= \n energies[q][b]).max(),q] = structure_factors[q][b]\n except:\n continue\n\n if params.bose_factor == True:\n print('\\n\\tWARNING: Bose factor isnt verified. 
Need to compare to SNAXS.\\n')\n if params.temperature < 5:\n temperature = 5\n else:\n temperature = params.temperature\n inds = np.argwhere(data.spectra_E <= 0.5)\n tmp_e = np.copy(data.spectra_E)\n tmp_e[inds] = 0.5\n bose = 1+1/(np.exp(tmp_e/(constants.kb*1000*temperature))-1)\n bose = np.tile(bose.reshape((data.spectra_num_E,1)),reps=(1,params.num_Qpoints))\n data.spectra = np.multiply(data.spectra,bose)\n data.spectra = data.spectra/np.max(data.spectra)\n\n ### gaussian convolution using for loops, slow but very little memory utilization\n g_energy = np.append(data.spectra_E-data.spectra_E.max(),data.spectra_E[1:])\n gaussian = np.exp(-0.5*g_energy**2/c**2)/c/np.sqrt(2*np.pi)\n gaussian = np.tile(gaussian.reshape((gaussian.shape[0],1)),(1,data.num_Qpoints))\n tmp = np.append(data.spectra,data.spectra,axis=0)[1:,:]\n for e in range(data.spectra_num_E):\n if e%50 == 0:\n print(f'\\t------ {e}/{data.spectra_num_E} -------')\n data.smooth_spectra[e,:] = np.trapz(tmp*np.roll(gaussian,shift=e,axis=0),g_energy,axis=0)\n print('\\n\\tDone convolving!\\n')\n data.smooth_spectra = data.smooth_spectra/np.max(data.smooth_spectra)\n\n# if params.random_background == True:\n# data.smooth_spectra = data.smooth_spectra+(np.random.normal(0,1,\n# (data.smooth_spectra.shape[0],data.smooth_spectra.shape[1])))*0.001\n \n plt.imshow(data.smooth_spectra,origin='lower',aspect='auto',cmap='hot')\n plt.show()", "def residuals_detail(self):\n if self._residuals_detail is None:\n if not self.parametric:\n unscaled = self.unscaled_residuals.values.ravel()\n adjusted = self.adjusted_residuals.values.ravel()\n unscaled = unscaled[~np.isnan(unscaled)]\n adjusted = adjusted[~np.isnan(adjusted)]\n unscaled = unscaled[unscaled != 0]\n adjusted = adjusted[adjusted != 0]\n unscaled_size = unscaled.size\n unscaled_sum = unscaled.sum(axis=0)\n unscaled_ssqr = np.sum(unscaled**2, axis=0)\n unscaled_min = unscaled.min(axis=0)\n unscaled_max = unscaled.max(axis=0)\n unscaled_mean = unscaled.mean(axis=0)\n unscaled_skew = stats.skew(unscaled, axis=0, nan_policy=\"omit\")\n unscaled_mode = stats.mode(unscaled, axis=0, nan_policy=\"omit\").mode[0]\n unscaled_cvar = stats.variation(unscaled, axis=0, nan_policy=\"omit\")\n unscaled_kurt = stats.kurtosis(unscaled, axis=0, nan_policy=\"omit\")\n unscaled_var = unscaled.var(ddof=1, axis=0)\n unscaled_std = unscaled.std(ddof=1, axis=0)\n unscaled_med = np.median(unscaled, axis=0)\n adjusted_size = adjusted.size\n adjusted_sum = adjusted.sum(axis=0)\n adjusted_ssqr = np.sum(adjusted**2, axis=0)\n adjusted_min = adjusted.min(axis=0)\n adjusted_max = adjusted.max(axis=0)\n adjusted_mean = adjusted.mean(axis=0)\n adjusted_skew = stats.skew(adjusted, axis=0, nan_policy=\"omit\")\n adjusted_mode = stats.mode(adjusted, axis=0, nan_policy=\"omit\").mode[0]\n adjusted_cvar = stats.variation(adjusted, axis=0, nan_policy=\"omit\")\n adjusted_kurt = stats.kurtosis(adjusted, axis=0, nan_policy=\"omit\")\n adjusted_var = adjusted.var(ddof=1, axis=0)\n adjusted_std = adjusted.std(ddof=1, axis=0)\n adjusted_med = np.median(adjusted, axis=0)\n self._residuals_detail = pd.DataFrame({\n \"unscaled\": [\n unscaled_size, unscaled_sum , unscaled_ssqr, unscaled_min,\n unscaled_max, unscaled_mean, unscaled_skew, unscaled_mode,\n unscaled_cvar, unscaled_kurt, unscaled_var , unscaled_std,\n unscaled_med\n ],\n \"adjusted\": [\n adjusted_size, adjusted_sum , adjusted_ssqr, adjusted_min,\n adjusted_max, adjusted_mean, adjusted_skew, adjusted_mode,\n adjusted_cvar, adjusted_kurt, adjusted_var , adjusted_std,\n 
adjusted_med\n ],\n },\n index=[\n \"size\", \"sum\", \"sum_of_squares\", \"minimum\", \"maximum\", \"mean\",\n \"skew\", \"mode\", \"cov\", \"kurtosis\", \"variance\",\n \"standard_deviation\", \"median\"\n ]\n )\n\n return(self._residuals_detail)", "def score_full(self, params):\n\n if type(params) is not MixedLMParams:\n params = MixedLMParams.from_packed(params, self.k_fe,\n self.use_sqrt)\n\n fe_params = params.get_fe_params()\n cov_re = params.get_cov_re()\n try:\n cov_re_inv = np.linalg.inv(cov_re)\n except np.linalg.LinAlgError:\n cov_re_inv = None\n\n score_fe = np.zeros(self.k_fe, dtype=np.float64)\n score_re = np.zeros(self.k_re2, dtype=np.float64)\n\n # Handle the covariance penalty.\n if self.cov_pen is not None:\n score_re -= self.cov_pen.grad(cov_re, cov_re_inv)\n\n # Handle the fixed effects penalty.\n if self.fe_pen is not None:\n score_fe -= self.fe_pen.grad(fe_params)\n\n # resid' V^{-1} resid, summed over the groups (a scalar)\n rvir = 0.\n\n # exog' V^{-1} resid, summed over the groups (a k_fe\n # dimensional vector)\n xtvir = 0.\n\n # exog' V^{_1} exog, summed over the groups (a k_fe x k_fe\n # matrix)\n xtvix = 0.\n\n # V^{-1} exog' dV/dQ_jj exog V^{-1}, where Q_jj is the jj^th\n # covariance parameter.\n xtax = [0.,] * self.k_re2\n\n # Temporary related to the gradient of log |V|\n dlv = np.zeros(self.k_re2, dtype=np.float64)\n\n # resid' V^{-1} dV/dQ_jj V^{-1} resid (a scalar)\n rvavr = np.zeros(self.k_re2, dtype=np.float64)\n\n for k in range(self.n_groups):\n\n exog = self.exog_li[k]\n ex_r = self.exog_re_li[k]\n ex2_r = self.exog_re2_li[k]\n\n # The residuals\n expval = np.dot(exog, fe_params)\n resid = self.endog_li[k] - expval\n\n if self.reml:\n viexog = _smw_solve(1., ex_r, ex2_r, cov_re,\n cov_re_inv, exog)\n xtvix += np.dot(exog.T, viexog)\n\n # Contributions to the covariance parameter gradient\n jj = 0\n vex = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n ex_r)\n vir = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n resid)\n for jj,mat in self._gen_dV_dPsi(ex_r):\n dlv[jj] = np.trace(_smw_solve(1., ex_r, ex2_r, cov_re,\n cov_re_inv, mat))\n rvavr[jj] += np.dot(vir, np.dot(mat, vir))\n if self.reml:\n xtax[jj] += np.dot(viexog.T, np.dot(mat, viexog))\n\n # Contribution of log|V| to the covariance parameter\n # gradient.\n score_re -= 0.5 * dlv\n\n # Needed for the fixed effects params gradient\n rvir += np.dot(resid, vir)\n xtvir += np.dot(exog.T, vir)\n\n fac = self.n_totobs\n if self.reml:\n fac -= self.exog.shape[1]\n\n score_fe += fac * xtvir / rvir\n score_re += 0.5 * fac * rvavr / rvir\n\n if self.reml:\n for j in range(self.k_re2):\n score_re[j] += 0.5 * np.trace(np.linalg.solve(\n xtvix, xtax[j]))\n\n score_vec = np.concatenate((score_fe, score_re))\n\n if self._freepat is not None:\n return self._freepat.get_packed() * score_vec\n else:\n return score_vec", "def residuals(self):\n\n if np.size(self.iceicehorizons_depth1) > 0:\n resi_iceice = (self.site1.fct_age(self.iceicehorizons_depth1)-\\\n self.site2.fct_age(self.iceicehorizons_depth2))/self.iceicehorizons_sigma\n if self.iceicehorizons_correlation_bool:\n resi_iceice = lu_solve(self.iceicehorizons_lu_piv, resi_iceice)\n resi = [resi_iceice]\n else:\n resi = [np.array([])]\n\n if self.site1.archive == 'icecore' and self.site2.archive == 'icecore' and \\\n np.size(self.airairhorizons_depth1) > 0:\n resi_airair = (self.site1.fct_airage(self.airairhorizons_depth1)-\\\n self.site2.fct_airage(self.airairhorizons_depth2))/\\\n self.airairhorizons_sigma\n if 
self.airairhorizons_correlation_bool:\n resi_airair = lu_solve(self.airairhorizons_lu_piv, resi_airair)\n resi.append(resi_airair)\n\n if self.site2.archive == 'icecore' and np.size(self.iceairhorizons_depth1) > 0:\n resi_iceair = (self.site1.fct_age(self.iceairhorizons_depth1)-\\\n self.site2.fct_airage(self.iceairhorizons_depth2))/\\\n self.iceairhorizons_sigma\n if self.iceairhorizons_correlation_bool:\n resi_iceair = lu_solve(self.iceairhorizons_lu_piv, resi_iceair)\n resi.append(resi_iceair)\n\n if self.site1.archive == 'icecore' and np.size(self.airicehorizons_depth1) > 0:\n resi_airice = (self.site1.fct_airage(self.airicehorizons_depth1)-\\\n self.site2.fct_age(self.airicehorizons_depth2))/self.airicehorizons_sigma\n if self.airicehorizons_correlation_bool:\n resi_airice = lu_solve(self.airicehorizons_lu_piv, resi_airice)\n resi.append(resi_airice)\n\n return np.concatenate(resi)", "def residuals(self,x=None,y=None,retdata=False):\n if x is None or y is None:\n if self.data is None:\n raise ValueError('must either specify data or save fitted data')\n x,y,weights = self.data\n\n if self(x).shape != y.shape:\n raise ModelTypeError('y array does not match output of model for input x')\n if retdata:\n return x,y,y-self(x)\n else:\n return y-self(x)", "def compute_residual_stats(problem):\n if len(problem.constraints) == 0:\n return (None, None)\n sum_residuals = 0\n max_residual = 0\n n_residuals = 0\n\n for constraint in problem.constraints:\n # print(constraint.residual.is_constant())\n res = constraint.residual.value\n # print(\"1\")\n thismax = 0\n\n # Compute average absolute residual:\n if isinstance(res, np.matrix):\n n_residuals += np.prod(res.size)\n sum_residuals += res.sum()\n thismax = np.absolute(res).max()\n elif isinstance(res, float) or isinstance(res, int):\n # res is a float\n n_residuals += 1\n thismax = np.absolute(res)\n elif isinstance(res, type(None)):\n pass\n else:\n print(\"Unknown residual type: {}\".format(type(res)))\n\n # Get max absolute residual:\n if max_residual < thismax:\n max_residual = thismax\n if n_residuals == 0:\n return (None, None)\n return (sum_residuals/n_residuals, max_residual)", "def Dres(var):\r\n zeropred = residuals(var)\r\n derivparams = []\r\n results=[]\r\n delta = m.sqrt(np.finfo(float).eps) #Stolen from the leastsq code\r\n for i in range(len(var)): #fixme: This loop is probably sub-optimal. 
Have a look at what does leastsq to improve this.\r\n copy = np.array(var)\r\n copy[i] += delta\r\n derivparams.append(copy)\r\n# results.append(residuals(derivparams))\r\n if __name__ == \"__main__\":\r\n pool = multiprocessing.Pool(nb_nodes)\r\n results = pool.map(residuals, derivparams)\r\n derivs = [ (r - zeropred)/delta for r in results ]\r\n return derivs", "def regress(pts):\n # split points in list of x- and y-values\n xs, ys = split(pts)\n # adjust x- and y-values (subtract the mean)\n _, xsadj = adjust(xs)\n _, ysadj = adjust(ys)\n # calculate variances (spread) in x- and y-direction\n # If one of the two variances is (nearly) 0,\n # we short circuit our logic\n # -> points then have to be on a straight line\n # (either horizontal or vertical)\n sx = variance(xsadj, m=0)\n if near_zero(sx): # == 0\n return ysadj, xsadj\n sy = variance(ysadj, m=0)\n if near_zero(sy): # == 0\n return xsadj, ysadj\n # calculate covariance\n sxy = cov(xsadj, ysadj, 0, 0)\n # get list of eigenvalues and vectors\n # these are sorted based on size of eigenvalues\n eig_sorted = sorted(zip(*eig_2x2(sx, sxy, sxy, sy)), reverse=True)\n vecs = [vec for _, vec in eig_sorted]\n newxs, newys = [], []\n for pt in zip(xsadj, ysadj):\n # no need to transpose the vectors as they are\n xnew = vecs[0][0] * pt[0] + vecs[0][1] * pt[1]\n ynew = vecs[1][0] * pt[0] + vecs[1][1] * pt[1]\n newxs.append(xnew)\n # residuals?!\n # These should be squared distances to regression line that was fitted\n newys.append(ynew)\n return newxs, newys", "def compute_residuals(r):\n global conv_residuals\n conv_residuals.append(r)\n return", "def residual4(params, x, data):\n #get the value of the params from a dict\n parvals = params.valuesdict()\n B0 = parvals['B0']\n E = parvals['E']\n Eh = parvals['Eh']\n Th = parvals['Th']\n model = np.log((B0*np.exp((-E/k)*((1/x)-(1/283.15)))) / (1+(np.exp((Eh/k)*((1/Th)-(1/x))))))\n return data - model", "def solve(mat, y):\n reduced = gaussian_elim(mat)\n sol = np.zeros(shape=(mat.shape[0]))\n S = 0\n for i in reversed(range(len(sol))):\n sol[i] = (y[i]-S) / reduced[i][i]\n S += y[i] - S\n return sol", "def fitgaussian3d(data):\n \n params = moments3d(data)\n # Error function is simple difference between gaussian function and data.\n errorfunction = lambda p: np.ravel(gaussian3d(*p)(*np.indices(data.shape)) -\n data)\n opt = optimize.least_squares(errorfunction, params, bounds=([0,0,0,-np.inf, -np.inf,-np.inf,-np.inf],[data.shape[0]-1,data.shape[1]-1,data.shape[2]-1,np.inf,np.inf,np.inf,np.inf]))\n # Make all widths positive (negative values are equally valid but less useful downstream).\n for i in range(4,7):\n opt.x[i] = abs(opt.x[i])\n return opt", "def multi_gauss_surface_fit(coef_mat, s_mat):\n x = s_mat[:, :, 0]\n y = s_mat[:, :, 1]\n num_peaks = coef_mat.shape[0]\n multi_gauss = np.zeros(shape=x.shape, dtype=np.float32)\n\n for peak_ind in range(num_peaks):\n amp = coef_mat[peak_ind, 0]\n x_val = coef_mat[peak_ind, 1]\n y_val = coef_mat[peak_ind, 2]\n sigma = coef_mat[peak_ind, 3]\n gauss = amp * np.exp(-((x - x_val) ** 2 + (y - y_val) ** 2) / sigma ** 2)\n multi_gauss += gauss\n\n return multi_gauss", "def mix_2d_fit(self):\n xgg, ygg= np.meshgrid(self.xgrid, self.ygrid, indexing='ij')\n print 'Finding minimum N and sigma'\n # Coarsest grid\n N0Arr = np.arange(10)*5000. 
+ 5000.\n sigmaArr= np.arange(5)*.1 + 3.8\n pArr = np.arange(10)*.1 + .1\n # CArr = \n pmin, Cmin, Nmin, sigma_min, rms = _mix_2d_fit(xgg, ygg, self.nArr, N0Arr, sigmaArr, pArr, CArr, normtype=normtype)\n # # Coarsest grid\n # N0Arr = np.arange(10)*1000. + Nmin - 2500.\n # sigmaArr= np.arange(50)*1. + sigma_min - 1.\n # Nmin, sigma_min, rms= _gauss_2d_fit(xgg, ygg, self.nArr, N0Arr, sigmaArr, normtype=normtype)\n # # finest grid\n # N0Arr = np.arange(10)*dN + Nmin - 500.\n # sigmaArr= np.arange(50)*dsigma + sigma_min - 0.5\n # Nmin, sigma_min, rms= _gauss_2d_fit(xgg, ygg, self.nArr, N0Arr, sigmaArr, normtype=normtype)\n # self.Ngauss = Nmin\n # self.sigma = sigma_min\n # self.rms2d = rms\n print 'End finding minimum N and sigma'\n print 'N =', Nmin,' sigma =', sigma_min \n return", "def residuals(x, z=None, max_time=np.inf):\n if z is None:\n return x\n n_samples = x.shape[0]\n residuals = np.zeros_like(x)\n lengthscale = mad(z)\n tic = time.time()\n for i in range(n_samples):\n if time.time() - tic > max_time:\n return None, None\n sum = 0\n weight = 0\n for j in range(n_samples):\n d = z[i] - z[j]\n d = np.sqrt(np.sum(d * d))\n k = uniform_kernel(d, lengthscale)\n sum += k * x[j]\n weight += k\n residuals[i] = x[i] - sum / weight\n return residuals, max_time - (time.time() - tic)", "def residual2P2Z(paras):\n initcond = setupinitcond(paras['pfun_num'].value, paras['zoo_num'].value)\n\n\n\n model = g2P2Z(initcond, timedays_model, paras)\n\n # to implement fitting algorithm make sure to calculate residual only for the last year!\n\n # will have to 1. : simplify the data (i.e. median per month)\n # will have to put data into structure to calculate efficiently (i.e. pandas dataframe like df[1] = N, df[2] = Si, etc.)\n model_ly = model[1460:1825]\n\n # aggregate model output in the same way as validation data (monthly mean)\n # create month vector to add to model output dataframe for analysis\n oneyearmodel = pandas.DataFrame()\n oneyearmodel = oneyearmodel.assign(day=pandas.Series(np.linspace(1, 365, 365)))\n\n # combine two columns\n phyto_model = pandas.DataFrame(\n {'data': model_ly[:, 4], 'month': pandas.to_datetime(oneyearmodel['day'], format='%j').dt.month})\n phyto_monthly_median = phyto_model.groupby('month').median()\n phyto_resid = (phyto_monthly_median['data'].values - ChlA_monthly_median['ChlA'].values * 0.1)\n\n nitrate_model = pandas.DataFrame(\n {'data': model_ly[:, 0], 'month': pandas.to_datetime(oneyearmodel['day'], format='%j').dt.month})\n nitrate_monthly_median = nitrate_model.groupby('month').median()\n nitrate_resid = (nitrate_monthly_median['data'].values - NO3NO2_monthly_median['NO3NO2'].values * 0.1)\n\n silicate_model = pandas.DataFrame(\n {'data': model_ly[:, 1], 'month': pandas.to_datetime(oneyearmodel['day'], format='%j').dt.month})\n silicate_monthly_median = silicate_model.groupby('month').median()\n silicate_resid = (silicate_monthly_median['data'].values - SiOH_USF_monthly_median['SiOH'].values * 0.1)\n\n zoo_model = pandas.DataFrame(\n {'data': model_ly[:, 3], 'month': pandas.to_datetime(oneyearmodel['day'], format='%j').dt.month})\n zoo_monthly_median = zoo_model.groupby('month').median()\n zoo_resid = (zoo_monthly_median['data'].values - ZooBM_monthly_median['ZooBM'].values * 0.1)\n\n ss = np.concatenate((phyto_resid, nitrate_resid, silicate_resid, zoo_resid))\n return ss", "def residual_G2D_norotation(pars,x,y,data=None, eps=None):\n\tparvals = pars.valuesdict() # a Parameters() object is passed as \"pars\"\n\tintensity_max = 
parvals[\"I_zero\"]\n\tcenterposition_x = parvals[\"x_zero\"]\n\tcenterposition_y = parvals[\"y_zero\"]\n\tbeamwidth_x = parvals[\"omegaX_zero\"]\n\tbeamwidth_y = parvals[\"omegaY_zero\"]\n\tbgr = parvals[\"backgr\"]\n\t\n\n\tmodel = intensity_max*np.exp(-2*np.power(x-centerposition_x,2)/beamwidth_x**2 - \\\n\t\t2*np.power(y-centerposition_y,2)/beamwidth_y**2) + bgr\n\tif data is None:\n\t\treturn np.array(model) # we don't flatten here because this is for plotting\n\tif eps is None:\n\t\tresid = np.array(model - data)\n\t\treturn resid.flatten() # minimization array must be flattened (LMFIT FAQ)\n\telse:\n\t\tresid = np.array((model - data)/eps)\n\t\treturn resid.flatten()", "def residual(self, y,r):\n u,v,tt = self.split(y)\n fiu,fiv,fitt = self.problem.internal_forces(u,v,tt)\n R = np.concatenate((fiu,fiv,fitt))\n R = self.residualApplyBCs(R,y,r)\n return R", "def best_fit(x, y, z, z_real, p = list(range(3, 15)), folds = 4, train = 0.7, seed = 42, n_lambda = 2001, n = 1, m = 1):\n lambdas = np.array([0] + np.logspace(-5.5, -1, n_lambda).tolist())\n polynomials = np.array(p)\n X, Y = np.meshgrid(lambdas, polynomials)\n MSE = np.zeros(np.shape(X))\n lambda_min_ridge = np.zeros(len(polynomials))\n lambda_min_lasso = np.zeros(len(polynomials))\n R2 = np.zeros((3, len(polynomials)))\n MSE = np.zeros((3, len(polynomials)))\n\n R2_data = np.zeros((3, len(polynomials)))\n MSE_data = np.zeros((3, len(polynomials)))\n\n\n for i in range(len(polynomials)):\n print(i + polynomials[0])\n ridge_sum = 0\n lasso_sum = 0\n model = regression(x, y, z, split = True, train = train, seed = seed, k = polynomials[i])\n z_test = np.ravel(np.copy(model.z_test))\n for j in range(n): #The mean of n times\n ridge_sum += model.lambda_best_fit(method = 'Ridge', fold = folds, random_num = True, n_lambda = n_lambda)[0]\n for j in range(m): #The mean of m times\n lasso_sum += model.lambda_best_fit(method = 'Lasso', fold = folds, n_lambda = n_lambda)[0]\n lambda_min_ridge[i] = ridge_sum/n\n lambda_min_lasso[i] = lasso_sum/m\n\n _,_, a, z_real_test = model.train_test(X = model.X_full, z = z_real, train = 0.7, seed = seed) #Both the training set and the test set for z_real in that order in list/tuple\n\n Beta_ols = model.OLS()\n Beta_ridge = model.Ridge(lam = lambda_min_ridge[i])\n Beta_lasso = model.Lasso(lam = lambda_min_lasso[i], max_iter = 1001)\n\n z_tilde_OLS = model.z_tilde(Beta_ols, X = model.X_test)\n z_tilde_Ridge = model.z_tilde(Beta_ridge, X = model.X_test)\n z_tilde_Lasso = model.z_tilde(Beta_lasso, X = model.X_test)\n\n R2[0, i] = model.R_squared(z_tilde = z_tilde_OLS, z = z_real_test)\n R2[1, i] = model.R_squared(z_tilde = z_tilde_Ridge, z = z_real_test)\n R2[2, i] = model.R_squared(z_tilde = z_tilde_Lasso, z = z_real_test)\n\n MSE[0, i] = model.MSE(z_tilde = z_tilde_OLS, z = z_real_test)\n MSE[1, i] = model.MSE(z_tilde = z_tilde_Ridge, z = z_real_test)\n MSE[2, i] = model.MSE(z_tilde = z_tilde_Lasso, z = z_real_test)\n\n R2_data[0, i] = model.R_squared(z_tilde = z_tilde_OLS, z = z_test)\n R2_data[1, i] = model.R_squared(z_tilde = z_tilde_Ridge, z = z_test)\n R2_data[2, i] = model.R_squared(z_tilde = z_tilde_Lasso, z = z_test)\n\n MSE_data[0, i] = model.MSE(z_tilde = z_tilde_OLS, z = z_test)\n MSE_data[1, i] = model.MSE(z_tilde = z_tilde_Ridge, z = z_test)\n MSE_data[2, i] = model.MSE(z_tilde = z_tilde_Lasso, z = z_test)\n\n _, _, lambdas = model.lambda_best_fit(method = 'Ridge', fold = folds, random_num = True)\n\n min_MSE = [[np.argmin(MSE[0]), np.argmin(MSE[1]), np.argmin(MSE[2])], 
[np.argmin(MSE_data[0]), np.argmin(MSE_data[1]), np.argmin(MSE_data[2])]]\n min_R2 = [[np.argmin(MSE[0]), np.argmin(MSE[1]), np.argmin(MSE[2])], [np.argmin(MSE_data[0]), np.argmin(MSE_data[1]), np.argmin(MSE_data[2])]]\n\n print('Minimum MSE with Frank, OLS: ', np.min(MSE[0]), ' Ridge: ', np.min(MSE[1]), ' Lasso: ', np.min(MSE[2]))\n print('With polynoms: ', np.argmin(MSE[0]) + polynomials[0], np.argmin(MSE[1]) + polynomials[0], np.argmin(MSE[2]) + polynomials[0])\n print('----------------------------------------------------------------------------------------------')\n print('Minimum MSE with Data, OLS: ', np.min(MSE_data[0]), ' Ridge: ', np.min(MSE_data[1]), ' Lasso: ', np.min(MSE_data[2]))\n print('With polynoms: ', np.argmin(MSE_data[0]) + polynomials[0], np.argmin(MSE_data[1]) + polynomials[0], np.argmin(MSE_data[2]) + polynomials[0])\n print('----------------------------------------------------------------------------------------------')\n print('Maximum R2 with Frank, OLS: ', np.max(R2[0]), ' Ridge: ', np.max(R2[1]), ' Lasso: ', np.max(R2[2]))\n print('With polynoms: ', np.argmax(R2[0]) + polynomials[0], np.argmax(R2[1]) + polynomials[0], np.argmax(R2[2]) + polynomials[0])\n print('----------------------------------------------------------------------------------------------')\n print('Maximum R2 with Frank, OLS: ', np.max(R2_data[0]), ' Ridge: ', np.max(R2_data[1]), ' Lasso: ', np.max(R2_data[2]))\n print('With polynoms: ', np.argmax(R2_data[0]) + polynomials[0], np.argmax(R2_data[1]) + polynomials[0], np.argmax(R2_data[2]) + polynomials[0])\n print('----------------------------------------------------------------------------------------------')\n\n error_mins = np.array([[np.min(MSE[0]), np.min(MSE[1]), np.min(MSE[2])],\n [np.min(MSE_data[0]), np.min(MSE_data[1]), np.min(MSE_data[2])],\n [np.max(R2[0]), np.max(R2[1]) , np.max(R2[2])],\n [np.max(R2_data[0]), np.max(R2_data[1]), np.max(R2_data[2])],\n [np.argmin(MSE[0]) + polynomials[0], np.argmin(MSE[1]) + polynomials[0], np.argmin(MSE[2]) + polynomials[0]],\n [np.argmin(MSE_data[0]) + polynomials[0], np.argmin(MSE_data[1]) + polynomials[0], np.argmin(MSE_data[2]) + polynomials[0]],\n [np.argmax(R2[0]) + polynomials[0], np.argmax(R2[1]) + polynomials[0], np.argmax(R2[2]) + polynomials[0]],\n [np.argmax(R2_data[0]) + polynomials[0], np.argmax(R2_data[1]) + polynomials[0], np.argmax(R2_data[2]) + polynomials[0]]]).T\n\n text = ['MSE Franke', 'MSE Data','R\\(^2\\) Franke', 'R\\(^2\\) Data']\n print(latex_print(error_mins, text = text))\n\n print('Ridge lambda, lowest indexes for Franke: ', np.argmin(MSE[2]))\n print('Ridge lambda, lowest indexes for Data: ', np.argmin(MSE_data[2]))\n print(lambda_min_ridge)\n print('Lasso lambda, lowest indexes for Franke: ', np.argmin(MSE[2]))\n print('Lasso lambda, lowest indexes for Data: ', np.argmin(R2_MSE[2]))\n print(lambda_min_lasso)\n #Real Franke\n\n plt.plot(polynomials, R2[0], 'go--', label = 'OLS', color = 'red')\n plt.plot(polynomials, R2[1], 'go--', label = 'Ridge', color = 'blue')\n plt.plot(polynomials, R2[2], 'go--', label = 'Lasso', color = 'green')\n plt.title('R2 error between the model and FrankeFunction', fontsize = 14)\n plt.ylabel('R2')\n plt.xlabel('Polynomial degree')\n plt.legend()\n plt.tight_layout()\n\n plt.savefig(results_dir + 'ridge_lasso_high_order_poly.png')\n\n plt.show()\n\n plt.plot(polynomials, MSE[0], 'go--', label = 'OLS', color = 'red')\n plt.plot(polynomials, MSE[1], 'go--', label = 'Ridge', color = 'blue')\n plt.plot(polynomials, MSE[2], 'go--', label = 
'Lasso', color = 'green')\n plt.title('MSE for test data between the model and FrankeFunction', fontsize = 14)\n plt.ylabel('MSE')\n plt.xlabel('Polynomial degree')\n plt.legend()\n plt.tight_layout()\n\n plt.savefig(results_dir + 'ridge_lasso_high_order_polyMSE.png')\n\n plt.show()\n\n #Noise Franke\n\n plt.plot(polynomials, R2_data[0], 'go--', label = 'OLS', color = 'red')\n plt.plot(polynomials, R2_data[1], 'go--', label = 'Ridge', color = 'blue')\n plt.plot(polynomials, R2_data[2], 'go--', label = 'Lasso', color = 'green')\n plt.title('R2 error between the model and data', fontsize = 14)\n plt.ylabel('R2')\n plt.xlabel('Polynomial degree')\n plt.legend()\n plt.tight_layout()\n\n plt.savefig(results_dir + 'ridge_lasso_high_order_poly_data.png')\n\n plt.show()\n\n plt.plot(polynomials, MSE_data[0], 'go--', label = 'OLS', color = 'red')\n plt.plot(polynomials, MSE_data[1], 'go--', label = 'Ridge', color = 'blue')\n plt.plot(polynomials, MSE_data[2], 'go--', label = 'Lasso', color = 'green')\n plt.title('MSE for test data between the model and data', fontsize = 14)\n plt.ylabel('MSE')\n plt.xlabel('Polynomial degree')\n plt.legend()\n plt.tight_layout()\n\n plt.savefig(results_dir + 'ridge_lasso_high_order_polyMSE_data.png')\n\n plt.show()\n\n #Polynomial and lambda\n\n plt.plot(polynomials, lambda_min_ridge, 'go--', label = 'Ridge', color = 'blue')\n plt.plot(polynomials, lambda_min_lasso, 'go--', label = 'Lasso', color = 'green')\n\n plt.title('The \\'best\\' lambda pr polynomial')\n plt.ylabel('Lambda')\n plt.xlabel('Polynomial degree')\n plt.legend()\n plt.tight_layout()\n plt.savefig(results_dir + 'ridge_lasso_lambda_poly.png')\n plt.show()", "def overall_fitting(filepath, system_information, model_function, model_function_for_ib, fitting_method):\n # load data\n data = load_data(filepath)\n\n x = data[\"x\"]\n\n parameters = []\n overall_rmse = []\n # cpu fit\n cpu_parameters = fitting(\n x, data[\"CPU Time\"], model_function, fitting_method, system_information + \"_CPU\")\n cpu_rmse = calculate_rmse(\n x, data[\"CPU Time\"], model_function, cpu_parameters)\n parameters.append(cpu_parameters)\n overall_rmse.append(cpu_rmse)\n\n # mb fit\n mb_parameters = fitting(\n x, data[\"Memory Time\"], model_function, fitting_method, system_information + \"_memory\")\n mb_rmse = calculate_rmse(\n x, data[\"Memory Time\"], model_function, mb_parameters)\n parameters.append(mb_parameters)\n overall_rmse.append(mb_rmse)\n\n # ib fit\n ib_parameters = fitting(\n x, data[\"MPI Time\"], model_function_for_ib, fitting_method, system_information + \"_IB\")\n ib_rmse = calculate_rmse(\n x, data[\"MPI Time\"], model_function_for_ib, ib_parameters)\n parameters.append(ib_parameters)\n overall_rmse.append(ib_rmse)\n\n return parameters, overall_rmse", "def _residual(self, x):\n h = x\n h = self.c1(h)\n h = self.activation(h)\n h = self.c2(h)\n h = F.avg_pool2d(h, 2)\n\n return h", "def do_fit(self):\n\n if (self._flag == 1):\n self._gf = [0.2]\n self._gf = self.par*(self._num_fu*len(self._sites)*2)\n x, F = self.read_from_file(\n self._sn, self._qn, self._path) # read data from the file\n # ,ftol=1.0e-7,xtol=1.0e-8)\n popt, pcov = curve_fit(\n self.modelfun, x, F, p0=self._gf, maxfev=5000)\n self._gf = popt\n\n elif (self._flag == 2):\n\n# par=[0.0]*(self._num_fu*5)\n# for j in range(self._num_fu):\n# par[j*5]=0.0*math.copysign(1,(pow(-1,j)))\n# self._gf[j*5]=0.1\n# par[j*5+1]=6.45\n# par[j*5+2]=0.0\n# par[j*5+3]=0.05\n# par[j*5+4]=1.0\n\n X, F = self.read_from_file(self._sn, self._qn, self._path) # read 
data from the file\n\n# height, xx, width=self.moments(F)\n# Tracer()()\n# par=[0.0]*(self._num_fu*5)\n# for j in range(self._num_fu):\n# par[j*5]=x[0,xx]\n# par[j*5]=X[0,xx]*math.copysign(1,(pow(-1,j)))\n# par[j*5+1]=X[1,xx]\n# par[j*5+2]=X[2,xx]\n# par[j*5+3]=0.007\n# par[j*5+4]=height*math.copysign(1,(pow(-1,j)))\n\n xi, yi, zi = np.mgrid[-6.5:6.5:160j, 4.0:8.9:160j, -7.5:7.5:160j]\n x, y, z = xi.flatten(), yi.flatten(), zi.flatten()\n XX = np.vstack((x, y, z))\n\n invdisttree = Invdisttree(X.T, F, leafsize=10, stat=1)\n AA = invdisttree(XX.T, nnear=130, eps=0, p=1)\n\n# aaa1,bbb1=self.detect_local_minima(-AA.reshape(xi.shape))\n# aaa2,bbb2=self.detect_local_maxima(-AA.reshape(xi.shape))\n if self.peaks==[]:\n print('\\n---------------------------------------------------------------------')\n print('Detecting maxima and minima of target function...',)\n\n peaks_min, min_coord, peaks_max, max_coord = self.detect_min_max(AA.reshape(xi.shape))\n print('done')\n print('Number of the min peaks: {}'.format(len(peaks_min)))\n print('Number of the max peaks: {}'.format(len(peaks_max)))\n print('---------------------------------------------------------------------\\n')\n # fig=plt.figure()\n # ax = fig.add_subplot(111, projection='3d')\n # ax.plot_surface(xi[:,:,60],yi[:,:,60],bbb2[:,:,60], cmap=cm.jet, linewidth=0.2)\n # plt.hold(True)\n # plt.show()\n\n if peaks_max==[]:\n peaks=np.insert(peaks_min, np.arange(len(peaks_max)), peaks_max)\n coords=np.insert(min_coord, np.arange(max_coord.shape[1]), max_coord, axis=1)\n else:\n peaks = np.insert(peaks_max, np.arange(len(peaks_min)), peaks_min)\n coords = np.insert(max_coord, np.arange(min_coord.shape[1]), min_coord, axis=1)\n\n self.peaks=peaks\n self.coords=coords\n\n par = [0.0]*(self._num_fu*5)\n j1 = 0\n aaaa = 1\n for j in range(self._num_fu):\n if (j > aaaa*self.coords.shape[1]-1):\n j1 = 0\n aaaa += 1\n par[j*5] = xi[self.coords[0, j1], self.coords[0, j1], self.coords[0, j1]]\n par[j*5+1] = yi[self.coords[1, j1], self.coords[1, j1], self.coords[1, j1]]\n par[j*5+2] = zi[self.coords[2, j1], self.coords[2, j1], self.coords[2, j1]]\n # par[j*5+3] = 0.1003+0.1000*math.copysign(1, (pow(-1, j)))\n par[j*5+3] = 0.0001\n# if j < 15:\n# par[j*5+3] = 0.00001\n# else:\n# par[j*5+3] = 0.0005\n par[j*5+4] = self.peaks[j1]\n# print(coords[0, j1], coords[1, j1], coords[2, j1])\n j1 += 1\n # popt, pcov = curve_fit(self.modelfun1, x[:,1:20000], F[1:20000],p0=par,maxfev=150000,xtol=1e-8,ftol=1e-8)\n popt, pcov = curve_fit(\n self.modelfun1, X, F, p0=par, maxfev=150000, xtol=1e-6,\n ftol=1e-8)\n # popt, pcov = curve_fit(self.modelfun1, XX, AA, p0=par)\n self._gf = popt\n# self.error=np.diagonal(pcov, offset=0)\n# print(pcov)\n else:\n # pass\n sys.exit(\"Wrong flag in do_fit\")", "def _test5():\n# import matplotlib.pyplot as plt\n from math import pi, cos, sin\n n = 800\n PI2 = 2.0*pi\n angle = PI2 / n\n pts = []\n r = 10.0\n for i in range(n):\n beta = i * angle\n x = r*cos(beta)\n y = r*sin(beta)\n pts.append((x, y))\n print (regress(pts))\n are_zero = are_residuals_near_zero(pts)", "def regress_residuals(x, y):\r\n slope, intercept = regress(x, y)\r\n coords = zip(x, y)\r\n residuals = []\r\n for x, y in coords:\r\n e = y - (slope * x) - intercept\r\n residuals.append(e)\r\n return residuals", "def fitbivarGaussian(data):\n params = bivarParams(data)\n errorfunction = lambda p: ravel(bivarGaussian(*p)(*indices(data.shape)) -\n data)\n p, success = optimize.leastsq(errorfunction, params)\n return p", "def r_sq(data, fit):\n ss_res = np.sum((data - fit) ** 
2)\n ss_tot = np.sum((data - np.mean(data)) ** 2)\n\n return 1 - (ss_res / ss_tot)", "def question27():\n global conv_residuals\n def catch(r):\n \"\"\"Helper function to retrieve residual + steps to convergence for\n GMRES operation in Scipy. Used as a callback function for\n scipy.sparse.linalg.gmres\n \"\"\"\n global conv_residuals\n conv_residuals.append(r)\n return\n\n def iterate(rk):\n \"\"\" Preconditioner Function for GMRES.\"\"\"\n y = scipy.sparse.linalg.spsolve(P1, rk)\n RHS = scipy.sparse.csr_matrix.dot(P4, y) + rk\n zk = scipy.sparse.linalg.spsolve(P3, RHS)\n return zk\n\n\n N_search = np.array([20, 40, 60, 80, 100, 120, 140, 160, 180])\n steps_till_conv_N = np.zeros(N_search.size)\n\n fig271 = plt.figure(figsize=(13, 8))\n\n for i, n in enumerate(N_search):\n n2 = n**2\n A = construct_matrix_A(n)\n b = np.random.randn(n2)\n M, N = construct_M_N(n)\n mu_max = scipy.sparse.linalg.eigs(M, k=1, which='LM', return_eigenvectors=False)[0].real\n mu_min = scipy.sparse.linalg.eigs(M, k=1, which='SM', return_eigenvectors=False)[0].real\n gamma = np.sqrt(mu_max*mu_min)\n gammaI = scipy.sparse.diags((gamma,), (0,), shape=(n2, n2), format=\"csr\")\n P1 = gammaI + M\n P2 = gammaI - N\n P3 = gammaI + N\n P4 = gammaI - M\n M = scipy.sparse.linalg.LinearOperator((n2, n2), matvec=iterate)\n conv_residuals = []\n x = scipy.sparse.linalg.gmres(A, b, M=M, callback=catch)\n steps_till_conv_N[i] += len(conv_residuals)\n n_steps = len(conv_residuals)\n plt.semilogy(range(n_steps), conv_residuals, label=f\"N = {n}\")\n\n plt.xlabel(\"Steps Required for Convergence\")\n plt.ylabel(\"Residuals\")\n plt.title(\"Figure 271 - GMRES + Preconditioner Residuals for Varying N\", fontsize=13)\n plt.legend()\n plt.grid()\n plt.savefig(f\"figures/figure271.png\")\n plt.show()\n\n\n fig270 = plt.figure(figsize=(13, 8))\n plt.plot(N_search, steps_till_conv_N)\n plt.xlabel(\"N\")\n plt.ylabel(\"Steps until convergence\")\n plt.title(\"Figure 270 - GMRES + Preconditioner Convergence Required for Varying N\", fontsize=13)\n plt.grid()\n plt.savefig(f\"figures/figure270.png\")\n plt.show()\n return", "def estimate_multi_gaussian(X):\n m, n = X.shape\n mu = mean(X, axis=0)\n sigma = cov_matrix(X, mu)\n\n return mu, sigma", "def _residual(function, p, x, y, y_err):\n return (y - function(p, x)) / y_err", "def residual5(params, x, data):\n #get the value of the params from a dict\n parvals = params.valuesdict()\n B0 = parvals['B0']\n E = parvals['E']\n El = parvals['El']\n Tl = parvals['Tl']\n model = np.log((B0*np.exp((-E/k)*((1/x)-(1/283.15)))) / (1+(np.exp((El/k)*((1/Tl)-(1/x))))))\n return data - model", "def compute_terms(Y, latent_means, latent_Sigmas, B1, B2, mu, g1, g2):\n # these are the 's' parameters when nu=e_q, beta=0\n ss = [-mi[-1]*si[-1] for mi, si in zip(latent_means, latent_variances)]\n\n a1 = [-np.trace(Sigma) - np.matmul(mi.T, mi) + np.log(LA.det(Sigma)) for Sigma, mi in zip(latent_Sigmas, latent_means)]\n\n B_times_mean = [np.matmul(B1+B2, mi) for mi in latent_means]\n a2 = [\n np.matmul(\n yi - mu, \n (yi - mu).T\n ).item() - np.matmul(\n yi - mu, \n Bm\n ).item() for yi, Bm in zip(Y, B_times_mean)\n ]\n\n a3_scalars = [SQRT_PI_OVER_2*erfc(si/ROOT2)+si*np.exp(-si**2/2) for si in ss]\n a3 = [\n g1*sc*np.trace(\n np.matmul(\n B1.T, \n np.matmul(B1, Sigma)\n )\n ).item() for Sigma, sc in zip(latent_Sigmas, a3_scalars)\n ]\n\n a4_scalars = [SQRT_PI_OVER_2*(erf(si/ROOT2)+1)-si*np.exp(-si**2/2) for si in ss]\n a4 = np.array([\n g2*sc*np.trace(\n np.matmul(\n B2.T, \n np.matmul(\n B2, \n Sigma\n )\n )\n 
).item() for Sigma, sc in zip(latent_Sigmas, a4_scalars)\n ],\n dtype='object'\n )\n\n a5_inner = [\n erfc(si/ROOT2)*np.matmul(\n B1.T, \n B1\n ) + (erf(si/ROOT2)+1)*np.matmul(\n B2.T, \n B2\n ) for si in ss]\n a5 = [\n SQRT_PI_OVER_2*np.matmul(mi.T, np.matmul(Bi, mi)) for mi, Bi in zip(latent_means, a5_inner)\n ]\n\n # convert all list of 1d arrays to lists of floats\n a1 = [element.item()*0.5 for element in a1]\n a3 = [element.item() for element in a3]\n a4 = [element.item() for element in a4]\n a5 = [element.item() for element in a5]\n\n return a1, a2, a3, a4, a5", "def eeg_rms(array, axis=0):\t\t\n\treturn np.sqrt(np.mean(array ** 2,axis))", "def evaluate_fit(path_to_results, filename=\"results.csv\"):\n import pandas as pd\n\n results = pd.read_csv(path_to_results + \"/\" + filename)\n\n def plot(pivotted, variance):\n import seaborn as sns\n import os\n import numpy as np\n from matplotlib.colors import LogNorm\n import math\n\n barmin, barmax = 1e-18, 1e-8\n cbar_ticks = [1e-20, 1e-18, 1e-16, 1e-14, 1e-12, 1e-10]\n log_norm = LogNorm(vmin=barmin, vmax=barmax)\n ax = sns.heatmap(\n pivotted,\n cmap=\"coolwarm\",\n vmax=barmax,\n vmin=barmin,\n norm=log_norm,\n cbar_kws={\"ticks\": cbar_ticks},\n ) # , yticklabels=achsislabel_y, xticklabels=achsislabel_x)\n # ax.invert_yaxis()\n fig = ax.get_figure()\n if not os.path.exists(path_to_results + \"/heatmap_variance\"):\n os.mkdir(path_to_results + \"/heatmap_variance\")\n\n fig.savefig(\n path_to_results + \"/heatmap_variance\" + \"/\" + str(obs_loc) + \"_\" + variance,\n dpi=dpi,\n )\n fig.clf()\n\n from processing import identify_numbers_from_string\n\n for obs_loc in results[\"obs_loc\"].unique():\n # extract only rows with obs_loc==obs_loc\n df_obs_loc = results[results.obs_loc == obs_loc]\n # extract columns for plotting\n df_obs_loc_cut = df_obs_loc[[\"S_in\", \"T_in\", \"cov\"]]\n # get values for sigma S and sigma T seperately from column cov\n df_obs_loc_cut[\"cov_numbers\"] = df_obs_loc_cut[\"cov\"].apply(\n identify_numbers_from_string\n )\n df_obs_loc_cut[\"sigma_S\"] = df_obs_loc_cut[\"cov_numbers\"].apply(lambda x: x[0])\n df_obs_loc_cut[\"sigma_T\"] = df_obs_loc_cut[\"cov_numbers\"].apply(lambda x: x[3])\n # convert objects to floats\n df_obs_loc_cut.sigma_S = pd.to_numeric(df_obs_loc_cut.sigma_S)\n df_obs_loc_cut.sigma_T = pd.to_numeric(df_obs_loc_cut.sigma_T)\n for variance in [\"sigma_S\", \"sigma_T\"]:\n pivot_df_obs_loc_cut = df_obs_loc_cut.pivot(\"S_in\", \"T_in\", variance)\n # plot heatmap\n import numpy as np\n\n plot(pivot_df_obs_loc_cut, variance)", "def fit_gauss(x, y):\n nx = numpy.array(x)\n ny = numpy.array(y)\n ne = numpy.ones(len(ny))\n#\n#--- we need to give an initial guess\n#\n ymax = numpy.max(ny)\n med = find_med(y)\n p0 = [ymax, med, 10, 0]\n\n fitobj = kmpfit.Fitter(residuals=residualsG, data=(nx,ny,ne))\n fitobj.fit(params0=p0)\n [amp, cent, width, floor] = fitobj.params\n\n return [amp, cent, width]", "def updateFittedAndResiduals(self, **kwargs)->np.ndarray:\r\n self.fittedTS = self.simulate(**kwargs) # Updates self.fittedTS\r\n if self._selectedIdxs is None:\r\n self._updateSelectedIdxs()\r\n self.fittedTS = self.fittedTS[self._selectedIdxs]\r\n residualsArr = self.calcResiduals(self.params)\r\n numRow = len(self.fittedTS)\r\n numCol = len(residualsArr)//numRow\r\n residualsArr = np.reshape(residualsArr, (numRow, numCol))\r\n cols = self.selectedColumns\r\n if self.residualsTS is None:\r\n self.residualsTS = self.observedTS.subsetColumns(cols)\r\n self.residualsTS[cols] = residualsArr", "def 
casdi_residual_star(params):\n return np.nanvar(casdi_residual(*params))", "def data_model_residual(surface, dem, unw, incidence):\n los,fem_los,residual = pu.los2pylith(surface,dem,unw,incidence)\n\n # Using image_grid\n fig = plt.figure()\n grid = ImageGrid(fig, 111, # similar to subplot(111)\n nrows_ncols = (1, 3),\n direction=\"row\",\n axes_pad = 0.05,\n add_all=True,\n label_mode = \"1\",\n share_all = True,\n cbar_location=\"top\",\n cbar_mode=\"each\", #\"single\"\n cbar_size=\"5%\",\n cbar_pad=0.05,\n )\n #grid[0].set_xlabel(\"X\")\n #grid[0].set_ylabel(\"Y\")\n #grid2[0].set_xticks([-2, 0])\n #grid2[0].set_yticks([-2, 0, 2])\n\n #NOTE: could find global min/max from three arrays here\n norm = Normalize(vmin=np.nanmin(los), vmax=np.nanmax(los))\n #for ax,data in zip(grid,[los,fem_los,residual]):\n im = grid[0].imshow(los,origin='upper',norm=norm,cmap=plt.cm.jet)\n grid[0].axhline(100,color='m') #show profile\n cax = grid.cbar_axes[0]\n cax.colorbar(im)\n grid[1].axhline(100,color='k') #show profile\n im1 = grid[1].imshow(fem_los,origin='upper',norm=norm,cmap=plt.cm.jet)\n\n cax = grid.cbar_axes[1]\n cax.colorbar(im1)\n\n im2 = grid[2].imshow(residual,origin='upper',cmap=plt.cm.jet)\n cax = grid.cbar_axes[2]\n cax.colorbar(im2)\n\n # Add letter labels\n for ax, label in zip(grid,['A', 'B', 'C']):\n ax.text(0.05, 0.95, label, transform=ax.transAxes,\n fontsize=16, fontweight='bold', va='top')\n\n # Annotate\n # NOTE: way too high!\n #plt.suptitle('FEM Results')\n\n # Add profile\n # NOTE: for now EW, but would be easy to do arbitrary line, and convert to km\n fig = plt.figure()\n #x = arange(los.shape[0])\n plt.axhline(color='k',ls='--')\n plt.plot(los[100],'m.',label='data')\n plt.plot(fem_los[100],'k-',lw=2,label='model')\n plt.xlabel('Distance [km]')\n plt.ylabel('Distance [km]')\n plt.legend(loc='upper left')\n\n plt.show()", "def get_residual(self) -> np.ndarray:\n return self._calculate_residual(self.coefficients)", "def dataModel():\n srcmap001 = fits.open('dataFiles/6gev_srcmap_001.fits')\n srcmap03 = fits.open('dataFiles/6gev_srcmap_03.fits')\n\n image_data = fits.getdata('6gev_image.fits')\n filename = get_pkg_data_filename('6gev_image.fits')\n hdu = fits.open(filename)[0]\n wcs = WCS(hdu.header)\n\n #Given the results of the fit, calculate the model\n modelData001 = np.zeros(srcmap001[0].shape)\n modelData03 = np.zeros(srcmap03[0].shape)\n\n file = open('plotsData/fitResults001.pk1','rb')\n fit001 = pickle.load(file)\n file.close()\n\n file = open('plotsData/fitResults03.pk1','rb')\n fit03 = pickle.load(file)\n file.close()\n\n\n for source in fit001:\n the_index = srcmap001.index_of(source)\n\n modelData001 += fit001[source][:, None, None]*srcmap001[the_index].data[:-1, :, :]/np.sum(np.sum(srcmap001[the_index].data, axis=2), axis=1)[:-1, None, None]\n for source in fit03:\n the_index = srcmap03.index_of(source)\n modelData03 += fit03[source][:, None, None]*srcmap03[the_index].data[:-1, :, :]/np.sum(np.sum(srcmap03[the_index].data, axis=2), axis=1)[:-1, None, None]\n\n fig = plt.figure(figsize=[12, 4.5])\n\n vmin = 0\n vmax = 70.0\n cbStep = 10.0\n ax = fig.add_subplot(121, projection=wcs)\n ax=plt.gca()\n ax.tick_params(direction='in')\n c = Wedge((gc_l, gc_b), 1.0, theta1=0.0, theta2=360.0, width=14.0, edgecolor='black', facecolor='#474747', transform=ax.get_transform('galactic'))\n ax.add_patch(c)\n mappable=plt.imshow((image_data),cmap='inferno',origin='lower',norm=colors.PowerNorm(gamma=0.6),vmin=vmin, vmax=vmax, interpolation='gaussian')#\n plt.xlabel('Galactic 
Longitude')\n plt.ylabel('Galactic Latitude')\n plt.title('Data ($>6$ GeV)')\n cb = plt.colorbar(mappable, label='Counts per pixel', pad=0.01,ticks=np.arange(vmin, vmax+cbStep, cbStep))\n cb.ax.tick_params(width=0)\n\n\n ax2=fig.add_subplot(122, projection=wcs)\n ax2 = plt.gca()\n\n sources = []\n sources.append({\n 'Name':'3FGL J1745.3-2903c',\n 'RA':266.3434922,\n 'DEC':-29.06274323,\n 'color':'xkcd:bright light blue'})\n\n sources.append({\n 'Name':'1FIG J1748.2-2816',\n 'RA':267.1000722,\n 'DEC':-28.27707114,\n 'color':'xkcd:fire engine red'\n })\n\n sources.append({\n 'Name':'1FIG J1746.4-2843',\n 'RA':266.5942898,\n 'DEC':-28.86244442,\n 'color':'xkcd:fluorescent green'\n })\n\n sources.append({\n 'Name':'Galactic Center',\n 'RA':266.417,\n 'DEC':-29.0079,\n 'color':'black'\n })\n\n #Add source names:\n for source in sources:\n l, b = ra_dec_to_l_b(source['RA'], source['DEC'])\n ax2.scatter(l, b, color=source['color'],marker='x',s=45.0, transform=ax2.get_transform('galactic'), label=source['Name'])\n\n c2 = Wedge((gc_l, gc_b), 1.0, theta1=0.0, theta2=360.0, width=14.0, edgecolor='black', facecolor='#474747', transform=ax2.get_transform('galactic'))\n ax2.add_patch(c2)\n mappable2 = plt.imshow((np.sum(modelData03,axis=0)), cmap='inferno',norm=colors.PowerNorm(gamma=0.6),origin='lower',vmin=vmin, vmax=vmax, interpolation='gaussian')\n plt.xlabel('Galactic Longitude')\n plt.ylabel('Galactic Latitude')\n plt.title('Model ($>6$ GeV)')\n cb2 = plt.colorbar(mappable2, label='Counts per pixel', pad=0.01, ticks=np.arange(vmin, vmax+cbStep, cbStep))\n cb2.ax.tick_params(width=0)\n leg = plt.legend(loc=1,frameon=True)\n leg.get_frame().set_alpha(0.5)\n leg.get_frame().set_edgecolor('white')\n text1 = leg.get_texts()\n for text in text1:\n text.set_color('black')\n\n fig.tight_layout()\n plt.subplots_adjust(wspace = 0.13, left=0.04, bottom=0.13, top=0.92)\n plt.show()\n #plt.savefig('plots/dataModelComparison.pdf',bbox_inches='tight')" ]
[ "0.6726535", "0.6726535", "0.6726535", "0.6638139", "0.6608545", "0.6222338", "0.6214014", "0.6180789", "0.61609596", "0.6121418", "0.6121418", "0.6110018", "0.6100451", "0.60828114", "0.602451", "0.6002838", "0.59453905", "0.5928811", "0.59279364", "0.5912145", "0.5909047", "0.5908603", "0.590333", "0.5875453", "0.58676696", "0.58584654", "0.58288306", "0.57727474", "0.5771738", "0.5769251", "0.57664734", "0.5740618", "0.5731354", "0.5706835", "0.56728965", "0.5664797", "0.5660698", "0.5647377", "0.5631228", "0.5631013", "0.56172085", "0.5609178", "0.5594852", "0.5566738", "0.55646735", "0.55621713", "0.5544234", "0.55361545", "0.553491", "0.5528115", "0.5519828", "0.55152714", "0.5506104", "0.5505126", "0.5504413", "0.5494965", "0.5494742", "0.54842955", "0.5477083", "0.5474322", "0.5469279", "0.54688776", "0.5437966", "0.5427829", "0.54266447", "0.5419859", "0.5408034", "0.5407637", "0.53997505", "0.5396497", "0.5391729", "0.5389238", "0.5360445", "0.53594273", "0.53526866", "0.53327894", "0.5332613", "0.5322587", "0.5314631", "0.52996045", "0.5292338", "0.5291201", "0.5274495", "0.52679485", "0.5264346", "0.5261909", "0.526016", "0.5253044", "0.5246607", "0.5245568", "0.5242501", "0.5237406", "0.5237217", "0.5232665", "0.5232523", "0.52310944", "0.5205017", "0.5200206", "0.51968634", "0.51951265" ]
0.5425459
65
Merge draft invoices. Works only with the same partner. You can merge invoices and refund invoices with each other. Moves all lines onto the first invoice.
def merge_invoice(self, cr, uid, invoices, context=None):
    order_ids = []
    pick_ids = []
    if len(invoices) <= 1:
        return False
    parent = self.pool.get('account.invoice').browse(cr, uid, context['active_id'])
    for inv in invoices:
        if parent.partner_id != inv.partner_id:
            raise osv.except_osv(_("Partners don't match!"), _("Can not merge invoice(s) on different partners or states !."))
        if inv.state != 'draft':
            raise osv.except_osv(_("Invalid action !"), _("You can merge only invoices in draft state."))

    # Merge invoices that are in draft state
    inv_line_obj = self.pool.get('account.invoice.line')
    name = parent.name
    comment = parent.comment
    origin = parent.origin
    for inv in invoices:
        if inv.id == parent.id:
            continue

        # check if a line with the same product already exist. if so add quantity. else hang up invoice line to first invoice head.
        if inv.name:
            # Find if the same name already exist, if yes, skip to add.
            name_list = name.replace(' ', '').split(',')
            if inv.name not in name_list:
                name += ', %s' % inv.name
        if inv.comment:
            comment = comment and comment + ', %s' % inv.comment or inv.comment
        if inv.origin:
            origin += ', %s' % inv.origin
        line_ids = inv_line_obj.search(cr, uid, [('invoice_id', '=', inv.id)])
        for inv_lin in inv_line_obj.browse(cr, uid, line_ids):
            mrg_pdt_ids = inv_line_obj.search(cr, uid, [('invoice_id', '=', parent.id),
                                                        ('product_id', '=', inv_lin.product_id.id),
                                                        ('uos_id', '=', inv_lin.uos_id.id),
                                                        ('price_unit', '=', inv_lin.price_unit)  # kittiu: extra condition, unit price must also be the same.
                                                        ])
            if len(mrg_pdt_ids) == 1 and inv.type == parent.type:
                # product found --> add quantity
                inv_line_obj.write(cr, uid, mrg_pdt_ids, {'quantity': inv_line_obj._can_merge_quantity(cr, uid, mrg_pdt_ids[0], inv_lin.id)})
                inv_line_obj.unlink(cr, uid, inv_lin.id)
            elif inv.type == parent.type:
                inv_line_obj.write(cr, uid, inv_lin.id, {'invoice_id': parent.id})
            else:
                inv_line_obj.write(cr, uid, inv_lin.id, {'invoice_id': parent.id, 'quantity': -inv_lin.quantity})
        if inv.sale_order_ids:
            order_ids += [order.id for order in inv.sale_order_ids]
        if inv.picking_ids:
            pick_ids += [picking.id for picking in inv.picking_ids]

        self.write(cr, uid, parent.id, {'origin': origin, 'name': name, 'comment': comment})
        #Remove By DRB
        #cr.execute('update sale_order_invoice_rel set invoice_id = %s where invoice_id = %s', (parent.id, inv.id))
        #cr.execute('update picking_invoice_rel set invoice_id = %s where invoice_id = %s', (parent.id, inv.id))
        self.unlink(cr, uid, [inv.id])

    #Distinct List
    order_ids = list(set(order_ids))
    pick_ids = list(set(pick_ids))

    self.write(cr, uid, parent.id, {'sale_order_ids': [(6, 0, order_ids)], 'picking_ids': [(6, 0, pick_ids)]})
    self.button_reset_taxes(cr, uid, [parent.id])
    return parent.id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge_purchase_invoice(self):\r\n active_id = self.env['purchase.order'].browse(self.env['purchase.order']._context.get('active_ids'))\r\n journal_id = self.env['account.journal'].search([('type', '=', 'purchase')]) \r\n active_id_count = 0\r\n active_count = 0\r\n exist_vendor = []; invoice = [];exist_vendors = [];ctx = ();invoice_id = []\r\n for rec in active_id : \r\n po_reference = self.env['account.invoice'].search([('origin', 'like', rec.name)])\r\n active_count = len(active_id)\r\n if rec.picking_count >= 1 and rec.picking_count != rec.invoice_count:\r\n len_name = [] \r\n for inv in po_reference: \r\n len_name = inv.origin.split(\":\") \r\n if rec.name in len_name:\r\n if po_reference.state == 'draft':\r\n for record in po_reference.invoice_line_ids:\r\n print (record.line_id)\r\n for res in rec.order_line:\r\n if res.id == record.line_id: \r\n record.write({'quantity':res.qty_received})\r\n res.write({'qty_invoiced':record.quantity})\r\n \r\n else:\r\n \r\n po_list = [];line_values = {};lines = {};purchase = []\r\n if rec.state in 'purchase' and rec.invoice_status in 'to invoice':\r\n purchase.append(rec.id)\r\n active_id_count = len(purchase)\r\n if rec.partner_id.id in exist_vendor:\r\n for inv in invoice:\r\n if inv['partner_id'] == rec.partner_id.id:\r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received \r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received ,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids) or False] ,\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id ,\r\n 'line_id':recc.id\r\n }) \r\n inv['invoice_line_ids'].append(line_values)\r\n inv['origin'] = inv['origin'] + ':' + rec.name\r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendors.append(rec.partner_id.id) \r\n else: \r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received\r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids)or False],\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id,\r\n 'line_id':recc.id\r\n }) \r\n print (rec.id)\r\n po_list.append(line_values) \r\n invoice.append({'origin':rec.name, 'partner_id': rec.partner_id.id, 'invoice_line_ids':po_list, 'account_id': rec.partner_id.property_account_payable_id.id, 'type': 'in_invoice', 'journal_id':journal_id.id,'date_invoice':datetime.today()}) \r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendor.append(rec.partner_id.id) \r\n \r\n else:\r\n po_list = [];line_values = {};lines = {};purchase = []\r\n if rec.state in 'purchase' and rec.invoice_status in 'to invoice':\r\n purchase.append(rec.id)\r\n active_id_count = len(purchase)\r\n if rec.partner_id.id in exist_vendor:\r\n for inv in invoice:\r\n if inv['partner_id'] == rec.partner_id.id:\r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n 
else:\r\n qty_received = recc.qty_received\r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received ,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids) or False] ,\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id ,\r\n 'line_id':recc.id\r\n }) \r\n inv['invoice_line_ids'].append(line_values)\r\n inv['origin'] = inv['origin'] + ':' + rec.name\r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendors.append(rec.partner_id.id) \r\n else: \r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received\r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids)or False],\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id,\r\n 'line_id':recc.id\r\n }) \r\n print (rec.id)\r\n po_list.append(line_values) \r\n invoice.append({'origin':rec.name, 'partner_id': rec.partner_id.id, 'invoice_line_ids':po_list, 'account_id': rec.partner_id.property_account_payable_id.id, 'type': 'in_invoice', 'journal_id':journal_id.id,'date_invoice':date.today()}) \r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendor.append(rec.partner_id.id) \r\n \r\n invoices = []\r\n invoice_counts = 0\r\n for record in invoice:\r\n invoice_id = self.env['account.invoice'].create(record)\r\n invoices.append(invoice_id.id)\r\n invoice_counts = len(invoices)\r\n if active_id_count == 1:\r\n if invoice_counts == 1:\r\n form_view = self.env.ref('purchase.view_invoice_supplier_purchase_form').id\r\n tree_view = self.env.ref('account.invoice_tree').id \r\n return{\r\n 'name': _('Invoice'),\r\n 'type':'ir.actions.act_window',\r\n 'view_type':'form',\r\n 'view_mode':'form,tree',\r\n 'res_model':'account.invoice',\r\n 'res_id':invoices[0],\r\n 'views_id':False,\r\n 'views':[(form_view , 'form'), (tree_view , 'tree')],\r\n 'domain':[('id', 'in', invoices)],\r\n 'target': 'current',\r\n } \r\n else: \r\n form_view = self.env.ref('account.invoice_supplier_form').id\r\n tree_view = self.env.ref('account.invoice_supplier_tree').id \r\n return{\r\n 'name': _('Invoice'),\r\n 'type':'ir.actions.act_window',\r\n 'view_type':'form',\r\n 'view_mode':'form,tree',\r\n 'res_model':'account.invoice',\r\n 'views_id':True,\r\n 'views':[(tree_view , 'tree'), (form_view , 'form')],\r\n 'domain':[('id', 'in', invoices)],\r\n 'target': 'current',\r\n }", "def create_or_find_b2b_invoices_and_process_ept(self, row, sale_order, invoice_date, tax):\n\n vat_number = row.get('Buyer Tax Registration', False)\n invoice_number = row.get('VAT Invoice Number', False)\n\n invoices = sale_order.invoice_ids.filtered(\n lambda x: x.type == 'out_invoice' and x.state != 'cancel')\n if not invoices:\n lines = sale_order.order_line.filtered(lambda line: line.qty_to_invoice > 0)\n if not lines:\n return False\n invoices = sale_order._create_invoices()\n self.write({'invoice_ids': [(4, invoices and invoices.id)]})\n\n for invoice in invoices:\n if not invoice.partner_id.vat:\n invoice.partner_id.vat = vat_number\n\n payments_lines = []\n if invoice.invoice_payments_widget != 'false':\n 
payments_dict = json.loads(invoice.invoice_payments_widget)\n payments_content = payments_dict.get('content', [])\n for line in payments_content:\n payments_lines.append(line.get('payment_id', False))\n\n invoice_line = invoice.mapped('invoice_line_ids').filtered(\\\n lambda line: line.tax_ids != tax)\n if invoice_line:\n invoice.button_draft()\n invoice.write({'ref': invoice_number, 'date': invoice_date})\n\n if len(invoice_line) > 1:\n for line in invoice_line:\n line.with_context({'check_move_validity': False}).write( \\\n {'tax_ids': [(6, 0, [tax.id])]})\n else:\n invoice_line.with_context({'check_move_validity': False}).write( \\\n {'tax_ids': [(6, 0, [tax.id])]})\n\n invoice.with_context({'check_move_validity': False})._recompute_tax_lines( \\\n recompute_tax_base_amount=True)\n invoice.action_post()\n for line in payments_lines:\n invoice.js_assign_outstanding_line(line)\n\n return True", "def action_draft(self):\n context = self._context or {}\n inv_obj = self.env['account.invoice']\n\n brw = self.browse( self.ids[0])\n inv_ids = [i.invoice_id.id for i in brw.line_ids]\n if inv_ids:\n inv_obj.write( {'wh_src_id': False})\n\n return self.write( {'state': 'draft'})", "def do_merge(self, cr, uid, ids, context=None): \n invent_obj = self.pool.get('stock.inventory')\n invent_line_obj = self.pool.get('stock.inventory.line')\n invent_lines = {}\n if context is None:\n context = {}\n for inventory in invent_obj.browse(cr, uid, context['active_ids'], context=context):\n if inventory.state == \"done\":\n raise osv.except_osv(_('Warning!'),\n _('Merging is only allowed on draft inventories.'))\n\n for line in inventory.inventory_line_id:\n key = (line.location_id.id, line.product_id.id, line.product_uom.id)\n if key in invent_lines:\n invent_lines[key] += line.product_qty\n else:\n invent_lines[key] = line.product_qty\n\n\n new_invent = invent_obj.create(cr, uid, {\n 'name': 'Merged inventory'\n }, context=context)\n\n for key, quantity in invent_lines.items():\n invent_line_obj.create(cr, uid, {\n 'inventory_id': new_invent,\n 'location_id': key[0],\n 'product_id': key[1],\n 'product_uom': key[2],\n 'product_qty': quantity,\n })\n\n return {'type': 'ir.actions.act_window_close'}", "def action_invoice_create(self, grouped=False, final=False):\n inv_obj = self.env['account.invoice']\n precision = self.env['decimal.precision'].sudo().precision_get('Product Unit of Measure')\n invoices = {}\n references = {}\n for order in self:\n group_key = order.id if grouped else (order.partner_invoice_id.id, order.currency_id.id)\n for line in order.order_line.sorted(key=lambda l: l.qty_to_invoice < 0):\n if float_is_zero(line.qty_to_invoice, precision_digits=precision):\n continue\n if group_key not in invoices:\n inv_data = order._prepare_invoice()\n invoice = inv_obj.sudo().create(inv_data)\n references[invoice] = order\n invoices[group_key] = invoice\n invoice['sale_order_id'] = order.id\n elif group_key in invoices:\n vals = {}\n if order.name not in invoices[group_key].origin.split(', '):\n vals['origin'] = invoices[group_key].origin + ', ' + order.name\n if order.client_order_ref and order.client_order_ref not in invoices[group_key].name.split(\n ', ') and order.client_order_ref != invoices[group_key].name:\n vals['name'] = invoices[group_key].name + ', ' + order.client_order_ref\n invoices[group_key].sudo().write(vals)\n if line.qty_to_invoice > 0:\n line.invoice_line_create(invoices[group_key].id, line.qty_to_invoice)\n elif line.qty_to_invoice < 0 and final:\n 
line.invoice_line_create(invoices[group_key].id, line.qty_to_invoice)\n\n if references.get(invoices.get(group_key)):\n if order not in references[invoices[group_key]]:\n references[invoices[group_key]] |= order\n if not invoices:\n raise UserError(_('There is no invoiceable line.'))\n for invoice in invoices.values():\n if not invoice.invoice_line_ids:\n raise UserError(_('There is no invoiceable line.'))\n # If invoice is negative, do a refund invoice instead\n if invoice.amount_untaxed < 0:\n invoice.type = 'out_refund'\n for line in invoice.invoice_line_ids:\n line.quantity = -line.quantity\n # Use additional field helper function (for account extensions)\n for line in invoice.invoice_line_ids:\n line._set_additional_fields(invoice)\n # Necessary to force computation of taxes. In account_invoice, they are triggered\n # by onchanges, which are not triggered when doing a create.\n invoice.compute_taxes()\n invoice.message_post_with_view('mail.message_origin_link',\n values={'self': invoice, 'origin': references[invoice]},\n subtype_id=self.env.ref('mail.mt_note').id)\n return [inv.id for inv in invoices.values()]", "def action_invoice_create(self, grouped=False, final=False):\n inv_obj = self.env['account.invoice']\n precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')\n invoices = {}\n references = {}\n invoices_origin = {}\n invoices_name = {}\n for order in self:\n group_key = order.id if grouped else (order.partner_id.id, order.currency_id.id)\n for line in order.order_line.sorted(key=lambda l: l.qty_received - l.qty_invoiced < 0):\n if float_is_zero(line.qty_received - line.qty_invoiced, precision_digits=precision):\n continue\n if group_key not in invoices:\n inv_data = order._prepare_invoice()\n invoice = inv_obj.create(inv_data)\n references[invoice] = order\n invoices[group_key] = invoice\n invoices_origin[group_key] = [invoice.origin]\n invoices_name[group_key] = [invoice.name]\n elif group_key in invoices:\n if order.name not in invoices_origin[group_key]:\n invoices_origin[group_key].append(order.name)\n if order.partner_ref and order.partner_ref not in invoices_name[group_key]:\n invoices_name[group_key].append(order.partner_ref)\n\n if line.qty_received - line.qty_invoiced > 0:\n line.invoice_line_create(invoices[group_key].id, line.qty_received - line.qty_invoiced)\n elif line.qty_received - line.qty_invoiced < 0 and final:\n line.invoice_line_create(invoices[group_key].id, line.qty_received - line.qty_invoiced)\n\n if references.get(invoices.get(group_key)):\n if order not in references[invoices[group_key]]:\n references[invoices[group_key]] |= order\n\n for group_key in invoices:\n invoices[group_key].write({'name': ', '.join(invoices_name[group_key]),\n 'origin': ', '.join(invoices_origin[group_key])})\n\n if not invoices:\n raise UserError(_('There is no invoicable line.'))\n\n for invoice in invoices.values():\n if not invoice.invoice_line_ids:\n raise UserError(_('There is no invoicable line.'))\n # If invoice is negative, do a refund invoice instead\n if invoice.amount_total < 0:\n invoice.type = 'in_refund'\n for line in invoice.invoice_line_ids:\n line.quantity = -line.quantity\n # Necessary to force computation of taxes. 
In account_invoice, they are triggered\n # by onchanges, which are not triggered when doing a create.\n invoice.compute_taxes()\n invoice.message_post_with_view('mail.message_origin_link',\n values={'self': invoice, 'origin': references[invoice]},\n subtype_id=self.env.ref('mail.mt_note').id)\n return [inv.id for inv in invoices.values()]", "def action_invoice_create(self, grouped=False, final=False):\n if self.invoice_option == 'before_delivery':\n inv_obj = self.env['account.invoice']\n for order in self:\n inv_data = order._prepare_invoice()\n invoice = inv_obj.create(inv_data)\n for inv_line in order.order_line:\n inv_line.invoice_line_create(invoice.id, inv_line.product_uom_qty)\n\n else:\n inv_obj = self.env['account.invoice']\n precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')\n invoices = {}\n references = {}\n invoices_origin = {}\n invoices_name = {}\n\n # Keep track of the sequences of the lines\n # To keep lines under their section\n inv_line_sequence = 0\n for order in self:\n group_key = order.id if grouped else (order.partner_invoice_id.id, order.currency_id.id)\n\n # We only want to create sections that have at least one invoiceable line\n pending_section = None\n\n # Create lines in batch to avoid performance problems\n line_vals_list = []\n # sequence is the natural order of order_lines\n for line in order.order_line:\n if line.display_type == 'line_section':\n pending_section = line\n continue\n if float_is_zero(line.qty_to_invoice, precision_digits=precision):\n continue\n if group_key not in invoices:\n inv_data = order._prepare_invoice()\n invoice = inv_obj.create(inv_data)\n references[invoice] = order\n invoices[group_key] = invoice\n invoices_origin[group_key] = [invoice.origin]\n invoices_name[group_key] = [invoice.name]\n elif group_key in invoices:\n if order.name not in invoices_origin[group_key]:\n invoices_origin[group_key].append(order.name)\n if order.client_order_ref and order.client_order_ref not in invoices_name[group_key]:\n invoices_name[group_key].append(order.client_order_ref)\n\n if line.qty_to_invoice > 0 or (line.qty_to_invoice < 0 and final):\n if pending_section:\n section_invoice = pending_section.invoice_line_create_vals(\n invoices[group_key].id,\n pending_section.qty_to_invoice\n )\n inv_line_sequence += 1\n section_invoice[0]['sequence'] = inv_line_sequence\n line_vals_list.extend(section_invoice)\n pending_section = None\n\n inv_line_sequence += 1\n inv_line = line.invoice_line_create_vals(\n invoices[group_key].id, line.qty_to_invoice\n )\n inv_line[0]['sequence'] = inv_line_sequence\n line_vals_list.extend(inv_line)\n\n if references.get(invoices.get(group_key)):\n if order not in references[invoices[group_key]]:\n references[invoices[group_key]] |= order\n\n self.env['account.invoice.line'].create(line_vals_list)\n\n for group_key in invoices:\n invoices[group_key].write({'name': ', '.join(invoices_name[group_key]),\n 'origin': ', '.join(invoices_origin[group_key])})\n sale_orders = references[invoices[group_key]]\n if len(sale_orders) == 1:\n invoices[group_key].reference = sale_orders.reference\n\n if not invoices:\n raise UserError(_(\n 'There is no invoiceable line. If a product has a Delivered quantities invoicing policy, please make sure that a quantity has been delivered.'))\n\n for invoice in invoices.values():\n invoice.compute_taxes()\n if not invoice.invoice_line_ids:\n raise UserError(_(\n 'There is no invoiceable line. 
If a product has a Delivered quantities invoicing policy, please make sure that a quantity has been delivered.'))\n # If invoice is negative, do a refund invoice instead\n if invoice.amount_total < 0:\n invoice.type = 'out_refund'\n for line in invoice.invoice_line_ids:\n line.quantity = -line.quantity\n # Use additional field helper function (for account extensions)\n for line in invoice.invoice_line_ids:\n line._set_additional_fields(invoice)\n # Necessary to force computation of taxes. In account_invoice, they are triggered\n # by onchanges, which are not triggered when doing a create.\n invoice.compute_taxes()\n # Idem for partner\n so_payment_term_id = invoice.payment_term_id.id\n fp_invoice = invoice.fiscal_position_id\n invoice._onchange_partner_id()\n invoice.fiscal_position_id = fp_invoice\n # To keep the payment terms set on the SO\n invoice.payment_term_id = so_payment_term_id\n invoice.message_post_with_view('mail.message_origin_link',\n values={'self': invoice, 'origin': references[invoice]},\n subtype_id=self.env.ref('mail.mt_note').id)\n return [inv.id for inv in invoices.values()]", "def _get_related_invoices(self):\n rslt = super(StockMove, self)._get_related_invoices()\n rslt += self.mapped('picking_id.subcontract_id.invoice_ids').filtered(lambda x: x.state not in ('draft', 'cancel'))\n return rslt", "def create_invoices(self, cr, uid, ids, context=None):\n invoice_list = []\n po_obj = self.pool.get('purchase.order')\n inv_line_obj = self.pool.get('account.invoice.line')\n inv_obj = self.pool.get('account.invoice')\n addr_obj = self.pool.get('res.partner')\n journal_obj = self.pool.get('account.journal')\n if context is None:\n context = {}\n\n for purchase_adv_obj in self.browse(cr, uid, ids, context=context):\n for purchase_order in po_obj.browse(cr, uid, context.get('active_ids', []), context=context):\n inv_line_ids = []\n invoice_ids = []\n val = inv_line_obj.product_id_change(cr, uid, [], purchase_adv_obj.product_id.id,\n uom_id=False, partner_id=purchase_order.partner_id.id, fposition_id=purchase_order.fiscal_position.id)\n line_id = inv_line_obj.create(cr, uid, {\n 'name': val['value']['name'],\n 'account_id': val['value']['account_id'],\n 'price_unit': purchase_adv_obj.amount,\n 'quantity': purchase_adv_obj.qtty,\n 'discount': False,\n 'uos_id': val['value']['uos_id'],\n 'product_id': purchase_adv_obj.product_id.id,\n 'invoice_line_tax_id': [(6, 0, val['value']['invoice_line_tax_id'])],\n })\n inv_line_ids.append(line_id)\n addr = addr_obj.address_get(cr, uid, [purchase_order.partner_id.id], ['invoice'])\n journal_ids = journal_obj.search(cr, uid, [('type', '=', 'purchase')])\n context.update({'type':'in_invoice','journal_type':'purchase'})\n inv_vals = {\n 'name': purchase_order.partner_ref or purchase_order.name,\n 'origin': purchase_order.name,\n 'type': 'in_invoice',\n 'reference': False,\n 'account_id': purchase_order.partner_id.property_account_payable.id,\n 'journal_id':journal_ids and journal_ids[0] or False,\n 'partner_id': purchase_order.partner_id.id,\n 'address_invoice_id': addr['invoice'],\n 'invoice_line': [(6, 0, inv_line_ids)],\n 'currency_id': purchase_order.pricelist_id.currency_id.id,\n 'comment': '',\n 'payment_term': purchase_order.payment_term_id and purchase_order.payment_term_id.id or False,\n 'fiscal_position': purchase_order.fiscal_position.id or purchase_order.partner_id.property_account_position.id,\n 'prepaid': True\n }\n\n inv_id = inv_obj.create(cr, uid, inv_vals, context=context)\n inv_obj.button_reset_taxes(cr, uid, [inv_id], 
context=context)\n for invoice in purchase_order.invoice_ids:\n invoice_ids.append(invoice.id)\n invoice_ids.append(inv_id)\n po_obj.write(cr, uid, purchase_order.id, {'invoice_ids': [(6, 0, invoice_ids)]})\n invoice_list.append(inv_id)\n\n if purchase_order.invoice_method in ('picking','order'):\n self.pool.get('purchase.order.line').create(cr, uid, {\n 'order_id': purchase_order.id,\n 'name': val['value']['name'],\n 'date_planned':purchase_order.date_order,\n 'price_unit': -purchase_adv_obj.amount,\n 'product_uom_qty': purchase_adv_obj.qtty,\n 'product_uos': val['value']['uos_id'],\n 'product_uom': val['value']['uos_id'],\n 'product_id': purchase_adv_obj.product_id.id,\n 'adavance_product':True,\n 'discount': False,\n 'taxes_id': [(6, 0, val['value']['invoice_line_tax_id'])],\n }, context=context)\n\n\n context.update({'invoice_id':invoice_list})\n return {\n 'name': 'Open Invoice',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'purchase.open.invoice',\n 'type': 'ir.actions.act_window',\n 'target': 'new',\n 'context': context\n }", "def action_move_create(self):\n inv_obj = self.env['account.invoice']\n context = dict(self._context or {})\n context.update({'wh_src': True})\n ret = self.browse(self.ids[0])\n for line in ret.line_ids:\n if line.move_id:\n raise exceptions.except_orm(\n _('Invoice already withhold !'),\n _(\"You must omit the follow invoice '%s' !\") %\n (line.invoice_id.number,))\n\n acc_id = ret.account_id.id\n journal_id = ret.journal_id.id\n demo_enabled = self.env['ir.module.module'].search(\n [('name', '=', 'base'), ('demo', '=', True)])\n args = [('id', 'in')]\n if not demo_enabled:\n args.append(('special', '=', False))\n\n if ret.line_ids:\n for line in ret.line_ids:\n writeoff_account_id, writeoff_journal_id = False, False\n amount = line.wh_amount\n if line.invoice_id.type in ['in_invoice', 'in_refund']:\n name = 'COMP. RET. CRS ' + ret.number + ' Doc. ' + (\n line.invoice_id.supplier_invoice_number or '')\n else:\n name = 'COMP. RET. CRS ' + ret.number + ' Doc. 
' + (\n line.invoice_id.number or '')\n # ret_move = inv_obj.ret_and_reconcile(\n # self, [line.invoice_id.id], amount, acc_id,\n # journal_id, writeoff_account_id,\n # writeoff_journal_id, ret.date_ret, name, [line]\n # )\n # rl = {\n # 'move_id': ret_move['move_id'],\n # }\n #lines = [(1, line.id)]\n self.write({'line_ids': line})\n\n if (line.invoice_id.type in [\n 'out_invoice', 'out_refund']):\n inv_obj.write({'wh_src_id': ret.id})\n else:\n return False\n return True", "def recompute_billing_lines(self, cr, uid, ids, partner_id, journal_id, price, currency_id, date, context=None):\n def _remove_noise_in_o2m():\n \"\"\"if the line is partially reconciled, then we must pay attention to display it only once and\n in the good o2m.\n This function returns True if the line is considered as noise and should not be displayed\n \"\"\"\n if line.reconcile_partial_id:\n sign = 1\n if currency_id == line.currency_id.id:\n if line.amount_residual_currency * sign <= 0:\n return True\n else:\n if line.amount_residual * sign <= 0:\n return True\n return False\n\n if context is None:\n context = {}\n billing_date_condition = context.get('billing_date_condition', [])\n context_multi_currency = context.copy()\n if date:\n context_multi_currency.update({'date': date})\n\n currency_pool = self.pool.get('res.currency')\n move_line_pool = self.pool.get('account.move.line')\n partner_pool = self.pool.get('res.partner')\n journal_pool = self.pool.get('account.journal')\n line_pool = self.pool.get('account.billing.line')\n\n #set default values\n default = {\n 'value': {'line_cr_ids': [] },\n }\n\n #drop existing lines\n line_ids = ids and line_pool.search(cr, uid, [('billing_id', '=', ids[0])]) or False\n if line_ids:\n line_pool.unlink(cr, uid, line_ids)\n\n if not partner_id or not journal_id:\n return default\n\n journal = journal_pool.browse(cr, uid, journal_id, context=context)\n partner = partner_pool.browse(cr, uid, partner_id, context=context)\n currency_id = currency_id or journal.company_id.currency_id.id\n account_id = False\n if journal.type in ('sale','sale_refund'):\n account_id = partner.property_account_receivable.id\n elif journal.type in ('purchase', 'purchase_refund','expense'):\n account_id = partner.property_account_payable.id\n else:\n account_id = journal.default_credit_account_id.id or journal.default_debit_account_id.id\n\n default['value']['account_id'] = account_id\n\n if journal.type not in ('cash', 'bank'):\n return default\n\n total_credit = price or 0.0\n account_type = 'receivable'\n\n if not context.get('move_line_ids', False):\n ids = move_line_pool.search(cr, uid, \n [('state','=','valid'), ('account_id.type', '=', account_type), ('reconcile_id', '=', False), ('partner_id', '=', partner_id), \n ] + billing_date_condition, \n context=context)\n else:\n ids = context['move_line_ids']\n invoice_id = context.get('invoice_id', False)\n company_currency = journal.company_id.currency_id.id\n move_line_found = False\n\n #order the lines by most old first\n ids.reverse()\n account_move_lines = move_line_pool.browse(cr, uid, ids, context=context)\n\n #compute the total debit/credit and look for a matching open amount or invoice\n for line in account_move_lines:\n if _remove_noise_in_o2m():\n continue\n\n if invoice_id:\n if line.invoice.id == invoice_id:\n #if the invoice linked to the billing line is equal to the invoice_id in context\n #then we assign the amount on that line, whatever the other billing lines\n move_line_found = line.id\n break\n elif currency_id == 
company_currency:\n #otherwise treatments is the same but with other field names\n if line.amount_residual == price:\n #if the amount residual is equal the amount billing, we assign it to that billing\n #line, whatever the other billing lines\n move_line_found = line.id\n break\n #otherwise we will split the billing amount on each line (by most old first)\n total_credit += line.credit or 0.0\n elif currency_id == line.currency_id.id:\n if line.amount_residual_currency == price:\n move_line_found = line.id\n break\n total_credit += line.credit and line.amount_currency or 0.0\n\n #billing line creation\n for line in account_move_lines:\n\n if _remove_noise_in_o2m():\n continue\n\n if line.currency_id and currency_id==line.currency_id.id:\n amount_original = abs(line.amount_currency)\n amount_unreconciled = abs(line.amount_residual_currency)\n else:\n amount_original = currency_pool.compute(cr, uid, company_currency, currency_id, line.credit or 0.0)\n amount_unreconciled = currency_pool.compute(cr, uid, company_currency, currency_id, abs(line.amount_residual))\n line_currency_id = line.currency_id and line.currency_id.id or company_currency\n rs = {\n 'move_line_id':line.id,\n 'type': line.credit and 'dr' or 'cr',\n 'reference':line.invoice.reference,\n 'account_id':line.account_id.id,\n 'amount_original': amount_original,\n 'amount': (move_line_found == line.id) and min(abs(price), amount_unreconciled) or amount_unreconciled,\n 'date_original':line.date,\n 'date_due':line.date_maturity,\n 'amount_unreconciled': amount_unreconciled,\n 'currency_id': line_currency_id,\n }\n \n # Negate DR records\n if rs['type'] == 'dr':\n rs['amount_original'] = - rs['amount_original']\n rs['amount'] = - rs['amount']\n rs['amount_unreconciled'] = - rs['amount_unreconciled']\n\n if rs['amount_unreconciled'] == rs['amount']:\n rs['reconcile'] = True\n else:\n rs['reconcile'] = False\n\n default['value']['line_cr_ids'].append(rs)\n\n# if ttype == 'payment' and len(default['value']['line_cr_ids']) > 0:\n# default['value']['pre_line'] = 1\n# elif ttype == 'receipt' and len(default['value']['line_dr_ids']) > 0:\n# default['value']['pre_line'] = 1\n default['value']['billing_amount'] = self._compute_billing_amount(cr, uid, default['value']['line_cr_ids'], price)\n return default", "def create_invoice(self):\n for line in self:\n # if not line.account_id:\n # raise UserError(_('Please Add the incoming Account !!'))\n self.ensure_one()\n journal_id = self.env['account.journal'].search([\n ('type', '=', 'sale')], limit=1)\n inv_line_main = {\n 'name': line.description.name,\n 'price_unit': line.amount or 0.00,\n 'quantity': 1,\n 'discount': line.discount,\n 'account_id': line.description.property_account_income_id.id or line.description.categ_id.property_account_income_categ_id.id or False,\n }\n inv_values = {\n 'partner_id': line.patient_id.partner_id.id,\n 'patient_id': line.patient_id.id,\n 'dentist': line.dentist.id,\n 'move_type': 'out_invoice',\n 'invoice_date': datetime.now().strftime(DF) or False,\n 'journal_id': journal_id and journal_id.id or False,\n 'teeth_id': line.patient_id and line.patient_id.id or False,\n }\n acc_id = self.env['account.move'].create(inv_values)\n acc_id.write({'invoice_line_ids': [(0, 0, inv_line_main)]})\n\n self.write({'invc_id': acc_id.id, 'inv': True})\n context = dict(self._context or {})\n wiz_form_id = self.env['ir.model.data'].get_object_reference(\n 'account', 'view_move_form')[1]\n\n return {\n 'view_type': 'form',\n 'view_id': wiz_form_id,\n 'view_mode': 'form',\n 
'res_model': 'account.move',\n 'res_id': self.invc_id.id,\n 'type': 'ir.actions.act_window',\n 'target': 'current',\n 'context': context,\n }", "def action_move_create(self):\n res = super(HrExpenseExpense, self).action_move_create()\n for expense in self:\n if expense.invoice:\n partner = expense.invoice.partner_id.commercial_partner_id\n move_lines = expense.account_move_id.line_ids\n c_move_lines = move_lines.filtered(\n lambda x: x.partner_id == partner and\n x.debit == abs(round(expense.invoice.residual, 2)))\n c_move_lines |= expense.invoice.move_id.line_ids.filtered(\n lambda x: x.account_id == expense.invoice.account_id and\n x.credit == abs(round(expense.invoice.residual, 2)))\n if len(c_move_lines) != 2:\n raise exceptions.Warning(\n _('Cannot reconcile supplier invoice payable with '\n 'generated line. Please check amounts and see '\n 'if the invoice is already added or paid. '\n 'Invoice: %s') % expense.invoice.number)\n c_move_lines.reconcile()\n return res", "def withholding_reconciliation(self):\n\n for inv_brw in self:\n move_ids = [move.id or False\n for move in (inv_brw.move_id, inv_brw.wh_move_id)]\n\n if not all(move_ids):\n continue\n\n line_ids = [line.id\n for move2 in (inv_brw.move_id, inv_brw.wh_move_id)\n for line in move2.line_id\n if line.account_id.id == inv_brw.account_id.id]\n\n if len(line_ids) < 2:\n continue\n\n # /!\\ NOTE: There could be some payments in the invoice let us\n # reconcile them too\n line_ids += [lin2.id for lin2 in inv_brw.payment_ids]\n line_ids = list(set(line_ids))\n\n line_ids = self.env['account.move.line'].browse(line_ids)\n line_ids.reconcile_partial()\n\n return True", "def _prepare_invoice(self, cr, uid, order, lines, context=None):\n if context is None:\n context = {}\n journal_ids = self.pool.get('account.journal').search(cr, uid,\n [('type', '=', 'sale'), ('company_id', '=', order.company_id.id)],\n limit=1)\n if not journal_ids:\n raise osv.except_osv(_('Error !'),\n _('There is no sales journal defined for this company: \"%s\" (id:%d)') % (order.company_id.name, order.company_id.id))\n\n invoice_vals = {\n 'name': order.client_order_ref or '',\n 'origin': order.name,\n 'type': 'out_invoice',\n 'reference': order.client_order_ref or order.name,\n 'account_id': order.partner_id.property_account_receivable.id,\n 'journal_id': order.partner_id.property_default_sale_invoice_journal.id,\n 'partner_id': order.partner_id.id,\n 'address_invoice_id': order.partner_invoice_id.id,\n #'address_contact_id': order.partner_order_id.id,\n 'invoice_line': [(6, 0, lines)],\n 'currency_id': order.pricelist_id.currency_id.id,\n 'comment': order.note,\n 'payment_term': order.payment_term and order.payment_term.id or False,\n 'fiscal_position': order.fiscal_position.id or order.partner_id.property_account_position.id,\n 'date_invoice': context.get('date_invoice', False),\n 'company_id': order.company_id.id,\n 'user_id': order.user_id and order.user_id.id or False\n }\n\n # Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1\n invoice_vals.update(self._inv_get(cr, uid, order, context=context))\n\n return invoice_vals", "def _prepare_invoice_grp(self, cr, uid, order, line_ids, context=None):\n if context is None:\n context = {}\n context = dict(context)\n\n inv_data = super(grp_orden_compra, self)._prepare_invoice_grp(cr, uid, order, line_ids, context=context)\n\n # adicionando campos numero compromiso y no obligacion desde la OC\n monto_oc = math.floor(order.total_llavep or 0)\n monto_oc = int(monto_oc)\n 
inv_data.update({'nro_compromiso': order.nro_compromiso or False, 'monto_comprometido': monto_oc or 0, 'currency_id':order.currency_oc.id})\n\n # adicionando campos no afectacion y monto autorizado desde la primera APG\n if order.pc_apg_id:\n first_apg = order.pc_apg_id\n monto_apg = math.floor(first_apg.total_llavep)\n monto_apg = int(monto_apg)\n # TODO R SPRING X ADICIONANDO CABEZALES SIIF A LA FACTURA A PARTIR DE LA APG\n inv_data.update({'nro_afectacion': first_apg.nro_afectacion_siif or False,\n 'monto_afectado': monto_apg or 0,\n 'siif_tipo_ejecucion':first_apg.siif_tipo_ejecucion.id,\n 'siif_concepto_gasto':first_apg.siif_concepto_gasto.id,\n 'siif_financiamiento':first_apg.siif_financiamiento.id,\n 'siif_codigo_sir':first_apg.siif_codigo_sir.id,\n 'siif_nro_fondo_rot':first_apg.siif_nro_fondo_rot.id,\n }) # cambiando nro_afectacion 23/10\n # inv.update({'nro_afectacion': first_apg.nro_afectacion_apg or False, 'monto_afectado': monto_apg or 0})\n\n # # TODO R SPRING X NO LLEVAR LAS LLAVES PRESUPUESTALES POR DEFECTO\n # if order.pc_apg_id.llpapg_ids:\n # llavep_ids = []\n # for llavep in order.pc_apg_id.llpapg_ids:\n # llavep_ids.append((0, 0, {\n # 'programa_id': llavep.programa_id.id,\n # 'odg_id': llavep.odg_id.id,\n # 'auxiliar_id': llavep.auxiliar_id.id,\n # 'disponible': llavep.disponible,\n # 'proyecto_id': llavep.proyecto_id.id,\n # 'fin_id': llavep.fin_id.id,\n # 'mon_id': llavep.mon_id.id,\n # 'tc_id': llavep.tc_id.id,\n # 'importe': llavep.importe\n # }))\n # inv_data.update({'llpapg_ids': llavep_ids})\n\n return inv_data", "def _prepare_invoice(self, cr, uid, order, lines, context=None):\n if context is None:\n context = {}\n journal_id = self.pool['account.invoice'].default_get(cr, uid, ['journal_id'], context=context)['journal_id']\n if not journal_id:\n raise osv.except_osv(_('Error!'),\n _('Please define sales journal for this company: \"%s\" (id:%d).') % (order.company_id.name, order.company_id.id))\n invoice_vals = {\n 'name': order.client_order_ref or '',\n 'origin': order.name,\n 'type': 'out_invoice',\n 'reference': order.client_order_ref or order.name,\n 'account_id': order.partner_invoice_id.property_account_receivable.id,\n 'partner_id': order.partner_invoice_id.id,\n 'journal_id': journal_id,\n 'invoice_line': [(6, 0, lines)],\n 'currency_id': order.pricelist_id.currency_id.id,\n 'comment': order.note,\n 'payment_term': order.payment_term and order.payment_term.id or False,\n 'fiscal_position': order.fiscal_position.id or order.partner_invoice_id.property_account_position.id,\n 'date_invoice': context.get('date_invoice', False),\n 'company_id': order.company_id.id,\n 'user_id': order.user_id and order.user_id.id or False,\n 'section_id' : order.section_id.id,\n 'test_1' :order.test\n }\n\n # Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1\n invoice_vals.update(self._inv_get(cr, uid, order, context=context))\n return invoice_vals", "def duplicate_invoice(invoice):\n from invoicer.models import Invoice\n from invoicer.models import LineItem\n\n # copy main attributes\n new_invoice = Invoice(\n company=invoice.company,\n invoice_date=datetime.now(),\n client=invoice.client,\n location=invoice.location,\n tax_rate=invoice.tax_rate,\n left_address=invoice.left_address,\n right_address=invoice.right_address,\n terms=invoice.terms,\n footer=invoice.footer\n )\n new_invoice.save()\n\n # now line items\n for line_item in invoice.line_items.all():\n new_invoice.line_items.add(LineItem(\n name=line_item.name,\n description=line_item.description,\n 
price=line_item.price,\n taxable=line_item.taxable,\n item=line_item.item,\n quantity=line_item.quantity\n ))\n\n return new_invoice", "def action_cancel_draft(self):\n for statement in self:\n statement_lines = statement.credit_move_line_ids + statement.debit_move_line_ids\n for statement_line in statement_lines:\n if statement_line:\n statement_line.write({'cleared_bank_account': False,\n 'research_required': False\n })\n if statement_line.move_line_id:\n statement_line.move_line_id.write({'cleared_bank_account': False,\n 'bank_acc_rec_statement_id': False,\n })\n if statement_line.move_line_id.move_id:\n statement_line.move_line_id.move_id.write({'is_reconciled': False})\n\n statement.write({'state': 'draft',\n 'verified_by_user_id': False,\n 'verified_date': False\n })\n return True", "def envoi_par_mail(self):\n cr , uid, context = self.env.args\n if not self.pool['res.users'].has_group(cr, uid, 'is_plastigray.is_comptable_group'):\n raise Warning(u\"Accès non autorisé !\")\n ids=[]\n for obj in self:\n ids.append(str(obj.id))\n if len(ids)>0:\n SQL=\"\"\"\n select ai.is_mode_envoi_facture, ai.partner_id, ai.name, ai.id\n from account_invoice ai\n where \n ai.id in(\"\"\"+','.join(ids)+\"\"\") and \n ai.is_date_envoi_mail is null and \n ai.is_mode_envoi_facture like 'mail%'\n order by ai.is_mode_envoi_facture, ai.partner_id, ai.name\n \"\"\"\n cr.execute(SQL)\n result = cr.fetchall()\n\n # ** Un mail par client*********************************************\n partners={}\n for row in result:\n if row[0]=='mail_client':\n partner_id = row[1]\n id = row[3]\n if not partner_id in partners:\n partners[partner_id]=[]\n partners[partner_id].append(id)\n #*******************************************************************\n\n\n # ** Un mail+BL par client******************************************\n for row in result:\n if row[0]=='mail_client_bl':\n partner_id = row[1]\n id = row[3]\n if not partner_id in partners:\n partners[partner_id]=[]\n partners[partner_id].append(id)\n #*******************************************************************\n\n\n #** Envoi des mails par partner ************************************\n for partner_id in partners:\n ids=partners[partner_id]\n self._envoi_par_mail(partner_id, ids)\n #*******************************************************************\n\n\n # ** Un mail par facture *******************************************\n for row in result:\n if row[0] in ['mail', 'mail_regroupe_bl']:\n partner_id = row[1]\n id = row[3]\n self._envoi_par_mail(partner_id, [id])\n #*******************************************************************\n\n\n # ** Un mail par facture en double exemplaire **********************\n for row in result:\n if row[0]=='mail2':\n partner_id = row[1]\n id = row[3]\n self._envoi_par_mail(partner_id, [id])\n #*******************************************************************", "def button_fac_cob_ent(self):\n invoice = self._fac_ent()\n\n # pagar la factura\n # hacer configuracion para modificar esto\n receipt_obj = self.env['account.voucher.receiptbook']\n receipt = receipt_obj.search([('name', 'like', 'Recibos')], limit=1)\n\n journal = self.journal_id\n res = invoice.invoice_pay_customer()\n context = res['context']\n\n account_voucher_obj = self.env['account.voucher']\n voucher = account_voucher_obj.create({\n 'partner_id': context['default_partner_id'],\n 'journal_id': journal.id,\n 'account_id': journal.default_debit_account_id.id,\n 'type': context['type'],\n 'amount': context['default_amount'],\n 'net_amount': 
context['default_amount'],\n 'receiptbook_id': receipt.id,\n 'company_id': self.env.user.company_id.id\n })\n voucher.signal_workflow('proforma_voucher')\n\n account_move_line_obj = self.env['account.move.line']\n\n # obtener un recordser vacio\n lines2rec = account_move_line_obj.browse()\n\n # obtener las lineas a conciliar de facturas\n account_move_line = account_move_line_obj.search(\n [('document_number', '=', invoice.document_number)])\n for re in account_move_line:\n if re.account_id.reconcile:\n lines2rec += re\n\n # obtener las lineas a conciliar de pagos\n account_move_line = account_move_line_obj.search(\n [('document_number', '=', voucher.document_number)])\n for re in account_move_line:\n if re.account_id.reconcile:\n lines2rec += re\n\n period_obj = self.env['account.period']\n period = period_obj.find()\n\n # reconciliar las lineas de factura con pagos\n lines2rec.reconcile('manual',\n journal.default_debit_account_id.id, # writeoff_acc_id\n period.id, # writeoff_period_id,\n journal.id) # writeoff_journal_id)\n\n # imprime factura\n datas = {\n 'ids': invoice.ids,\n 'model': 'account.report_invoice',\n 'form': invoice.read()\n }\n return {\n 'type': 'ir.actions.report.xml',\n 'report_name': 'aeroo_report_ar_einvoice',\n 'datas': datas,\n }", "def action_invoice_create(self, cr, uid, ids, context=None):\n res = False\n\n journal_obj = self.pool.get('account.journal')\n inv_obj = self.pool.get('account.invoice')\n inv_line_obj = self.pool.get('account.invoice.line')\n fiscal_obj = self.pool.get('account.fiscal.position')\n\n for order in self.browse(cr, uid, ids, context=context):\n# pay_acc_id = order.partner_id.property_account_payable.id\n #use a new method to get the account_id\n pay_acc_id = self._get_inv_pay_acc_id(cr,uid,order) \n journal_ids = journal_obj.search(cr, uid, [('type', '=','purchase'),('company_id', '=', order.company_id.id)], limit=1)\n if not journal_ids:\n raise osv.except_osv(_('Error!'),\n _('Define purchase journal for this company: \"%s\" (id:%d).') % (order.company_id.name, order.company_id.id))\n\n # generate invoice line correspond to PO line and link that to created invoice (inv_id) and PO line\n inv_lines = []\n for po_line in order.order_line:\n #check if this line have quantity to generate invoice, by johnw\n if po_line.product_qty <= po_line.invoice_qty:\n continue \n# if po_line.product_id:\n# acc_id = po_line.product_id.property_account_expense.id\n# if not acc_id:\n# acc_id = po_line.product_id.categ_id.property_account_expense_categ.id\n# if not acc_id:\n# raise osv.except_osv(_('Error!'), _('Define expense account for this company: \"%s\" (id:%d).') % (po_line.product_id.name, po_line.product_id.id,))\n# else:\n# acc_id = property_obj.get(cr, uid, 'property_account_expense_categ', 'product.category').id \n #use a new method to get the account_id, by johnw \n acc_id = self._get_inv_line_exp_acc_id(cr,uid,order,po_line)\n fpos = order.fiscal_position or False\n acc_id = fiscal_obj.map_account(cr, uid, fpos, acc_id)\n\n inv_line_data = self._prepare_inv_line(cr, uid, acc_id, po_line, context=context)\n #update the quantity to the quantity, by johnw\n inv_line_data.update({'quantity':(po_line.product_qty - po_line.invoice_qty)})\n inv_line_id = inv_line_obj.create(cr, uid, inv_line_data, context=context)\n inv_lines.append(inv_line_id)\n\n po_line.write({'invoiced':True, 'invoice_lines': [(4, inv_line_id)]}, context=context)\n \n #if no lines then return direct, by johnw\n if len(inv_lines) == 0:\n continue\n \n # get invoice data and 
create invoice\n inv_data = {\n 'name': order.partner_ref or order.name,\n 'reference': order.partner_ref or order.name,\n 'account_id': pay_acc_id,\n 'type': 'in_invoice',\n 'partner_id': order.partner_id.id,\n 'currency_id': order.pricelist_id.currency_id.id,\n 'journal_id': len(journal_ids) and journal_ids[0] or False,\n 'invoice_line': [(6, 0, inv_lines)],\n 'origin': order.name,\n 'fiscal_position': order.fiscal_position.id or False,\n 'payment_term': order.payment_term_id.id or False,\n 'company_id': order.company_id.id,\n }\n inv_id = inv_obj.create(cr, uid, inv_data, context=context)\n\n # compute the invoice\n inv_obj.button_compute(cr, uid, [inv_id], context=context, set_total=True)\n\n # Link this new invoice to related purchase order\n order.write({'invoice_ids': [(4, inv_id)]}, context=context)\n res = inv_id\n return res", "def action_move_create(self, cr, uid, ids, context=None):\n ait_obj = self.pool.get('account.invoice.tax')\n cur_obj = self.pool.get('res.currency')\n period_obj = self.pool.get('account.period')\n payment_term_obj = self.pool.get('account.payment.term')\n journal_obj = self.pool.get('account.journal')\n move_obj = self.pool.get('account.move')\n if context is None:\n context = {}\n for inv in self.browse(cr, uid, ids, context=context):\n if not inv.journal_id.sequence_id:\n raise osv.except_osv(_('Error!'), _('Please define sequence on the journal related to this invoice.'))\n if not inv.invoice_line:\n raise osv.except_osv(_('No Invoice Lines!'), _('Please create some invoice lines.'))\n if inv.move_id:\n continue\n\n ctx = context.copy()\n ctx.update({'lang': inv.partner_id.lang})\n if not inv.date_invoice:\n self.write(cr, uid, [inv.id], {'date_invoice': fields.date.context_today(self,cr,uid,context=context)}, context=ctx)\n company_currency = self.pool['res.company'].browse(cr, uid, inv.company_id.id).currency_id.id\n # create the analytical lines\n # one move line per invoice line\n iml = self._get_analytic_lines(cr, uid, inv.id, context=ctx)\n # check if taxes are all computed\n compute_taxes = ait_obj.compute(cr, uid, inv.id, context=ctx)\n self.check_tax_lines(cr, uid, inv, compute_taxes, ait_obj)\n\n # I disabled the check_total feature\n group_check_total_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account', 'group_supplier_inv_check_total')[1]\n group_check_total = self.pool.get('res.groups').browse(cr, uid, group_check_total_id, context=context)\n if group_check_total and uid in [x.id for x in group_check_total.users]:\n if (inv.type in ('in_invoice', 'in_refund') and abs(inv.check_total - inv.amount_total) >= (inv.currency_id.rounding/2.0)):\n raise osv.except_osv(_('Bad Total!'), _('Please verify the price of the invoice!\\nThe encoded total does not match the computed total.'))\n\n if inv.payment_term:\n total_fixed = total_percent = 0\n for line in inv.payment_term.line_ids:\n if line.value == 'fixed':\n total_fixed += line.value_amount\n if line.value == 'procent':\n total_percent += line.value_amount\n total_fixed = (total_fixed * 100) / (inv.amount_total or 1.0)\n if (total_fixed + total_percent) > 100:\n raise osv.except_osv(_('Error!'), _(\"Cannot create the invoice.\\nThe related payment term is probably misconfigured as it gives a computed amount greater than the total invoiced amount. 
In order to avoid rounding issues, the latest line of your payment term must be of type 'balance'.\"))\n\n # one move line per tax line\n iml += ait_obj.move_line_get(cr, uid, inv.id)\n\n entry_type = ''\n if inv.type in ('in_invoice', 'in_refund'):\n ref = inv.reference\n entry_type = 'journal_pur_voucher'\n if inv.type == 'in_refund':\n entry_type = 'cont_voucher'\n else:\n ref = self._convert_ref(cr, uid, inv.number)\n entry_type = 'journal_sale_vou'\n if inv.type == 'out_refund':\n entry_type = 'cont_voucher'\n\n diff_currency_p = inv.currency_id.id <> company_currency\n # create one move line for the total and possibly adjust the other lines amount\n total = 0\n total_currency = 0\n total, total_currency, iml = self.compute_invoice_totals(cr, uid, inv, company_currency, ref, iml, context=ctx)\n acc_id = inv.account_id.id\n\n name = inv['name'] or inv['supplier_invoice_number'] or '/'\n totlines = False\n # kittiu\n #if inv.payment_term:\n if inv.payment_term and not inv.date_due:\n # --\n totlines = payment_term_obj.compute(cr,\n uid, inv.payment_term.id, total, inv.date_invoice or False, context=ctx)\n if totlines:\n res_amount_currency = total_currency\n i = 0\n ctx.update({'date': inv.date_invoice})\n for t in totlines:\n if inv.currency_id.id != company_currency:\n amount_currency = cur_obj.compute(cr, uid, company_currency, inv.currency_id.id, t[1], context=ctx)\n else:\n amount_currency = False\n\n # last line add the diff\n res_amount_currency -= amount_currency or 0\n i += 1\n if i == len(totlines):\n amount_currency += res_amount_currency\n\n iml.append({\n 'type': 'dest',\n 'name': name,\n 'price': t[1],\n 'account_id': acc_id,\n 'date_maturity': t[0],\n 'amount_currency': diff_currency_p \\\n and amount_currency or False,\n 'currency_id': diff_currency_p \\\n and inv.currency_id.id or False,\n 'ref': ref,\n })\n else:\n iml.append({\n 'type': 'dest',\n 'name': name,\n 'price': total,\n 'account_id': acc_id,\n 'date_maturity': inv.date_due or False,\n 'amount_currency': diff_currency_p \\\n and total_currency or False,\n 'currency_id': diff_currency_p \\\n and inv.currency_id.id or False,\n 'ref': ref\n })\n\n date = inv.date_invoice or time.strftime('%Y-%m-%d')\n\n part = self.pool.get(\"res.partner\")._find_accounting_partner(inv.partner_id)\n\n line = map(lambda x:(0,0,self.line_get_convert(cr, uid, x, part.id, date, context=ctx)),iml)\n\n line = self.group_lines(cr, uid, iml, line, inv)\n\n journal_id = inv.journal_id.id\n journal = journal_obj.browse(cr, uid, journal_id, context=ctx)\n if journal.centralisation:\n raise osv.except_osv(_('User Error!'),\n _('You cannot create an invoice on a centralized journal. 
Uncheck the centralized counterpart box in the related journal from the configuration menu.'))\n\n line = self.finalize_invoice_move_lines(cr, uid, inv, line)\n\n move = {\n 'ref': inv.reference and inv.reference or inv.name,\n 'line_id': line,\n 'journal_id': journal_id,\n 'date': date,\n 'narration': inv.comment,\n 'company_id': inv.company_id.id,\n }\n period_id = inv.period_id and inv.period_id.id or False\n ctx.update(company_id=inv.company_id.id,\n account_period_prefer_normal=True)\n if not period_id:\n period_ids = period_obj.find(cr, uid, inv.date_invoice, context=ctx)\n period_id = period_ids and period_ids[0] or False\n if period_id:\n move['period_id'] = period_id\n for i in line:\n i[2]['period_id'] = period_id\n\n ctx.update(invoice=inv)\n move_id = move_obj.create(cr, uid, move, context=ctx)\n new_move_name = move_obj.browse(cr, uid, move_id, context=ctx).name\n # make the invoice point to that move\n self.write(cr, uid, [inv.id], {'move_id': move_id,'period_id':period_id, 'move_name':new_move_name}, context=ctx)\n # Pass invoice in context in method post: used if you want to get the same\n # account move reference when creating the same invoice after a cancelled one:\n move_obj.post(cr, uid, [move_id], context=ctx)\n self._log_event(cr, uid, ids)\n return True", "def action_invoice_create(self, cr, uid, ids, grouped=False, states=None, date_invoice=False, context=None):\n order = self.browse(cr, uid, ids[0], context=context)\n inv_obj = self.pool.get('account.invoice')\n # create the invoice\n inv_id = super(sale_order, self).action_invoice_create(cr, uid, ids, grouped, states, date_invoice, context=context)\n # modify the invoice\n inv_obj.write(cr, uid, [inv_id], {'past_doc': order.past_doc})\n return inv_id", "def action_move_create(self, cr, uid, ids, context=None):\n ait_obj = self.pool.get('account.invoice.tax')\n cur_obj = self.pool.get('res.currency')\n period_obj = self.pool.get('account.period')\n payment_term_obj = self.pool.get('account.payment.term')\n journal_obj = self.pool.get('account.journal')\n move_obj = self.pool.get('account.move')\n if context is None:\n context = {}\n for inv in self.browse(cr, uid, ids, context=context):\n if not inv.journal_id:\n raise orm.except_orm(_('Error!'),\n _('Journal not defined for this invoice!'))\n if not inv.journal_id.iva_registry_id:\n raise orm.except_orm(_('Error!'),\n _('You must link %s with a VAT registry!') % (inv.journal_id.name))\n if not inv.journal_id.sequence_id:\n raise orm.except_orm(_('Error!'),\n _('Please define sequence on the journal related to this invoice.')) \n if not inv.invoice_line:\n raise orm.except_orm(_('No Invoice Lines!'),\n _('Please create some invoice lines.'))\n if inv.move_id:\n continue\n\n ctx = context.copy()\n ctx.update({'lang': inv.partner_id.lang})\n if not inv.date_invoice:\n self.write(cr, uid, [inv.id],\n {'date_invoice': fields.date.context_today(self,\n cr,\n uid,\n context=context)},\n context=ctx)\n company_currency = self.pool['res.company'].browse(cr, uid,\n inv.company_id.id).currency_id.id\n # create the analytical lines\n # one move line per invoice line\n # iml = self._get_analytic_lines(cr, uid, inv.id, context=ctx)\n iml = super(account_invoice_makeover, self)._get_analytic_lines(cr, uid, inv.id, context=ctx)\n # check if taxes are all computed\n compute_taxes = ait_obj.compute(cr, uid, inv.id, context=ctx)\n # self.check_tax_lines(cr, uid, inv, compute_taxes, ait_obj)\n super(account_invoice_makeover, self).check_tax_lines(cr, uid, inv, compute_taxes, 
ait_obj)\n\n # I disabled the check_total feature\n group_check_total_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account', 'group_supplier_inv_check_total')[1]\n group_check_total = self.pool.get('res.groups').browse(cr, uid,\n group_check_total_id,\n context=context)\n if group_check_total and uid in [x.id for x in group_check_total.users]:\n if (inv.type in ('in_invoice', 'in_refund') and abs(inv.check_total - inv.amount_total) >= (inv.currency_id.rounding / 2.0)):\n raise orm.except_orm(_('Bad Total!'), _('Please verify the price of the invoice!\\nThe encoded total does not match the computed total.'))\n\n if inv.payment_term:\n total_fixed = total_percent = 0\n for line in inv.payment_term.line_ids:\n if line.value == 'fixed':\n total_fixed += line.value_amount\n if line.value == 'procent':\n total_percent += line.value_amount\n total_fixed = (total_fixed * 100) / (inv.amount_total or 1.0)\n if (total_fixed + total_percent) > 100:\n raise orm.except_orm(_('Error!'), _(\"Cannot create the invoice.\\nThe related payment term is probably misconfigured as it gives a computed amount greater than the total invoiced amount. In order to avoid rounding issues, the latest line of your payment term must be of type 'balance'.\"))\n\n # one move line per tax line\n iml += ait_obj.move_line_get(cr, uid, inv.id)\n\n# entry_type = ''\n if inv.type in ('in_invoice', 'in_refund'):\n ref = inv.reference\n# entry_type = 'journal_pur_voucher'\n# if inv.type == 'in_refund':\n# entry_type = 'cont_voucher'\n else:\n # ref = self._convert_ref(cr, uid, inv.number)\n ref = super(account_invoice_makeover, self)._convert_ref(cr, uid, inv.number)\n# entry_type = 'journal_sale_vou'\n# if inv.type == 'out_refund':\n# entry_type = 'cont_voucher'\n\n diff_currency_p = inv.currency_id.id <> company_currency\n # create one move line for the total and possibly adjust the other lines amount\n total = 0\n total_currency = 0\n # total, total_currency, iml = self.compute_invoice_totals(cr, uid, inv, company_currency, ref, iml, context=ctx)\n total, total_currency, iml = super(account_invoice_makeover, self).compute_invoice_totals(cr, uid, inv, company_currency, ref, iml, context=ctx)\n acc_id = inv.account_id.id\n\n name = inv['name'] or inv['supplier_invoice_number'] or '/'\n totlines = False\n if inv.payment_term:\n totlines = payment_term_obj.compute(cr,\n uid, inv.payment_term.id, total, inv.date_invoice or False, context=ctx)\n if totlines:\n res_amount_currency = total_currency\n i = 0\n ctx.update({'date': inv.date_invoice})\n for t_line in totlines:\n if inv.currency_id.id != company_currency:\n amount_currency = cur_obj.compute(cr, uid, company_currency, inv.currency_id.id, t_line[1], context=ctx)\n else:\n amount_currency = False\n\n # last line add the diff\n res_amount_currency -= amount_currency or 0\n i += 1\n if i == len(totlines):\n amount_currency += res_amount_currency\n\n iml.append({\n 'type': 'dest',\n 'name': name,\n 'price': t_line[1],\n 'account_id': acc_id,\n 'date_maturity': t_line[0],\n 'amount_currency': diff_currency_p \\\n and amount_currency or False,\n 'currency_id': diff_currency_p \\\n and inv.currency_id.id or False,\n 'ref': ref,\n 'payment_type': t_line[2]\n })\n else:\n iml.append({\n 'type': 'dest',\n 'name': name,\n 'price': total,\n 'account_id': acc_id,\n 'date_maturity': inv.date_due or False,\n 'amount_currency': diff_currency_p \\\n and total_currency or False,\n 'currency_id': diff_currency_p \\\n and inv.currency_id.id or False,\n 'ref': ref,\n 
'payment_type': None\n })\n\n date = inv.date_invoice or time.strftime('%Y-%m-%d')\n\n part = self.pool.get(\"res.partner\")._find_accounting_partner(inv.partner_id)\n\n line = map(lambda x:(0, 0, self.line_get_convert(cr, uid, x, part.id, date, context=ctx)), iml)\n\n # line = self.group_lines(cr, uid, iml, line, inv)\n line = super(account_invoice_makeover, self).group_lines(cr, uid, iml, line, inv)\n\n journal_id = inv.journal_id.id\n journal = journal_obj.browse(cr, uid, journal_id, context=ctx)\n if journal.centralisation:\n raise orm.except_orm(_('User Error!'),\n _('You cannot create an invoice on a centralized journal. Uncheck the centralized counterpart box in the related journal from the configuration menu.'))\n\n line = self.finalize_invoice_move_lines(cr, uid, inv, line)\n\n move = {\n 'ref': inv.reference and inv.reference or inv.name,\n 'line_id': line,\n 'journal_id': journal_id,\n 'date': date,\n 'narration': inv.comment,\n 'company_id': inv.company_id.id,\n }\n period_id = inv.period_id and inv.period_id.id or False\n ctx.update(company_id=inv.company_id.id,\n account_period_prefer_normal=True)\n if not period_id:\n period_ids = period_obj.find(cr, uid, inv.registration_date, context=ctx)\n period_id = period_ids and period_ids[0] or False\n if period_id:\n move['period_id'] = period_id\n for i in line:\n i[2]['period_id'] = period_id\n\n ctx.update(invoice=inv)\n move_id = move_obj.create(cr, uid, move, context=ctx)\n new_move_name = move_obj.browse(cr, uid, move_id, context=ctx).name\n # make the invoice point to that move\n self.write(cr, uid, [inv.id], {'move_id': move_id, 'period_id':period_id, 'move_name':new_move_name}, context=ctx)\n # Pass invoice in context in method post: used if you want to get the same\n # account move reference when creating the same invoice after a cancelled one:\n move_obj.post(cr, uid, [move_id], context=ctx)\n # self._log_event(cr, uid, ids)\n super(account_invoice_makeover, self)._log_event(cr, uid, ids)\n return True", "def _create_invoice(self):\n self.ensure_one()\n partner = self.member_id.partner_id\n invoice = self.env['account.invoice'].create({\n 'partner_id': partner.id,\n 'account_id': partner.property_account_receivable_id.id,\n 'fiscal_position_id': partner.property_account_position_id.id\n })\n for line in self.line_ids:\n product = line.activity_id.product_id\n # Handling of invoice lines : needs cache record for onchange, then\n # real writing...\n invoice_line = self.env['account.invoice.line'].new({\n 'product_id': product.id,\n 'invoice_id': invoice.id\n })\n invoice_line._onchange_product_id()\n line_values = dict(invoice_line._cache)\n line_values['price_unit'] = line.price\n invoice_line = self.env['account.invoice.line'].create(line_values)\n invoice.compute_taxes()\n line.registration_id.invoice_line_id = invoice_line.id\n return invoice", "def action_invoice_create(self, cr, uid, ids, context=None):\n res = False\n\n journal_obj = self.pool.get('account.journal')\n inv_obj = self.pool.get('account.invoice')\n inv_line_obj = self.pool.get('account.invoice.line')\n fiscal_obj = self.pool.get('account.fiscal.position')\n property_obj = self.pool.get('ir.property')\n\n for order in self.browse(cr, uid, ids, context=context):\n pay_acc_id = order.partner_id.property_account_payable.id\n journal_ids = journal_obj.search(cr, uid, [('type', '=','purchase'),('company_id', '=', order.company_id.id)], limit=1)\n if not journal_ids:\n raise osv.except_osv(_('Error !'),\n _('There is no purchase journal defined for this 
company: \"%s\" (id:%d)') % (order.company_id.name, order.company_id.id))\n\n # generate invoice line correspond to PO line and link that to created invoice (inv_id) and PO line\n inv_lines = []\n for po_line in order.order_line:\n if po_line.product_id:\n acc_id = po_line.product_id.product_tmpl_id.property_account_expense.id\n if not acc_id:\n acc_id = po_line.product_id.categ_id.property_account_expense_categ.id\n if not acc_id:\n raise osv.except_osv(_('Error !'), _('There is no expense account defined for this product: \"%s\" (id:%d)') % (po_line.product_id.name, po_line.product_id.id,))\n else:\n acc_id = property_obj.get(cr, uid, 'property_account_expense_categ', 'product.category').id\n fpos = order.fiscal_position or False\n acc_id = fiscal_obj.map_account(cr, uid, fpos, acc_id)\n\n inv_line_data = self._prepare_inv_line(cr, uid, acc_id, po_line, context=context)\n inv_line_id = inv_line_obj.create(cr, uid, inv_line_data, context=context)\n inv_lines.append(inv_line_id)\n\n po_line.write({'invoiced':True, 'invoice_lines': [(4, inv_line_id)]}, context=context)\n\n # get invoice data and create invoice\n inv_data = {\n 'name': order.partner_ref or order.name,\n 'reference': order.partner_ref or order.name,\n 'account_id': pay_acc_id,\n 'type': 'in_invoice',\n 'partner_id': order.partner_id.id,\n 'currency_id': order.pricelist_id.currency_id.id,\n 'address_invoice_id': order.partner_address_id.id,\n 'address_contact_id': order.partner_address_id.id,\n 'journal_id': len(journal_ids) and journal_ids[0] or False,\n 'invoice_line': [(6, 0, inv_lines)], \n 'origin': order.name,\n 'fiscal_position': order.fiscal_position.id or order.partner_id.property_account_position.id,\n 'payment_term': order.partner_id.property_payment_term and order.partner_id.property_payment_term.id or False,\n 'company_id': order.company_id.id,\n 'add_disc': order.add_disc or 0.0\n }\n inv_id = inv_obj.create(cr, uid, inv_data, context=context)\n\n # compute the invoice\n inv_obj.button_compute(cr, uid, [inv_id], context=context, set_total=True)\n\n # Link this new invoice to related purchase order\n order.write({'invoice_ids': [(4, inv_id)]}, context=context)\n res = inv_id\n return res", "def action_move_create(self):\n\t\taccount_move = self.env['account.move']\n\n\t\tfor inv in self:\n\t\t\tif not inv.journal_id.sequence_id:\n\t\t\t\traise UserError(_('Please define sequence on the journal related to this invoice.'))\n\t\t\tif not inv.invoice_line_ids.filtered(lambda line: line.account_id):\n\t\t\t\traise UserError(_('Please add at least one invoice line.'))\n\t\t\tif inv.move_id:\n\t\t\t\tcontinue\n\n\n\t\t\tif not inv.date_invoice:\n\t\t\t\tinv.write({'date_invoice': fields.Date.context_today(self)})\n\t\t\tif not inv.date_due:\n\t\t\t\tinv.write({'date_due': inv.date_invoice})\n\t\t\tcompany_currency = inv.company_id.currency_id\n\n\t\t\t# create move lines (one per invoice line + eventual taxes and analytic lines)\n\t\t\timl = inv.invoice_line_move_line_get()\n\t\t\timl += inv.tax_line_move_line_get()\n\n\t\t\tdiff_currency = inv.currency_id != company_currency\n\t\t\t# create one move line for the total and possibly adjust the other lines amount\n\t\t\ttotal, total_currency, iml = inv.compute_invoice_totals(company_currency, iml)\n\n\t\t\tname = inv.name or ''\n\t\t\tif inv.payment_term_id:\n\t\t\t\ttotlines = inv.payment_term_id.with_context(currency_id=company_currency.id).compute(total, inv.date_invoice)[0]\n\t\t\t\tres_amount_currency = total_currency\n\t\t\t\tfor i, t in 
enumerate(totlines):\n\t\t\t\t\tif inv.currency_id != company_currency:\n\t\t\t\t\t\tamount_currency = company_currency._convert(t[1], inv.currency_id, inv.company_id, inv._get_currency_rate_date() or fields.Date.today())\n\t\t\t\t\telse:\n\t\t\t\t\t\tamount_currency = False\n\n\t\t\t\t\t# last line: add the diff\n\t\t\t\t\tres_amount_currency -= amount_currency or 0\n\t\t\t\t\tif i + 1 == len(totlines):\n\t\t\t\t\t\tamount_currency += res_amount_currency\n\n\t\t\t\t\t_logger.info(inv)\n\t\t\t\t\timl.append({\n\t\t\t\t\t\t'type': 'dest',\n\t\t\t\t\t\t'name': name,\n\t\t\t\t\t\t'price': t[1],\n\t\t\t\t\t\t'account_id': inv.account_id.id,\n\t\t\t\t\t\t'date_maturity': t[0],\n\t\t\t\t\t\t'amount_currency': diff_currency and amount_currency,\n\t\t\t\t\t\t'currency_id': diff_currency and inv.currency_id.id,\n\t\t\t\t\t\t'invoice_id': inv.id,\n\t\t\t\t\t\t#'partner_id': inv.partner_line_id.id\n\t\t\t\t\t})\n\t\t\telse:\n\t\t\t\t_logger.info(inv)\n\t\t\t\ttotal_taxes_to_pay = self.return_tax_to_payy()\n\n\t\t\t\tif inv.taxes_collected_id.type_taxes == 'tax_company':\n\t\t\t\t\timl.append({\n\t\t\t\t\t'type': 'dest',\n\t\t\t\t\t'name': name,\n\t\t\t\t\t'price': total_taxes_to_pay,\n\t\t\t\t\t'account_id': inv.taxes_collected_id.account_id.id,\n\t\t\t\t\t'date_maturity': inv.date_due,\n\t\t\t\t\t'amount_currency': diff_currency and total_currency,\n\t\t\t\t\t'currency_id': diff_currency and inv.currency_id.id,\n\t\t\t\t\t'invoice_id': inv.id,\n\t\t\t\t\t#'partner_id': inv.partner_line_id.id\n\t\t\t\t\t})\n\t\t\t\t\timl.append({\n\t\t\t\t\t'type': 'dest',\n\t\t\t\t\t'name': name,\n\t\t\t\t\t'price': total- total_taxes_to_pay,\n\t\t\t\t\t'account_id': inv.account_id.id,\n\t\t\t\t\t'date_maturity': inv.date_due,\n\t\t\t\t\t'amount_currency': diff_currency and total_currency,\n\t\t\t\t\t'currency_id': diff_currency and inv.currency_id.id,\n\t\t\t\t\t'invoice_id': inv.id,\n\t\t\t\t\t#'partner_id': inv.partner_line_id.id\n\t\t\t\t\t})\n\n\t\t\t\telse:\n\t\t\t\t\timl.append({\n\t\t\t\t\t'type': 'dest',\n\t\t\t\t\t'name': name,\n\t\t\t\t\t'price': total,\n\t\t\t\t\t'account_id': inv.account_id.id,\n\t\t\t\t\t'date_maturity': inv.date_due,\n\t\t\t\t\t'amount_currency': diff_currency and total_currency,\n\t\t\t\t\t'currency_id': diff_currency and inv.currency_id.id,\n\t\t\t\t\t'invoice_id': inv.id,\n\t\t\t\t\t#'partner_id': inv.partner_line_id.id\n\t\t\t\t})\n\n\t\t\tpart = self.env['res.partner']._find_accounting_partner(inv.partner_id)\n\n\t\t\t#validamo que sea una factura de proveedor\n\t\t\tif self.type == 'in_invoice':\n\t\t\t\tdata_new = []\n\t\t\t\tfor l in iml:\n\t\t\t\t\tif 'partner_id' in l:\n\t\t\t\t\t\tif l['partner_id']:\n\t\t\t\t\t\t\tdata_new.append((0, 0, self.line_get_convert(l, l['partner_id'])) )\n\t\t\t\t\telse:\n\t\t\t\t\t\tdata_new.append((0, 0, self.line_get_convert(l, part.id)) )\n\n\t\t\t\tline = [l for l in data_new ]\n\t\t\telse:\n\t\t\t\tline = [(0, 0, self.line_get_convert(l, part.id)) for l in iml ]\n\n\t\t\tline = inv.group_lines(iml, line)\n\n\t\t\tline = inv.finalize_invoice_move_lines(line)\n\n\t\t\tdate = inv.date or inv.date_invoice\n\t\t\tmove_vals = {\n\t\t\t\t'ref': inv.reference,\n\t\t\t\t'line_ids': line,\n\t\t\t\t'journal_id': inv.journal_id.id,\n\t\t\t\t'date': date,\n\t\t\t\t'narration': inv.comment,\n\t\t\t}\n\n\t\t\tmove = account_move.create(move_vals)\n\t\t\t# Pass invoice in method post: used if you want to get the same\n\t\t\t# account move reference when creating the same invoice after a cancelled one:\n\t\t\tmove.post(invoice = inv)\n\t\t\t# make the 
invoice point to that move\n\t\t\tvals = {\n\t\t\t\t'move_id': move.id,\n\t\t\t\t'date': date,\n\t\t\t\t'move_name': move.name,\n\t\t\t}\n\t\t\tinv.write(vals)\n\t\treturn True", "def compute_counterpart_lines(self):\n for item in self:\n move_debit_lines = []\n move_credit_lines = []\n\n # list of all the move lines of the payment's move\n line_list = []\n for entry in item.journal_entry_ids:\n for line in entry.line_ids:\n if line.account_id.treasury_planning:\n line_list.append(line)\n\n # for each line above collect all the reconciled counterpart lines\n for line in line_list:\n if line.credit > 0 and line.debit == 0:\n for match in line.matched_debit_ids:\n move_debit_lines.append(match.debit_move_id.id)\n\n if line.credit == 0 and line.debit > 0:\n for match in line.matched_credit_ids:\n move_credit_lines.append(match.credit_move_id.id)\n\n if move_credit_lines:\n counterpart_move_ids = move_credit_lines\n else:\n counterpart_move_ids = move_debit_lines\n\n # bank move share is transformed to dictionary\n bank_move_dict = (ast.literal_eval(item.cf_share) if\n item.cf_share else {})\n\n # the share of each counterpart line is \"merged or added\"\n # in a weighted manner to the bank line share\n for cpt in counterpart_move_ids:\n dest_move_line = self.env['account.move.line'].browse(cpt)\n weight = round(dest_move_line.balance / item.amount, 2)\n # counterpart share is transformed into dictionary\n move_line_dict = ast.literal_eval(dest_move_line.cf_share)\n\n # each key is finally added to the bank line share\n for key, value in move_line_dict.iteritems():\n draft_dictionary = dictop.sum_dictionary(\n bank_move_dict.get(key, {}), 1,\n move_line_dict.get(key, {}), weight)\n bank_move_dict[key] = dictop.check_dict_total(\n draft_dictionary, 1)\n\n # the dictionary is transformed into string and assigned\n item.cf_share = json.dumps(bank_move_dict)", "def compute_advice(self):\n for advice in self:\n old_lines = self.env['hr.payroll.advice.line'].search([('advice_id', '=', advice.id)])\n if old_lines:\n old_lines.unlink()\n payslips = self.env['hr.payslip'].search([('date_from', '<=', advice.date), ('date_to', '>=', advice.date), ('state', '=', 'done')])\n for slip in payslips:\n if not slip.sudo().employee_id.bank_account_id and not slip.sudo().employee_id.bank_account_id.acc_number:\n raise UserError(_('Please define bank account for the %s employee') % (slip.employee_id.name,))\n payslip_line = self.env['hr.payslip.line'].search([('slip_id', '=', slip.id), ('code', '=', 'NET')], limit=1)\n if payslip_line:\n self.env['hr.payroll.advice.line'].create({\n 'advice_id': advice.id,\n 'name': slip.sudo().employee_id.bank_account_id.acc_number,\n 'ifsc_code': slip.sudo().employee_id.bank_account_id.bank_bic or '',\n 'employee_id': slip.employee_id.id,\n 'bysal': payslip_line.total\n })\n slip.advice_id = advice.id", "def abc_confirm_invoice(self, lines, packages, data, params, res):\n invoice = params.get('invoice')\n if invoice and invoice.state == 'draft':\n self.env.cr.commit()\n env = None\n try:\n # Ne cursor doesn't time out when requesting lock.\n # Could be bad I guess? 
Works for now.\n # TODO: Look into setting a more reasonable lock wait time.\n new_cr = Registry(self.env.cr.dbname).cursor()\n new_cr.autocommit(True)\n env = api.Environment(new_cr, self.env.uid, self.env.context)\n # Validate invoice\n invoice.signal_workflow('invoice_open')\n res['invoice']['name'] = invoice.number\n res['messages'].append(u\"Created and confirmed invoice %s.\" % invoice.number)\n res['results']['invoice'] = 'confirmed'\n # Commit to unlock the invoice sequence\n env.cr.commit()\n except Exception as e:\n res['warnings'].append((\n _(u\"Failed to confirm invoice %s!\") % (invoice and (invoice.number or invoice.name) or 'Unknown'),\n '%s\\n\\nTraceback:\\n%s' % (e.message or 'Unknown Error', traceback.format_exc())))\n finally:\n if env:\n env.cr.close()", "def process_b2c_amazon_order_ept(self, row, sale_order, invoice_date):\n invoice_number = row.get('VAT Invoice Number', False)\n invoice_url = row.get('Invoice Url', '')\n\n invoices = sale_order.invoice_ids.filtered(\n lambda x: x.type == 'out_invoice' and x.state != 'cancel')\n if not invoices:\n lines = sale_order.order_line.filtered(lambda line: line.qty_to_invoice > 0)\n if not lines:\n return False\n invoices = sale_order.with_context({'vcs_invoice_number': invoice_number})._create_invoices()\n self.write({'invoice_ids': [(4, invoices and invoices.id)]})\n\n for invoice in invoices:\n invoice_vals = {}\n invoice_vals.update({'date': invoice_date, 'invoice_url': invoice_url})\n if invoice.state == 'draft' and \\\n sale_order.amz_seller_id.is_invoice_number_same_as_vcs_report:\n invoice_vals.update({'name': invoice_number})\n invoice.write(invoice_vals)\n return True", "def generate_orders(self, cr, uid, ids, context=None):\n voucher_pool = self.pool.get('account.voucher')\n payment_term_obj = self.pool.get('account.payment.term')\n account_budget_confirmation_obj = self.pool.get('account.budget.confirmation')\n period_obj = self.pool.get('account.period')\n if context is None:\n context = {}\n for order in self.browse(cr, uid, ids, context=context):\n #################################to remind\n total_fixed = total_percent = 0\n for line in order.payment_term.line_ids:\n if line.value == 'fixed':\n total_fixed += line.value_amount\n if line.value == 'procent':\n total_percent += line.value_amount\n total_fixed = (total_fixed * 100) / (order.amount or 1.0)\n if (total_fixed + total_percent) > 100:\n raise orm.except_orm(_('Error!'), _(\"Can not create the payments !\\n\\\n The related payment term is probably miss configured as it gives a computed amount greater than the total permanent payment amount. 
\\\n The latest line of your payment term must be of type 'balance' to avoid rounding issues.\"))\n # create one move line for the total and possibly adjust the other lines amount\n totlines1 = []\n for o in order.line_ids:\n totlines1 += payment_term_obj.compute(cr, uid, order.payment_term.id, o.amount, order.date or False, context=context)\n \n d = {}\n for k, v in totlines1:\n d.setdefault(k, [k]).append(v)\n totlines = map(tuple, d.values())\n\n for t in totlines :\n #to substract date from the interval number \n order_date = t[0]\n entered_date = datetime.datetime.strptime(order_date, '%Y-%m-%d')\n entered_date = entered_date.date()\n account_id = (order.partner_id.property_account_payable and order.partner_id.property_account_payable.id) or \\\n (order.journal_id.default_credit_account_id and order.journal_id.default_credit_account_id.id)\n period_id = period_obj.find(cr, uid, t[0], context=context)[0]\n\n list_confirm = [conf.id for conf in o.confirmation_ids]\n confirmations = account_budget_confirmation_obj.search(cr, uid, [('id','in', list_confirm),('period_id','=', period_id)], context=context) #('date','=',t[0]),\n\n for confirm in confirmations:\n confirm_id = confirm\n\n voucher_lines = [(0, 0, {'name':ol.name, 'account_id':ol.account_id.id, 'type':'dr',\n 'amount':t[count + 1], 'account_analytic_id':ol.account_analytic_id.id, 'budget_confirm_id': confirm_id })\n for count, ol in enumerate(order.line_ids)]\n res = voucher_pool.onchange_price(cr, uid, 0, voucher_lines, [], partner_id=order.partner_id.id, context=context).get(\"value\", {})\n voucher_dict = {\n 'partner_id' : order.partner_id.id,\n 'account_id': account_id,\n 'company_id' : order.company_id.id,\n 'journal_id' : order.journal_id.id,\n 'period_id': order.period_id.id,\n 'type':'purchase',\n 'date' : t[0],\n 'reference': order.name,\n 'payment_permanent_voucher_id': order.id,\n 'line_ids':voucher_lines,\n 'amount':res.get(\"amount\", 0.0)\n }\n voucher_pool.create(cr, uid, voucher_dict, context=context)\n return self.write(cr, uid, ids, {'state':'done'}, context=context)", "def action_create_invoice(self):\n if self.partner_id:\n supplier = self.partner_id\n else:\n supplier = self.partner_id.search(\n [(\"name\", \"=\", \"Salon Default Customer\")])\n lines = []\n product_id = self.env['product.product'].search(\n [(\"name\", \"=\", \"Salon Service\")])\n for records in self.order_line_ids:\n if product_id.property_account_income_id.id:\n income_account = product_id.property_account_income_id.id\n elif product_id.categ_id.property_account_income_categ_id.id:\n income_account = product_id.categ_id.\\\n property_account_income_categ_id.id\n else:\n raise UserError(\n _(\"Please define income account for this product: \"\n \"'%s' (id:%d).\") % (product_id.name, product_id.id))\n value = (0, 0, {\n 'name': records.service_id.name,\n 'account_id': income_account,\n 'price_unit': records.price,\n 'quantity': 1,\n 'product_id': product_id.id,\n })\n lines.append(value)\n invoice_line = {\n 'move_type': 'out_invoice',\n 'partner_id': supplier.id,\n 'invoice_user_id': self.env.user.id,\n 'invoice_origin': self.name,\n 'invoice_line_ids': lines,\n }\n inv = self.env['account.move'].create(invoice_line)\n action = self.env.ref('account.action_move_out_invoice_type',\n raise_if_not_found=False)\n result = {\n 'name': action.name,\n 'type': 'ir.actions.act_window',\n 'views': [[False, 'form']],\n 'target': 'current',\n 'res_id': inv.id,\n 'res_model': 'account.move',\n }\n self.inv_stage_identifier = True\n self.stage_id = 
3\n invoiced_records = self.env['salon.order'].search(\n [('stage_id', 'in', [3, 4]), ('chair_id', '=', self.chair_id.id)])\n total = 0\n for rows in invoiced_records:\n invoiced_date = str(rows.date)\n invoiced_date = invoiced_date[0:10]\n if invoiced_date == str(date.today()):\n total = total + rows.price_subtotal\n self.chair_id.collection_today = total\n self.update_number_of_orders()\n return result", "def operation_invoices(self):\r\n for operation in self:\r\n invoices = self.env['account.invoice'].search([\r\n ('freight_hbl', '=', operation.id),\r\n ('type', 'in', ['out_invoice', 'out_refund']),\r\n ('state', '!=', 'cancel'),\r\n ])\r\n action = self.env.ref('account.action_invoice_tree1').read()[0]\r\n if len(invoices) > 1:\r\n action['domain'] = [('id', 'in', invoices.ids)]\r\n elif len(invoices) == 1:\r\n action['views'] = [(self.env.ref('account.invoice_form').id, 'form')]\r\n action['res_id'] = invoices.ids[0]\r\n else:\r\n action = {'type': 'ir.actions.act_window_close'}\r\n return action", "def create_order_amended_invoice(sender, instance, using, **kwargs):\n\n sender_name = sender._meta.model.__name__\n\n if sender_name == \"WillOrder\":\n order = instance\n elif sender_name == \"Allocation\":\n order = instance.asset_store.order\n else:\n order = instance.order\n\n if Invoice.objects.filter(\n order=order, been_paid=True, parent_invoice=None\n ).exists():\n amended_invoice_required = False\n latest_paid_invoice = order.invoice.latest_paid()\n print(\"latest_paid_invoice\", latest_paid_invoice)\n if latest_paid_invoice:\n order_details = InvoiceService(order).limit_details\n\n for order_detail, order_numbers in order_details.items():\n try:\n willorder_limit = OrderLimit.objects.get(\n invoice=latest_paid_invoice, detail=order_detail\n )\n if order_numbers > willorder_limit.limit:\n amended_invoice_required = True\n except OrderLimit.DoesNotExist:\n amended_invoice_required = True\n\n parent_invoice = Invoice.objects.get(order=order, parent_invoice=None)\n\n if amended_invoice_required:\n if Invoice.objects.filter(\n order=order, been_paid=False, parent_invoice=parent_invoice\n ).exists():\n print(\"UPDATE AMENDED INVOICE\")\n order.invoice.latest().update_invoice()\n else:\n Invoice.objects.create(\n order=order, parent_invoice=parent_invoice)\n else:\n print(\"DELETE AMENDED INVOICE\")\n if Invoice.objects.filter(\n order=order, been_paid=False, parent_invoice=parent_invoice\n ).exists():\n Invoice.objects.get(\n order=order, parent_invoice=parent_invoice, been_paid=False\n ).delete()", "def test_merge_ok_draft_with_draft(self):\n p1 = PathFactory.create(name=\"PATH_AB\", geom=LineString((0, 1), (10, 1)), draft=True)\n p2 = PathFactory.create(name=\"PATH_CD\", geom=LineString((10, 1), (20, 1)), draft=True)\n response = self.client.post(reverse('core:path-drf-merge-path'), {'path[]': [p1.pk, p2.pk]})\n self.assertIn('success', response.json())", "def exchange_payment(self, cr, uid, ids, context=None):\n data = self.browse(cr, uid, ids, context=context)[0]\n check_log_pool = self.pool.get('check.log')\n sequence_pool = self.pool.get('ir.sequence')\n move_pool = self.pool.get('account.move') \n move_line_pool = self.pool.get('account.move.line')\n\n voucher_obj = self.pool.get('account.voucher')\n old_voucher_ids = voucher_obj.search(cr, uid, [('move_id', '=', context['active_id'])], context=context)\n old_chk_log_ids = check_log_pool.search(cr,uid,[('name','in',old_voucher_ids),('status','=','active')], context=context)\n '''if chk_log_ids:\n check_log_pool.write(cr, uid, 
chk_log_ids, {'status':'delete','deleted':True},context=context)'''\n if old_chk_log_ids:\n raise osv.except_osv(_('Warning'), _('This move have already exchanged'))\n voucher_id = self.check_move_data(cr, uid, ids, context=context)\n if not voucher_id:\n raise osv.except_osv(_('Warning'), _('The account in credit lines must be of type liquidity'))\n if data.new_no and voucher_id:\n move = move_pool.browse(cr, uid, context['active_id'], context=context)\n journal_id=move and move.journal_id\n if self._exchange_journal_seq(journal_id, context=context):\n chk_log_ids = check_log_pool.search(cr,uid,[('status','=','active')], context=context)\n sequence_pool.write(cr, uid, [journal_id.check_sequence.id], {'number_next_actual':data.new_no}, context=context)\n next_seq = sequence_pool.get_id(cr, uid, journal_id.check_sequence.id, context=context)\n lines = move_line_pool.search(cr, uid,[('move_id','=',context['active_id'])], context=context)\n line = move_line_pool.browse(cr, uid, lines, context=context)[0]\n check_log_pool.create(cr, uid,{'name': voucher_id, 'status': 'active', 'check_no': next_seq, 'journal_id':journal_id.id,'company_id':move.company_id.id}, context=context)\n #check_log_pool.create(cr, uid,{'partner_id':line.partner_id.id,'date_due':move.date,'status': 'active', 'check_no': next_seq, 'journal_id':journal_id.id,'company_id':move.company_id.id}, context=context)\n move_pool.write(cr, uid,[context['active_id']], {'ref' : next_seq or ' '}, context=context)\n move_line_pool.write(cr, uid,lines, {'ref' : next_seq or ' '}, context=context)\n return {'type':'ir.actions.act_window_close'}", "def return_fnc(self, cr, uid, ids, context=None):\n\n wf_service = netsvc.LocalService(\"workflow\")\n for rec in self.browse(cr, uid, ids, context=context):\n lines = []\n if rec.state == 'recieved':\n for line in rec.spares_ids:\n if line.recieved_quantity != 0:\n \n lines_dict = {\n 'name': line.product_id.name[:250],\n #'picking_id': picking_id,\n 'product_id': line.product_id.id,\n 'product_qty': line.recieved_quantity,\n 'product_uom': line.product_id.uom_id.id,\n 'product_uos_qty':line.recieved_quantity,\n 'product_uos': line.product_id.uom_id.id,\n 'location_id': rec.damage_line_id.department_id.location_dest_id.id ,\n 'location_dest_id': rec.damage_line_id.department_id.stock_location_id.id,\n #'exchange_line_id': line.id,\n 'tracking_id': False,\n 'state': 'draft',\n 'note': '',\n 'price_unit': line.product_id.standard_price or 0.0,\n 'move_type': 'one',\n } \n \n lines.append([0, False, lines_dict])\n\n if lines:\n piking_dict = {\n 'name': '/',\n #'origin': order.name,\n #'request': order.id,\n 'date': time.strftime('%Y-%m-%d'),\n 'type': 'in',\n 'state': 'draft',\n #'exchange_id': order.id,\n 'job_id': rec.id,\n 'maintenance': True,\n 'note': '',\n 'department_id':rec.damage_line_id.department_id.department_id.id,\n #'stock_journal_id':order.stock_journal_id and order.stock_journal_id.id,\n 'invoice_state': 'none',\n 'move_lines': lines,\n 'state': 'draft'\n }\n new_id = self.pool.get('stock.picking.in').create(cr, uid, piking_dict, context)\n wf_service.trg_validate(uid, 'stock.picking', new_id, 'button_confirm', cr)\n self.write(cr, uid, ids, {'state': 'return','picking_id':new_id})\n continue\n else:\n self.write(cr, uid, ids, {'state': 'canceled'})\n\n return True", "def exchange_move_data(self, cr, uid, ids, context=None): \n move_line_pool = self.pool.get('account.move.line')\n move = self.pool.get('account.move').browse(cr, uid, context.get('active_id',[]), context=context)\n 
move_line = move_line_pool.search(cr, uid, [('move_id','=',context.get('active_id',[]))], context=context)\n partners = move_line_pool.read(cr, uid, move_line, ['partner_id'], context=context)\n if len(set([part['partner_id'] for part in partners])) > 1:\n raise osv.except_osv(_('Warning'), _('Can not create new exchange for multiple partner!!'))\n return True", "def _get_lines(self, cr, uid, ids, context=None):\n List=[]\n if ids:\n line = self.pool.get('payment.enrich.lines').browse(cr, uid, ids[0], context=context)\n \n record = line.enrich_id\n val = 0.0\n for line in record.enrich_lines:\n if line.state == 'done' :\n val += line.cost\n res = {\n 'paid_amount':val,\n 'residual_amount':record.amount - val,\n }\n record.write(res)\n return List", "def done(self, cr, uid, ids, context=None):\n budget_line = self.pool.get('account.budget.lines')\n budget_line_id = False \n for r in self.browse(cr, uid, ids, context=context):\n to = {'analytic_account': r.analytic_account_id.id,\n 'account_id': r.account_id.id,\n 'period_id': r.period_id.id,\n 'company': r.company_id.id,\n 'amount' : r.amount\n }\n \n budget_line_id ,history_ids=budget_line.transfer(cr, uid, {'type':r.type, 'budget_type':r.budget_type, 'line_ids': r.line_ids, 'to':to, 'reference':self._name+','+str(r.id)}, context=context)\n return self.write(cr, uid, ids,{'state':'done', 'budget_line':budget_line_id}, context=context)", "def get_invoice(self):\n\n # Check if unclosed invoice for the client exists\n old_inv = connection.Kinko.find_one({'cl': self.cl, 'tid': None,\n 'typ': TYPE_MAP[self.tab_type]})\n\n inv_num = None\n # If it does, update its values and update packages\n if old_inv:\n old_inv.dt = datetime.datetime.today()\n old_inv.range.lt = self.q_dict[\"cs.sd\"].get(\"$lt\", None)\n old_inv.save()\n\n inv_num = old_inv.num\n\n else:\n #kinko dict to be updated in Kinko Collection.\n kdict = {\n \"amt\": 0.0,\n \"cl\": unicode(self.cl),\n \"dt\": datetime.datetime.today(),\n \"typ\": TYPE_MAP[self.tab_type],\n \"range\": {\"lt\": self.q_dict[\"cs.sd\"].get(\"$lt\", None),\n \"gt\": self.q_dict[\"cs.sd\"].get(\"$gte\", None),\n }\n }\n\n k = Kinko(kdict)\n\n k_count = 1\n\n #the get num method of Kinko model generates the unique no for new kinko\n k[\"num\"] = self.get_knum(1)\n while connection.Kinko.collection.find({\"num\": k.num}).count() > 0:\n k[\"num\"] = self.get_knum(k_count+1)\n k_count += k_count\n\n connection.Kinko(k).save()\n\n inv_num = k['num']\n\n if inv_num:\n #after creating a new document in Kinko all packages are updated.\n connection.Package.collection.update(self.q_dict, {'$set': {'inv.num': inv_num}}, safe=True, multi=True)\n \n #Aggrigation of remitted amount for requested client\n non_invoiced = kinko_map_reduce(inv_num, TYPE_MAP[self.tab_type])\n\n if len(non_invoiced) == 0:\n return False\n else:\n inv = connection.Kinko.find_one({'num': inv_num})\n if inv:\n inv.amt = non_invoiced[0]['value']['amt']\n inv.save()\n return inv\n else:\n return False\n else:\n return False", "def _prepare_invoice(self, cr, uid, order, lines, context=None):\n invoice_vals = super(my_sale_order, self)._prepare_invoice(cr, uid, order,\n lines, context)\n\n invoice_vals.update({\n 'partner_shipping_id': order.partner_shipping_id.id,\n })\n\n # Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1\n invoice_vals.update(self._inv_get(cr, uid, order, context=context))\n\n return invoice_vals", "def _prepare_invoice(self):\n self.ensure_one()\n journal_id = 
self.env['account.invoice'].default_get(['journal_id'])['journal_id']\n if not journal_id:\n raise UserError(_('Please define an accounting sales journal for this company.'))\n invoice_vals = {\n 'name': self.client_order_ref or '',\n 'origin': self.name,\n 'type': 'out_invoice',\n 'account_id': self.partner_invoice_id.property_account_receivable_id.id,\n 'partner_id': self.partner_invoice_id.id,\n 'partner_shipping_id': self.partner_shipping_id.id,\n 'journal_id': journal_id,\n 'currency_id': self.pricelist_id.currency_id.id,\n 'comment': self.note,\n 'payment_term_id': self.payment_term_id.id,\n 'fiscal_position_id': self.fiscal_position_id.id or self.partner_invoice_id.property_account_position_id.id,\n 'company_id': self.company_id.id,\n 'user_id': self.user_id and self.user_id.id,\n 'team_id': self.team_id.id,\n 'x_studio_field_rgEdd': self.x_studio_field_icWOZ.id,\n 'x_studio_car_type_1': self.vehicle.id,\n 'x_studio_job_card_1': self.x_studio_agency_job_card,\n 'x_studio_car_type_name': self.vehicle.model_id.name,\n 'x_studio_plate_num': self.vehicle.license_plate,\n 'x_studio_claim_num': self.claim_no,\n\n 'x_studio_is_insured':self.is_insured,\n 'x_studio_service_provider': self.service_advisor.id,\n 'date_invoice': fields.Date.today(),\n 'transaction_ids': [(6, 0, self.transaction_ids.ids)],\n }\n return invoice_vals\n\n # 'x_studio_field_rgEdd':order.x_studio_field_icWOZ.id,", "def make_invoices(self):\n for invoice in self.policy.invoices:\n db.session.delete(invoice)\n db.session.commit()\n\n billing_schedules = {'Annual': None, 'Semi-Annual': 3, 'Quarterly': 4, 'Monthly': 12}\n\n invoices = []\n first_invoice = Invoice(self.policy.id,\n self.policy.effective_date, # bill_date\n self.policy.effective_date + relativedelta(months=1), # due\n self.policy.effective_date + relativedelta(months=1, days=14), # cancel\n self.policy.annual_premium)\n invoices.append(first_invoice)\n\n if self.policy.billing_schedule == \"Annual\":\n pass\n elif self.policy.billing_schedule == \"Two-Pay\":\n first_invoice.amount_due = first_invoice.amount_due / billing_schedules.get(self.policy.billing_schedule)\n for i in range(1, billing_schedules.get(self.policy.billing_schedule)):\n months_after_eff_date = i*6\n bill_date = self.policy.effective_date + relativedelta(months=months_after_eff_date)\n invoice = Invoice(self.policy.id,\n bill_date,\n bill_date + relativedelta(months=1),\n bill_date + relativedelta(months=1, days=14),\n self.policy.annual_premium / billing_schedules.get(self.policy.billing_schedule))\n invoices.append(invoice)\n elif self.policy.billing_schedule == \"Quarterly\":\n first_invoice.amount_due = first_invoice.amount_due / billing_schedules.get(self.policy.billing_schedule)\n for i in range(1, billing_schedules.get(self.policy.billing_schedule)):\n months_after_eff_date = i*3\n bill_date = self.policy.effective_date + relativedelta(months=months_after_eff_date)\n invoice = Invoice(self.policy.id,\n bill_date,\n bill_date + relativedelta(months=1),\n bill_date + relativedelta(months=1, days=14),\n self.policy.annual_premium / billing_schedules.get(self.policy.billing_schedule))\n invoices.append(invoice)\n elif self.policy.billing_schedule == \"Monthly\":\n first_invoice.amount_due = first_invoice.amount_due / billing_schedules.get(self.policy.billing_schedule)\n for i in range(1, billing_schedules.get(self.policy.billing_schedule)):\n months_after_eff_date = i\n bill_date = self.policy.effective_date + relativedelta(months=months_after_eff_date)\n invoice = 
Invoice(self.policy.id,\n bill_date,\n bill_date + relativedelta(months=1),\n bill_date + relativedelta(months=1, days=14),\n self.policy.annual_premium / billing_schedules.get(self.policy.billing_schedule))\n invoices.append(invoice)\n else:\n print \"You have chosen a bad billing schedule.\"\n\n logger.info(str(len(invoices)) + \" invoices generated for policy %s\" % self.policy.id)\n\n for invoice in invoices:\n db.session.add(invoice)\n db.session.commit()", "def office_merge_process_view(request):\n # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer\n authority_required = {'verified_volunteer'}\n if not voter_has_authority(request, authority_required):\n return redirect_to_sign_in_page(request, authority_required)\n\n contest_office_manager = ContestOfficeManager()\n\n is_post = True if request.method == 'POST' else False\n\n if is_post:\n # merge = request.POST.get('merge', False)\n skip = request.POST.get('skip', False)\n # Contest office 1 is the one we keep, and Contest office 2 is the one we will merge into Contest office 1\n contest_office1_we_vote_id = request.POST.get('contest_office1_we_vote_id', 0)\n contest_office2_we_vote_id = request.POST.get('contest_office2_we_vote_id', 0)\n google_civic_election_id = request.POST.get('google_civic_election_id', 0)\n redirect_to_contest_office_list = positive_value_exists(request.POST.get('redirect_to_contest_office_list', False))\n remove_duplicate_process = positive_value_exists(request.POST.get('remove_duplicate_process', False))\n state_code = request.POST.get('state_code', '')\n else:\n # merge = request.GET.get('merge', False)\n skip = request.GET.get('skip', False)\n # Contest office 1 is the one we keep, and Contest office 2 is the one we will merge into Contest office 1\n contest_office1_we_vote_id = request.GET.get('contest_office1_we_vote_id', 0)\n contest_office2_we_vote_id = request.GET.get('contest_office2_we_vote_id', 0)\n google_civic_election_id = request.GET.get('google_civic_election_id', 0)\n redirect_to_contest_office_list = positive_value_exists(request.GET.get('redirect_to_contest_office_list', False))\n remove_duplicate_process = positive_value_exists(request.GET.get('remove_duplicate_process', False))\n state_code = request.GET.get('state_code', '')\n\n if positive_value_exists(skip):\n results = contest_office_manager.update_or_create_contest_offices_are_not_duplicates(\n contest_office1_we_vote_id, contest_office2_we_vote_id)\n if not results['new_contest_offices_are_not_duplicates_created']:\n messages.add_message(request, messages.ERROR, 'Could not save contest_offices_are_not_duplicates entry: ' +\n results['status'])\n messages.add_message(request, messages.INFO, 'Prior contest offices skipped, and not merged.')\n return HttpResponseRedirect(reverse('office:find_and_merge_duplicate_offices', args=()) +\n \"?google_civic_election_id=\" + str(google_civic_election_id) +\n \"&state_code=\" + str(state_code))\n\n contest_office1_results = contest_office_manager.retrieve_contest_office_from_we_vote_id(contest_office1_we_vote_id)\n if contest_office1_results['contest_office_found']:\n contest_office1_on_stage = contest_office1_results['contest_office']\n contest_office1_id = contest_office1_on_stage.id\n else:\n messages.add_message(request, messages.ERROR, 'Could not retrieve office 1.')\n return HttpResponseRedirect(reverse('office:office_list', args=()) +\n '?google_civic_election_id=' + str(google_civic_election_id) +\n '&state_code=' + 
str(state_code))\n\n contest_office2_results = contest_office_manager.retrieve_contest_office_from_we_vote_id(contest_office2_we_vote_id)\n if contest_office2_results['contest_office_found']:\n contest_office2_on_stage = contest_office2_results['contest_office']\n contest_office2_id = contest_office2_on_stage.id\n else:\n messages.add_message(request, messages.ERROR, 'Could not retrieve contest office 2.')\n return HttpResponseRedirect(reverse('office:office_list', args=()) +\n '?google_civic_election_id=' + str(google_civic_election_id) +\n '&state_code=' + str(state_code))\n\n # # TODO: Migrate bookmarks\n # bookmark_item_list_manager = BookmarkItemList()\n # bookmark_results = bookmark_item_list_manager.retrieve_bookmark_item_list_for_contest_office(\n # contest_office2_we_vote_id)\n # if bookmark_results['bookmark_item_list_found']:\n # messages.add_message(request, messages.ERROR, \"Bookmarks found for Contest Office 2 - \"\n # \"automatic merge not working yet.\")\n # return HttpResponseRedirect(reverse('office:find_and_merge_duplicate_offices', args=()) +\n # \"?google_civic_election_id=\" + str(google_civic_election_id) +\n # \"&state_code=\" + str(state_code))\n\n # Merge attribute values\n conflict_values = figure_out_office_conflict_values(contest_office1_on_stage, contest_office2_on_stage)\n\n for attribute in CONTEST_OFFICE_UNIQUE_IDENTIFIERS:\n conflict_value = conflict_values.get(attribute, None)\n if conflict_value == \"CONFLICT\":\n if is_post:\n choice = request.POST.get(attribute + '_choice', '')\n else:\n choice = request.GET.get(attribute + '_choice', '')\n if contest_office2_we_vote_id == choice:\n setattr(contest_office1_on_stage, attribute, getattr(contest_office2_on_stage, attribute))\n elif conflict_value == \"CONTEST_OFFICE2\":\n setattr(contest_office1_on_stage, attribute, getattr(contest_office2_on_stage, attribute))\n else:\n pass\n\n # Preserve unique google_civic_office_name, _name2, _name3, _name4, and _name5\n if positive_value_exists(contest_office2_on_stage.google_civic_office_name):\n contest_office1_on_stage = add_contest_office_name_to_next_spot(\n contest_office1_on_stage, contest_office2_on_stage.google_civic_office_name)\n if positive_value_exists(contest_office2_on_stage.google_civic_office_name2):\n contest_office1_on_stage = add_contest_office_name_to_next_spot(\n contest_office1_on_stage, contest_office2_on_stage.google_civic_office_name2)\n if positive_value_exists(contest_office2_on_stage.google_civic_office_name3):\n contest_office1_on_stage = add_contest_office_name_to_next_spot(\n contest_office1_on_stage, contest_office2_on_stage.google_civic_office_name3)\n if positive_value_exists(contest_office2_on_stage.google_civic_office_name4):\n contest_office1_on_stage = add_contest_office_name_to_next_spot(\n contest_office1_on_stage, contest_office2_on_stage.google_civic_office_name4)\n if positive_value_exists(contest_office2_on_stage.google_civic_office_name5):\n contest_office1_on_stage = add_contest_office_name_to_next_spot(\n contest_office1_on_stage, contest_office2_on_stage.google_civic_office_name5)\n\n # TODO: Merge quick_info's office details in future\n\n # Merge ballot item's office details\n ballot_items_results = move_ballot_items_to_another_office(contest_office2_id, contest_office2_we_vote_id,\n contest_office1_id, contest_office1_we_vote_id,\n contest_office1_on_stage)\n if not ballot_items_results['success']:\n messages.add_message(request, messages.ERROR, ballot_items_results['status'])\n return 
HttpResponseRedirect(reverse('office:find_and_merge_duplicate_offices', args=()) +\n \"?google_civic_election_id=\" + str(google_civic_election_id) +\n \"&state_code=\" + str(state_code))\n\n # Merge public positions - DALE 2020-06-04 I think we will want to alter this soon\n public_positions_results = move_positions_to_another_office(contest_office2_id, contest_office2_we_vote_id,\n contest_office1_id, contest_office1_we_vote_id,\n True)\n if not public_positions_results['success']:\n messages.add_message(request, messages.ERROR, public_positions_results['status'])\n return HttpResponseRedirect(reverse('office:find_and_merge_duplicate_offices', args=()) +\n \"?google_civic_election_id=\" + str(google_civic_election_id) +\n \"&state_code=\" + str(state_code))\n\n # Merge friends-only positions - DALE 2020-06-04 I think we will want to alter this soon\n friends_positions_results = move_positions_to_another_office(contest_office2_id, contest_office2_we_vote_id,\n contest_office1_id, contest_office1_we_vote_id,\n False)\n if not friends_positions_results['success']:\n messages.add_message(request, messages.ERROR, friends_positions_results['status'])\n return HttpResponseRedirect(reverse('office:find_and_merge_duplicate_offices', args=()) +\n \"?google_civic_election_id=\" + str(google_civic_election_id) +\n \"&state_code=\" + str(state_code))\n\n # TODO: Migrate images?\n\n # Finally, move candidates last\n candidates_results = move_candidates_to_another_office(contest_office2_id, contest_office2_we_vote_id,\n contest_office1_id, contest_office1_we_vote_id)\n if not candidates_results['success']:\n messages.add_message(request, messages.ERROR, candidates_results['status'])\n return HttpResponseRedirect(reverse('office:find_and_merge_duplicate_offices', args=()) +\n \"?google_civic_election_id=\" + str(google_civic_election_id) +\n \"&state_code=\" + str(state_code))\n\n # Save contest_office2_on_stage to remove maplight_id, which much be unique,\n # before we try to save contest_office1_on_stage below\n if positive_value_exists(contest_office2_on_stage.maplight_id):\n contest_office2_on_stage.maplight_id = None\n contest_office2_on_stage.save()\n\n # Note: wait to wrap in try/except block\n contest_office1_on_stage.save()\n # There isn't any office data to refresh from other master tables\n\n # Remove contest office 2\n contest_office2_on_stage.delete()\n\n if redirect_to_contest_office_list:\n return HttpResponseRedirect(reverse('office:office_list', args=()) +\n '?google_civic_election_id=' + str(google_civic_election_id) +\n '&state_code=' + str(state_code))\n\n if remove_duplicate_process:\n return HttpResponseRedirect(reverse('office:find_and_merge_duplicate_offices', args=()) +\n \"?google_civic_election_id=\" + str(google_civic_election_id) +\n \"&state_code=\" + str(state_code))\n\n return HttpResponseRedirect(reverse('office:office_summary', args=(contest_office1_on_stage.id,)))", "def make_po(self, cr, uid, ids, context=None):\n res = super(procurement_order, self).make_po(cr, uid, ids, context=None)\n for procurement in self.browse(cr, uid, ids, context=context):\n # da procurement prendo id ordine x ripassare le righe e vedere il listino for\n pricelist_item = self.pool.get('product.pricelist').price_get(cr, uid, [procurement.purchase_id.pricelist_id.id], procurement.purchase_id.product_id.id, procurement.product_qty or 1.0, procurement.purchase_id.partner_id.id)\n pricelist_item_id = pricelist_item['item_id'][procurement.purchase_id.pricelist_id.id]\n price_item = 
self.pool.get('product.pricelist.item').browse(cr, uid, pricelist_item_id, context=context)\n \n if price_item:\n for line in procurement.purchase_id.order_line:\n vals = {\n 'discount': price_item.discount_line,\n 'discount2': price_item.discount2_line\n }\n self.pool.get('purchase.order.line').write(cr, uid, [line.id], vals)\n \n return res", "def _set_additional_po_order_fields(self, invoice):\n\t\tpass", "def test_invoice_update(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we can create the invoice\n data = self.invoice_data\n data[\"customer_id\"] = id\n id_inv = self._create_model(\"invoice\", data, [])\n if id_inv:\n # create another customer\n id_other = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id_other:\n # then performe update\n data = self.invoice_data\n data[\"customer_id\"] = id_other\n self._update_model(\"invoice\", id, data, [])\n self.assertIsNotNone(id_other)\n self.assertIsNotNone(id_inv)\n self.assertIsNotNone(id)", "def action_generate_lines_txt(self):\n rp_obj = self.env['res.partner']\n voucher_obj = self.env['account.wh.iva']\n txt_iva_obj = self.env['txt.iva.line']\n vouchers = []\n txt_brw = self.browse(self._ids)[0]\n txt_ids = txt_iva_obj.search([('txt_id', '=', txt_brw.id)])\n if txt_ids:\n txt_ids.unlink()\n\n if txt_brw.type:\n vouchers = voucher_obj.search([\n ('date_ret', '>=', txt_brw.date_start),\n ('date_ret', '<=', txt_brw.date_end),\n ('period_id', '=', txt_brw.period_id.id),\n ('state', '=', 'done'),\n ('type', 'in', ['in_invoice', 'in_refund'])])\n else:\n vouchers = voucher_obj.search([\n ('date_ret', '>=', txt_brw.date_start),\n ('date_ret', '<=', txt_brw.date_end),\n ('period_id', '=', txt_brw.period_id.id),\n ('state', '=', 'done'),\n ('type', 'in', ['out_invoice', 'out_refund'])])\n\n for voucher in vouchers:\n acc_part_id = rp_obj._find_accounting_partner(voucher.partner_id)\n for voucher_lines in voucher.wh_lines:\n if voucher_lines.invoice_id.state not in ['open', 'paid']:\n continue\n for voucher_tax_line in voucher_lines.tax_line:\n txt_iva_obj.create(\n {'partner_id': acc_part_id.id,\n 'voucher_id': voucher.id,\n 'invoice_id': voucher_lines.invoice_id.id,\n 'txt_id': txt_brw.id,\n 'untaxed': voucher_tax_line.base,\n 'amount_withheld': voucher_tax_line.amount_ret,\n 'tax_wh_iva_id': voucher_tax_line.id,\n })\n return True", "def prepare_draft_rulings(self):\r\n records = self.retrieve_cached_records()\r\n if records:\r\n records = self.apply_rulings(records)\r\n records = u.apply_aggregators(\r\n map(lambda rc: (rc[0], rc[2]), self.ruling_columns),\r\n records\r\n )\r\n else:\r\n records = [] # might be None\r\n\r\n u.write_excel(\r\n records,\r\n self.draft_ruling_path,\r\n OrderedDict(map(lambda rc: (rc[0], rc[1]), self.ruling_columns)),\r\n )", "def ir_action_cancel_draft(self, cr, uid, ids, context=None):\n if not len(ids):\n return False\n wf_service = netsvc.LocalService(\"workflow\")\n for s_id in ids:\n self.write(cr, uid, s_id, {'state':'draft'})\n wf_service.trg_delete(uid, 'services.contracts.archive', s_id, cr) \n wf_service.trg_create(uid, 'services.contracts.archive', s_id, cr)\n return True", "def _prepare_invoice(self):\n self.ensure_one()\n # journal_id = self.env['account.invoice'].with_context(force_company=self.env.user.company_id.id).default_get(['journal_id'])['journal_id']\n journal_id = self.company_id.journal_id.id\n if not journal_id:\n raise UserError(_('Please 
define an accounting sales journal for this company.'))\n invoice_vals = {\n 'name': self.client_order_ref or '',\n 'origin': self.name,\n 'type': 'out_invoice',\n 'account_id': self.partner_invoice_id.property_account_receivable_id.id,\n 'partner_id': self.partner_invoice_id.id,\n 'partner_shipping_id': self.partner_shipping_id.id,\n 'journal_id': journal_id,\n 'currency_id': self.pricelist_id.currency_id.id,\n 'comment': self.note,\n 'payment_term_id': self.payment_term_id.id,\n 'fiscal_position_id': self.fiscal_position_id.id or self.partner_invoice_id.property_account_position_id.id,\n 'company_id': self.company_id.id,\n 'user_id': self.user_id and self.user_id.id,\n 'team_id': self.team_id.id\n }\n return invoice_vals", "def process_reconciliations(self, data):\n AccountMoveLine = self.env['account.move.line']\n for st_line, datum in zip(self, data):\n payment_aml_rec = AccountMoveLine.browse(datum.get('payment_aml_ids', []))\n for aml_dict in datum.get('counterpart_aml_dicts', []):\n # aml_dict['counterpart_aml_id'] sering error\n if aml_dict.get('counterpart_aml_id',False) :\n aml_dict['move_line'] = AccountMoveLine.browse(aml_dict['counterpart_aml_id'])\n del aml_dict['counterpart_aml_id']\n st_line.process_reconciliation(datum.get('counterpart_aml_dicts', []), payment_aml_rec, datum.get('new_aml_dicts', []))", "def action_move_create(self):\n\n res = super(account_invoice, self).action_move_create()\n\n for inv in self:\n if not inv.move_id:\n return res\n for ml in inv.move_id.line_id:\n ml_vals = {\n 'emp_police': inv.pol_numpol,\n 'emp_quittance': inv.prm_numero_quittance,\n 'emp_effet': datetime.datetime.strptime(inv.prm_datedeb, '%Y-%m-%d').date() if inv.prm_datedeb else datetime.datetime.today(),\n 'emp_datech': datetime.datetime.strptime(inv.prm_datefin, '%Y-%m-%d').date() if inv.prm_datefin else datetime.datetime.today(),\n }\n ml.update(ml_vals)\n move_vals = {\n 'num_police': inv.pol_numpol,\n 'num_quittance': inv.prm_numero_quittance,\n 'date_effect': datetime.datetime.strptime(inv.prm_datedeb, '%Y-%m-%d').date() if inv.prm_datedeb else datetime.datetime.today(),\n 'date_end': datetime.datetime.strptime(inv.prm_datefin, '%Y-%m-%d').date() if inv.prm_datefin else datetime.datetime.today(),\n }\n inv.move_id.update(move_vals)\n self._log_event()\n return res", "def open_invoices(self):\n return {\n 'domain': \"[('id', 'in', \" + str(self.invoice_ids.ids) + \" )]\",\n 'name': 'Invoices',\n 'view_mode': 'tree,form',\n 'res_model': 'account.move',\n 'type': 'ir.actions.act_window',\n }", "def done(self, cr, uid, ids, context=None):\n \n voucher_obj = self.pool.get('account.voucher')\n voucher_line_obj = self.pool.get('account.voucher.line')\n admin_affairs_model_obj = self.pool.get('admin.affairs.model')\n affairs_account_obj = self.pool.get('admin_affairs.account') \n model_id = admin_affairs_model_obj.search(cr, uid, [('model','=','environment.and.safety')], context=context)[0] \n affairs_account = affairs_account_obj.search(cr, uid, [('model_id','=',model_id)], context=context)\n if not affairs_account:\n raise osv.except_osv(_('Warning !'), _('Please insert account configuration For Environment and safety'))\n affairs_account_id = affairs_account[0]\n \n affairs_account_record = affairs_account_obj.browse(cr, uid, affairs_account_id,context=context) \n for record in self.browse(cr, uid, ids, context=context):\n if not record.allowances_lines_after :\n raise osv.except_osv(_('Partner Amount !'), _('Sorry no partner Amount After Rate To Transfer!'))\n notes = 
_(\"Enviroment and Safety allowances Contract: %s\")%(record.name)\n \n journal_id = affairs_account_record.journal_id\n analytic_id = affairs_account_record.analytic_id\n account_id = affairs_account_record.account_id\n\n\t\t# Creating Voucher / Ratitication\n voucher_id = voucher_obj.create(cr, uid, {\n 'amount': record.amount_total,\n 'type': 'ratification',\n 'date': time.strftime('%Y-%m-%d'),\n 'partner_id': record.partner_id.id,\n 'journal_id': journal_id and journal_id.id , \n 'state': 'draft',\n\t\t\t\t\t 'notes':record.notes,\n\t\t\t\t\t 'narration':notes ,\n \t 'company_id':record.company_id.id,\n })\n \t# Creating Voucher / Ratitication Lines\n for line in record.allowances_lines_after:\n '''account_id =line.category_id.account_id\n if not account_id:\n account_id = line.category_id.parent_id.account_id\n \n if not account_id:\n account_id = affairs_account_record.account_id \n\n if not account_id:\n raise osv.except_osv(_('Invalid action !'), _('Please insert Account configuration For Environment and safety Service')) ''' \n \n account_analytic_id =line.category_id.analytic_id\n if not account_analytic_id:\n account_analytic_id = line.category_id.parent_id.analytic_id \n \n if not account_analytic_id:\n account_analytic_id = affairs_account_record.analytic_id\n \n vocher_line_id = voucher_line_obj.create(cr, uid, {\n 'amount': record.amount_total,\n 'voucher_id': voucher_id,\n\t\t\t\t\t 'account_id':account_id and account_id.id,\n\t\t\t\t\t 'account_analytic_id':account_analytic_id and account_analytic_id.id ,\n 'type': 'dr',\n 'name':'environment and Safety allowances :' + record.name,\n })\n\t\t\n\t\t# Selecting Voucher Number / Refernece \n\n voucher_number = self.pool.get('account.voucher').browse(cr,uid,voucher_id)\n\n copy_attachments(self,cr,uid,[record.id],'services.contracts.archive',voucher_id,'account.voucher', context)\n self.write(cr, uid, ids, {'state':'done','transfer':True,'voucher_no':voucher_number.number}) \n return True", "def onchange_partner_id(self\n ):\n if self._context is None:\n context = {}\n acc_part_brw = False\n acc_id = False\n rp_obj = self.env['res.partner']\n wh_line_obj = self.env['account.wh.src.line']\n\n if self.partner_id:\n #partner = rp_obj.browse(self.partner_id)\n acc_part_brw = rp_obj._find_accounting_partner(self.partner_id)\n if self.type and self.type in ('out_invoice', 'out_refund'):\n acc_id = acc_part_brw.property_account_receivable_id \\\n and acc_part_brw.property_account_receivable_id.id or False\n else:\n acc_id = acc_part_brw.property_account_payable_id \\\n and acc_part_brw.property_account_payable_id.id or False\n\n # part_brw = self.ids and rp_obj._find_accounting_partner(self.browse(\n # self, self.ids[0]).partner_id)\n wh_lines = self.ids and wh_line_obj.search(\n [('wh_id', '=', self.ids[0])])\n if not self.partner_id:\n if wh_lines:\n wh_line_obj.unlink(wh_lines)\n wh_lines = []\n if self.partner_id and acc_part_brw and self.partner_id.id != acc_part_brw.id:\n if wh_lines:\n wh_line_obj.unlink(wh_lines)\n wh_lines = []\n\n return {'value': {\n 'line_ids': wh_lines,\n 'account_id': acc_id,\n }\n }", "def _actualizar_precios(self, request, presup_id, *args, **kwargs):\n obj_presup = Presupuesto.objects.get(pk=presup_id)\n lineas_actualizadas = []\n if not obj_presup._vigente():\n turnoList = obj_presup.get_turnos_activos()\n for turno in turnoList:\n kwargs['pk'] = presup_id\n for linea in turno.ofertatec_linea_set.all():\n obj_ofertatec = OfertaTec.objects.get(pk=linea.ofertatec.id)\n if linea.precio != 
obj_ofertatec.precio:\n linea.precio = obj_ofertatec.precio\n cantidad = linea.cantidad or 1\n horas = linea.cant_horas or 1\n linea.precio_total = linea.precio * cantidad * horas\n lineas_actualizadas.append(linea)\n if len(lineas_actualizadas) > 0 and obj_presup.fecha_realizado:\n for turno in turnoList:\n lab.views.createRevision(request, pk=turno.pk)\n createRevision(request, pk=obj_presup.pk)\n for linea in lineas_actualizadas:\n linea.save()\n return True\n # Si el presupuesto esta vigente no le hago modificaciones\n else:\n return False", "def process_reconciliation(self, counterpart_aml_dicts=None, payment_aml_rec=None, new_aml_dicts=None):\n\t\tcounterpart_aml_dicts = counterpart_aml_dicts or []\n\t\tpayment_aml_rec = payment_aml_rec or self.env['account.move.line']\n\t\tnew_aml_dicts = new_aml_dicts or []\n\n\t\taml_obj = self.env['account.move.line']\n\n\t\tcompany_currency = self.journal_id.company_id.currency_id\n\t\tstatement_currency = self.journal_id.currency_id or company_currency\n\t\tst_line_currency = self.currency_id or statement_currency\n\n\t\tcounterpart_moves = self.env['account.move']\n\n\t\t# Check and prepare received data\n\t\tif any(rec.statement_id for rec in payment_aml_rec):\n\t\t\traise UserError(_('A selected move line was already reconciled.'))\n\t\tfor aml_dict in counterpart_aml_dicts:\n\t\t\tif aml_dict['move_line'].reconciled:\n\t\t\t\traise UserError(_('A selected move line was already reconciled.'))\n\t\t\tif isinstance(aml_dict['move_line'], pycompat.integer_types):\n\t\t\t\taml_dict['move_line'] = aml_obj.browse(aml_dict['move_line'])\n\t\tfor aml_dict in (counterpart_aml_dicts + new_aml_dicts):\n\t\t\tif aml_dict.get('tax_ids') and isinstance(aml_dict['tax_ids'][0], pycompat.integer_types):\n\t\t\t\t# Transform the value in the format required for One2many and Many2many fields\n\t\t\t\taml_dict['tax_ids'] = [(4, id, None) for id in aml_dict['tax_ids']]\n\t\tif any(line.journal_entry_ids for line in self):\n\t\t\traise UserError(_('A selected statement line was already reconciled with an account move.'))\n\n\t\t# Fully reconciled moves are just linked to the bank statement\n\t\ttotal = self.amount\n\t\tfor aml_rec in payment_aml_rec:\n\t\t\ttotal -= aml_rec.debit - aml_rec.credit\n\t\t\taml_rec.with_context(check_move_validity=False).write({'statement_line_id': self.id})\n\t\t\tcounterpart_moves = (counterpart_moves | aml_rec.move_id)\n\n\t\t# Create move line(s). Either matching an existing journal entry (eg. 
invoice), in which\n\t\t# case we reconcile the existing and the new move lines together, or being a write-off.\n\t\tif counterpart_aml_dicts or new_aml_dicts:\n\t\t\tst_line_currency = self.currency_id or statement_currency\n\t\t\tst_line_currency_rate = self.currency_id and (self.amount_currency / self.amount) or False\n\n\t\t\t# Create the move\n\t\t\tself.sequence = self.statement_id.line_ids.ids.index(self.id) + 1\n\t\t\tmove_vals = self._prepare_reconciliation_move(self.statement_id.name)\t\n\t\t\t\n\t\t\t###################### Added search line and make create conditional #############################\n\t\t\t#move = self.env['account.move'].create(move_vals)\n\t\t\tmove = self.env['account.move'].search([('statement_id','=',move_vals['statement_id']),('voucher_seq','=',move_vals['voucher_seq'])])\n\t\t\tif not move:\n\t\t\t\tmove = self.env['account.move'].create(move_vals)\n\t\t\t################ End Change #############################\n\t\t\t\n\t\t\tcounterpart_moves = (counterpart_moves | move)\n\n\t\t\t# Create The payment\n\t\t\tpayment = self.env['account.payment']\n\t\t\tif abs(total)>0.00001:\n\t\t\t\tpartner_id = self.partner_id and self.partner_id.id or False\n\t\t\t\tpartner_type = False\n\t\t\t\tif partner_id:\n\t\t\t\t\tif total < 0:\n\t\t\t\t\t\tpartner_type = 'supplier'\n\t\t\t\t\telse:\n\t\t\t\t\t\tpartner_type = 'customer'\n\n\t\t\t\tpayment_methods = (total>0) and self.journal_id.inbound_payment_method_ids or self.journal_id.outbound_payment_method_ids\n\t\t\t\tcurrency = self.journal_id.currency_id or self.company_id.currency_id\n\t\t\t\tpayment = self.env['account.payment'].create({\n\t\t\t\t\t'payment_method_id': payment_methods and payment_methods[0].id or False,\n\t\t\t\t\t'payment_type': total >0 and 'inbound' or 'outbound',\n\t\t\t\t\t'partner_id': self.partner_id and self.partner_id.id or False,\n\t\t\t\t\t'partner_type': partner_type,\n\t\t\t\t\t'journal_id': self.statement_id.journal_id.id,\n\t\t\t\t\t'payment_date': self.date,\n\t\t\t\t\t'state': 'reconciled',\n\t\t\t\t\t'currency_id': currency.id,\n\t\t\t\t\t'amount': abs(total),\n\t\t\t\t\t'communication': self._get_communication(payment_methods[0] if payment_methods else False),\n\t\t\t\t\t'name': self.statement_id.name,\n\t\t\t\t})\n\n\t\t\t# Complete dicts to create both counterpart move lines and write-offs\n\t\t\tto_create = (counterpart_aml_dicts + new_aml_dicts)\n\t\t\tctx = dict(self._context, date=self.date)\n\t\t\tfor aml_dict in to_create:\n\t\t\t\taml_dict['move_id'] = move.id\n\t\t\t\taml_dict['partner_id'] = self.partner_id.id\n\t\t\t\taml_dict['statement_line_id'] = self.id\n\t\t\t\tif st_line_currency.id != company_currency.id:\n\t\t\t\t\taml_dict['amount_currency'] = aml_dict['debit'] - aml_dict['credit']\n\t\t\t\t\taml_dict['currency_id'] = st_line_currency.id\n\t\t\t\t\tif self.currency_id and statement_currency.id == company_currency.id and st_line_currency_rate:\n\t\t\t\t\t\t# Statement is in company currency but the transaction is in foreign currency\n\t\t\t\t\t\taml_dict['debit'] = company_currency.round(aml_dict['debit'] / st_line_currency_rate)\n\t\t\t\t\t\taml_dict['credit'] = company_currency.round(aml_dict['credit'] / st_line_currency_rate)\n\t\t\t\t\telif self.currency_id and st_line_currency_rate:\n\t\t\t\t\t\t# Statement is in foreign currency and the transaction is in another one\n\t\t\t\t\t\taml_dict['debit'] = statement_currency.with_context(ctx).compute(aml_dict['debit'] / st_line_currency_rate, company_currency)\n\t\t\t\t\t\taml_dict['credit'] = 
statement_currency.with_context(ctx).compute(aml_dict['credit'] / st_line_currency_rate, company_currency)\n\t\t\t\t\telse:\n\t\t\t\t\t\t# Statement is in foreign currency and no extra currency is given for the transaction\n\t\t\t\t\t\taml_dict['debit'] = st_line_currency.with_context(ctx).compute(aml_dict['debit'], company_currency)\n\t\t\t\t\t\taml_dict['credit'] = st_line_currency.with_context(ctx).compute(aml_dict['credit'], company_currency)\n\t\t\t\telif statement_currency.id != company_currency.id:\n\t\t\t\t\t# Statement is in foreign currency but the transaction is in company currency\n\t\t\t\t\tprorata_factor = (aml_dict['debit'] - aml_dict['credit']) / self.amount_currency\n\t\t\t\t\taml_dict['amount_currency'] = prorata_factor * self.amount\n\t\t\t\t\taml_dict['currency_id'] = statement_currency.id\n\n\t\t\t# Create write-offs\n\t\t\t# When we register a payment on an invoice, the write-off line contains the amount\n\t\t\t# currency if all related invoices have the same currency. We apply the same logic in\n\t\t\t# the manual reconciliation.\n\t\t\tcounterpart_aml = self.env['account.move.line']\n\t\t\tfor aml_dict in counterpart_aml_dicts:\n\t\t\t\tcounterpart_aml |= aml_dict.get('move_line', self.env['account.move.line'])\n\t\t\tnew_aml_currency = False\n\t\t\tif counterpart_aml\\\n\t\t\t\t\tand len(counterpart_aml.mapped('currency_id')) == 1\\\n\t\t\t\t\tand counterpart_aml[0].currency_id\\\n\t\t\t\t\tand counterpart_aml[0].currency_id != company_currency:\n\t\t\t\tnew_aml_currency = counterpart_aml[0].currency_id\n\t\t\tfor aml_dict in new_aml_dicts:\n\t\t\t\taml_dict['payment_id'] = payment and payment.id or False\n\t\t\t\tif new_aml_currency and not aml_dict.get('currency_id'):\n\t\t\t\t\taml_dict['currency_id'] = new_aml_currency.id\n\t\t\t\t\taml_dict['amount_currency'] = company_currency.with_context(ctx).compute(aml_dict['debit'] - aml_dict['credit'], new_aml_currency)\n\t\t\t\taml_obj.with_context(check_move_validity=False, apply_taxes=True).create(aml_dict)\n\n\t\t\t# Create counterpart move lines and reconcile them\n\t\t\tfor aml_dict in counterpart_aml_dicts:\n\t\t\t\tif aml_dict['move_line'].partner_id.id:\n\t\t\t\t\taml_dict['partner_id'] = aml_dict['move_line'].partner_id.id\n\t\t\t\taml_dict['account_id'] = aml_dict['move_line'].account_id.id\n\t\t\t\taml_dict['payment_id'] = payment and payment.id or False\n\n\t\t\t\tcounterpart_move_line = aml_dict.pop('move_line')\n\t\t\t\tif counterpart_move_line.currency_id and counterpart_move_line.currency_id != company_currency and not aml_dict.get('currency_id'):\n\t\t\t\t\taml_dict['currency_id'] = counterpart_move_line.currency_id.id\n\t\t\t\t\taml_dict['amount_currency'] = company_currency.with_context(ctx).compute(aml_dict['debit'] - aml_dict['credit'], counterpart_move_line.currency_id)\n\t\t\t\tnew_aml = aml_obj.with_context(check_move_validity=False).create(aml_dict)\n\n\t\t\t\t(new_aml | counterpart_move_line).reconcile()\n\n\t\t\t# Balance the move\n\t\t\tst_line_amount = -sum([x.balance for x in move.line_ids])\n\t\t\taml_dict = self._prepare_reconciliation_move_line(move, st_line_amount)\n\t\t\taml_dict['payment_id'] = payment and payment.id or False\n\t\t\taml_obj.with_context(check_move_validity=False).create(aml_dict)\n\n\t\t\t#################### Remarked Post Call ############################\n\t\t\t#move.post()\n\t\t\t#record the move name on the statement line to be able to retrieve it in case of unreconciliation\n\t\t\tself.write({'move_name': move.name})\n\t\t\tpayment and 
payment.write({'payment_reference': move.name})\n\t\telif self.move_name:\n\t\t\traise UserError(_('Operation not allowed. Since your statement line already received a number, you cannot reconcile it entirely with existing journal entries otherwise it would make a gap in the numbering. You should book an entry and make a regular revert of it in case you want to cancel it.'))\n\t\tcounterpart_moves.assert_balanced()\n\t\treturn counterpart_moves", "def pre_approve(self, cr, uid, ids, context={}):\n \tfor voucher in self.browse(cr, uid, ids, context=context):\n \t if not voucher.department_id.analytic_account_id:\n \t raise osv.except_osv(_('Configration Check!'), _(\"Please add cost center for your department!\"))\n \t periods = self.pool.get('account.period').search(cr, uid, [('date_start','<=',voucher.date),('date_stop','>=',voucher.date),('company_id','=',voucher.company_id.id)], context=context)\n\n\n res=0.0\n if voucher.purpose:\n if not voucher.purpose.account_id: raise osv.except_osv(_('Warning!'), _('Please configure account for this purpose!')) \n voucher_line = {\n \t\t'voucher_id': voucher.id,\n \t\t'partner_id': voucher.partner_id.id,\n \t\t'untax_amount': voucher.amount,\n \t\t'amount': voucher.amount,\n 'name': voucher.narration,\n \t\t'type': 'dr',\n \t\t'account_analytic_id': voucher.department_id.analytic_account_id and voucher.department_id.analytic_account_id.id,\n 'account_id': voucher.purpose.account_id.id,\n \t }\n new_amount = res and res or voucher.amount \n voucher_line.update({'amount':new_amount,'untax_amount':new_amount})\n \t if voucher.line_ids :\n for line in voucher.line_ids:\n \t\t self.pool.get('account.voucher.line').write(cr, uid, line.id, {\n \t\t'voucher_id': voucher.id,\n \t\t'partner_id': voucher.partner_id.id,\n \t\t'untax_amount': res or line.amount,\n \t\t'amount': line.amount,\n 'name': voucher.narration,\n \t\t'type': 'dr',\n \t\t'account_analytic_id': line.account_analytic_id and line.account_analytic_id.id or voucher.department_id.analytic_account_id.id,\n 'account_id': voucher.purpose.account_id.id or line.account_id.id,\n \t }, context=context)\n \t else:\n\n \t\t new_voucher_line = self.pool.get('account.voucher.line').create(cr, uid, voucher_line, context=context)\n context.update({'purchase':True})\n self.create_budget_confirmation(cr, uid, [voucher.id], context)\n \tself.write(cr, uid, ids,{'state': 'preapprove','type':'purchase','ratification':True}, context=context)\n #cxt = context.copy()\n #cxt.update({'type':'ratification'})\n if not super(account_voucher, self).create_budget_confirmation(cr, uid, ids, context=context):\n self.write(cr, uid, ids, {'state': 'approved'}, context=context)\n\n \t'''self.write(cr, uid, ids, {'state': 'preapprove'})\n if not super(account_voucher, self).create_budget_confirmation(cr, uid, ids, context=context):\n self.write(cr, uid, ids, {'state': 'approve','type':'purchase','ratification':True}, context=context)'''\n return True", "def do_merge(self, line):\n self.review.merge()", "def invoices(self):\r\n return inv.Invoices(self)", "def finish(self, closed=False, cont=False):\n self.removeTemporaryObject()\n if self.oldWP:\n App.DraftWorkingPlane = self.oldWP\n if hasattr(Gui, \"Snapper\"):\n Gui.Snapper.setGrid()\n Gui.Snapper.restack()\n self.oldWP = None\n\n if len(self.node) > 1:\n\n if True:\n FreeCADGui.addModule(\"Draft\")\n # The command to run is built as a series of text strings\n # to be committed through the `draftutils.todo.ToDo` class.\n if (len(self.node) == 2\n and 
utils.getParam(\"UsePartPrimitives\", False)):\n # Insert a Part::Primitive object\n p1 = self.node[0]\n p2 = self.node[-1]\n\n _cmd = 'FreeCAD.ActiveDocument.'\n _cmd += 'addObject(\"Part::Line\", \"Line\")'\n _cmd_list = ['line = ' + _cmd,\n 'line.X1 = ' + str(p1.x),\n 'line.Y1 = ' + str(p1.y),\n 'line.Z1 = ' + str(p1.z),\n 'line.X2 = ' + str(p2.x),\n 'line.Y2 = ' + str(p2.y),\n 'line.Z2 = ' + str(p2.z),\n 'Draft.autogroup(line)',\n 'FreeCAD.ActiveDocument.recompute()']\n self.commit(translate(\"draft\", \"Create Line\"),\n _cmd_list)\n else:\n # Insert a Draft line\n rot, sup, pts, fil = self.getStrings()\n\n _base = DraftVecUtils.toString(self.node[0])\n _cmd = 'Draft.makeWire'\n _cmd += '('\n _cmd += 'points, '\n _cmd += 'placement=pl, '\n _cmd += 'closed=' + str(closed) + ', '\n _cmd += 'face=' + fil + ', '\n _cmd += 'support=' + sup\n _cmd += ')'\n _cmd_list = ['pl = FreeCAD.Placement()',\n 'pl.Rotation.Q = ' + rot,\n 'pl.Base = ' + _base,\n 'points = ' + pts,\n 'line = ' + _cmd,\n 'Draft.autogroup(line)',\n 'FreeCAD.ActiveDocument.recompute()']\n self.commit(translate(\"draft\", \"Create Wire\"),\n _cmd_list)\n else:\n import Draft\n self.path = Draft.makeWire(self.node, closed=False, face=False)\n\n if self.ui and self.ui.continueMode:\n self.Activated()\n\n self.makeTrench()", "def invoice_items(self,org_id=None,query={}):\n if org_id is None:\n org_id = self.org_id\n query_end_date = datetime.strptime(query['endDate'],'%Y-%m-%dT%H:%M:%SZ')\n # Given a 'query_end_date' to find the invoice containing the\n # line items for that date we need to find the invoice which \n # has 'endDate' equal to the end of the month of the `query_end_date`\n query_first_next_month = query_end_date + relativedelta(months=+1) - relativedelta(days=(query_end_date.day-1))\n target_invoices = []\n invoices = self.invoices(org_id)\n if self.verbose:\n print('Searching invoices org_id={}'.format(org_id))\n print('query={} query_end_date={}'.format(query,query_end_date))\n print('Result keys: {}'.format( invoices['content'].keys() ))\n print('Total result count: {}'.format( invoices['content']['totalCount'] ))\n for invoice in invoices['content']['results']:\n #pprint.pprint(invoice)\n end_date = datetime.strptime(invoice['endDate'],'%Y-%m-%dT%H:%M:%SZ')\n if self.verbose: \n print('invoice({})[\\'endDate\\']={} end_date={}'.format(invoice['id'],invoice['endDate'],end_date))\n if end_date == query_first_next_month:\n target_invoices.append(invoice)\n \n if self.verbose: \n print('Target invoices: {}'.format(target_invoices))\n \n\n target_line_items = []\n for invoice in target_invoices:\n invoice_details = self.invoices(org_id,invoice['id']) \n print('invoice_details: {}'.format(invoice_details))\n for item in invoice_details['content']['lineItems']:\n end_date = datetime.strptime(item['endDate'],'%Y-%m-%dT%H:%M:%SZ')\n if end_date == query_end_date:\n target_line_items.append(item)\n if self.verbose:\n print('target_line_items: {}'.format(target_line_items)) \n return target_line_items", "def write(self, cr, uid, ids, vals, context=None):\n new_accounts = {}\n deleted = []\n if 'line_id' in vals:\n for val in vals['line_id']:\n #the line changed\n if val[2]:\n if 'analytic_account_id' in val[2] or 'account_id' in val[2]:\n new_accounts[val[1]] = val[2]\n if val[0] == 2:\n #for delete case\n deleted.append(val[1])\n budget_line_obj = self.pool.get('account.budget.lines')\n\n analytic_obj = self.pool.get('account.analytic.account')\n account_obj = self.pool.get('account.account')\n for acc_move in 
self.browse(cr, uid, ids, context=context):\n for line in acc_move.line_id:\n account_id = line.account_id.id\n analytic_account_id = line.analytic_account_id.id\n budget = line.analytic_account_id.budget\n analytic_required = line.account_id.user_type.analytic_required \n if line.id in deleted:\n continue\n if line.id in new_accounts:\n if 'analytic_account_id' in new_accounts[line.id]:\n if new_accounts[line.id]['analytic_account_id']:\n analytic_account_id = new_accounts[line.id]['analytic_account_id']\n analytic_account = analytic_obj.browse(cr,uid,analytic_account_id,context=context)\n budget = analytic_account.budget\n else:\n #empty analytic account entered\n budget = analytic_account_id = False\n\n if 'account_id' in new_accounts[line.id]:\n account_id = new_accounts[line.id]['account_id']\n account_rec = account_obj.browse(cr,uid,account_id,context=context)\n analytic_required = account_rec.user_type.analytic_required\n line_ids = budget_line_obj.search(cr, uid, [('general_account_id','=',account_id),\n ('analytic_account_id', '=', analytic_account_id),\n ('period_id', '=', line.period_id.id)],\n context=context)\n if not analytic_account_id and analytic_required:\n raise orm.except_orm(_('Warning!'), _('Analytic Account Required!'))\n\n if not line_ids and budget:\n raise orm.except_orm(_('Warning!'), _('This account has noo budget!'))\n budget_line_vals = (vals.get('state','') in ['completed','closed','posted'] and \\\n {'move_line_ids':[(1,line.id,{'budget_line_id':line_ids and line_ids[0]})]}) or \\\n (line.budget_line_id and {'move_line_ids':[(3,line.id)]}) or {}\n budget_line_obj.write(cr, uid, line_ids and line_ids[0] or [], budget_line_vals,context=context)\n return super(account_move,self).write(cr, uid, ids, vals, context=context)", "def _prepare_order_line_invoice_line(self, cr, uid, line, account_id=False, context=None):\n res = super(sale_order_line, self)._prepare_order_line_invoice_line(cr, uid, line, account_id=account_id, context=context)\n \n res.update({'part_number': line.part_number, 'internal_part_number' : line.internal_part_number})\n return res", "def action_create_invoices(self, data):\n invoice_obj = self.env['account.invoice']\n values = {}\n for val in data:\n values.setdefault(val['invoice_type'], {\n 'order': val.get('sale', val.get('purchase')),\n 'values': []\n })\n values[val['invoice_type']]['values'].append((0, 0, val['values']))\n\n for inv_type, inv_data in values.items():\n invoice = invoice_obj.new(self._prepare_invoice(inv_type))\n invoice._onchange_partner_id()\n inv = invoice._convert_to_write({\n name: invoice[name] for name in invoice._cache\n })\n for _, _, line in inv_data['values']:\n line['account_id'] = inv['account_id']\n inv['invoice_line_ids'] = inv_data['values']\n new_invoice = invoice_obj.sudo().create(inv)\n new_invoice.action_invoice_open()\n inv_data['order'].write({\n 'exchange_invoice_ids': [(4, new_invoice.id)]\n })", "def finish(self, closed=False, cont=False):\n self.removeTemporaryObject()\n if self.oldWP:\n App.DraftWorkingPlane = self.oldWP\n if hasattr(Gui, \"Snapper\"):\n Gui.Snapper.setGrid()\n Gui.Snapper.restack()\n self.oldWP = None\n\n if len(self.node) > 1:\n\n if False:\n Gui.addModule(\"Draft\")\n # The command to run is built as a series of text strings\n # to be committed through the `draftutils.todo.ToDo` class.\n if (len(self.node) == 2\n and utils.getParam(\"UsePartPrimitives\", False)):\n # Insert a Part::Primitive object\n p1 = self.node[0]\n p2 = self.node[-1]\n\n _cmd = 
'FreeCAD.ActiveDocument.'\n _cmd += 'addObject(\"Part::Line\", \"Line\")'\n _cmd_list = ['line = ' + _cmd,\n 'line.X1 = ' + str(p1.x),\n 'line.Y1 = ' + str(p1.y),\n 'line.Z1 = ' + str(p1.z),\n 'line.X2 = ' + str(p2.x),\n 'line.Y2 = ' + str(p2.y),\n 'line.Z2 = ' + str(p2.z),\n 'Draft.autogroup(line)',\n 'FreeCAD.ActiveDocument.recompute()']\n self.commit(translate(\"draft\", \"Create Line\"),\n _cmd_list)\n else:\n # Insert a Draft line\n rot, sup, pts, fil = self.getStrings()\n\n _base = DraftVecUtils.toString(self.node[0])\n _cmd = 'Draft.makeWire'\n _cmd += '('\n _cmd += 'points, '\n _cmd += 'placement=pl, '\n _cmd += 'closed=' + str(closed) + ', '\n _cmd += 'face=' + fil + ', '\n _cmd += 'support=' + sup\n _cmd += ')'\n _cmd_list = ['pl = FreeCAD.Placement()',\n 'pl.Rotation.Q = ' + rot,\n 'pl.Base = ' + _base,\n 'points = ' + pts,\n 'line = ' + _cmd,\n 'Draft.autogroup(line)',\n 'FreeCAD.ActiveDocument.recompute()']\n self.commit(translate(\"draft\", \"Create Wire\"),\n _cmd_list)\n else:\n import Draft\n self.path = Draft.makeWire(self.node, closed=False, face=False)\n\n # super(_CommandTrench, self).finish()\n gui_base_original.Creator.finish(self)\n if self.ui and self.ui.continueMode:\n self.Activated()\n\n self.makeTrench()", "def _prepare_invoice_line(self, inv_id):\n res = {}\n account_id = self.product_id.property_account_income_id.id or self.product_id.categ_id.property_account_income_categ_id.id\n if not account_id:\n raise UserError(_('Please define income account for this product: \"%s\" (id:%d).') % \\\n (self.product_id.name, self.product_id.id,))\n price_unit = self.product_id.lst_price\n res = {\n 'invoice_id': inv_id.id,\n 'name': self.name,\n 'origin': self.order_id.name,\n 'account_id': account_id,\n 'uom_id': self.product_uom_id.id,\n 'quantity': self.product_uom_qty,\n 'price_unit': price_unit,\n 'product_id': self.product_id.id,\n 'invoice_line_tax_id': False,\n 'order_line_id': self.id\n }\n return res", "def invoices(self):\r\n return Invoices(self)", "def _prepare_invoice_lines(self, exchange_line, order_line):\n invoice_type = {\n 'sale.order.line': {\n 'higher': 'out_invoice', 'lower': 'out_refund',\n 'type': 'sale', 'field': 'exchange_sale_line_id'\n },\n 'purchase.order.line': {\n 'higher': 'in_invoice', 'lower': 'in_refund',\n 'type': 'purchase', 'field': 'exchange_purchase_line_id'\n },\n }\n product = exchange_line.exchange_product_id or exchange_line.product_id\n data = {\n 'invoice_type': False,\n 'values': {\n 'product_id': product.id,\n 'quantity': exchange_line.quantity,\n 'name': 'Exchange for [%s]' % exchange_line.product_id.display_name,\n }\n }\n if exchange_line.exchange_product_id or \\\n exchange_line.price_subtotal > order_line.price_subtotal:\n data['invoice_type'] = invoice_type[order_line._name]['higher']\n elif exchange_line.price_subtotal < order_line.price_subtotal:\n data['invoice_type'] = invoice_type[order_line._name]['lower']\n else:\n return {}\n data[invoice_type[order_line._name]['type']] = order_line.order_id\n data['values'][invoice_type[order_line._name]['field']] = order_line.id\n data['values']['price_unit'] = exchange_line.price_unit\n # TODO i think we should take the different between prices NOT the all price\n # abs(exchange_line.price_unit - order_line.price_unit)\n return data", "def confirmed(self, cr, uid, ids, context=None):\n\tallow_archive_line_obj = self.pool.get('services.contracts.allowances.lines')\n for record in self.browse(cr, uid, ids, context=context):\n\t\tif not record.allowances_lines_before :\n 
\traise osv.except_osv(_('Partner Lines !'), _('Sorry no partner Lines!'))\n\n\t \tlines_ids = [line.id for line in record.allowances_lines_after]\n \tallow_archive_line_obj.unlink(cr,uid,lines_ids,context=context)\n\n\t\tfor lines in record.allowances_lines_before:\n\t\t\tif lines.percentage_rating < 0 or lines.percentage_rating > 100 :\n \t\traise osv.except_osv(_('Rate Error !'), _('Sorry you insert wrong rate ... rate is between (0,100)!'))\n \t\tamount_after_rate_id = allow_archive_line_obj.create(cr, uid, {\n \t\t\t\t'cost_of_rent':lines.cost_of_rent,\n \t\t\t\t'amount_untaxed':round (lines.amount_untaxed*lines.percentage_rating/100,2),\n \t\t\t\t'amount_tax':round(lines.amount_tax*lines.percentage_rating/100,2),\n \t\t\t\t'amount_total':round(lines.amount_total*lines.percentage_rating/100,2),\n \t\t\t\t'deduct_days':lines.deduct_days,\n \t\t\t\t'deduct_amount':lines.deduct_amount,\n \t\t\t\t'contract_id':lines.contract_id.id,\n\t\t\t\t\t'env_allow_id_after_rate':record.id,\n\t\t\t\t\t'type': 'after',\n 'category_id':lines.category_id.id,\n\t\t\t\t\t'percentage_rating':lines.percentage_rating,\n\n })\n\t\t\n \n self.write(cr, uid, ids, {'state':'confirmed'})\n return True", "def complete(self, cr, uid, ids, context={}):\n budget_pool = self.pool.get('account.budget')\n budget_line_pool = self.pool.get('account.budget.lines')\n for r in self.browse(cr, uid, ids, context=context):\n if r.type=='transfer' and not r.line_ids:\n raise osv.except_osv(_('Error!'),_('You cannot complete Transfer Operations without any Budget line.'))\n if r.budget_type=='cash':\n budget_ids = budget_pool.search(cr, uid,[('analytic_account_id', '=', r.analytic_account_id.id), \n ('period_id', '=', r.period_id.id)], context=context)\n budget_line_id = budget_line_pool.search(cr, uid,[('general_account_id', '=', r.account_id.id), \n ('account_budget_id', 'in', tuple(budget_ids))], context=context)\n if budget_line_id:\n line=budget_line_pool.browse(cr, uid, budget_line_id, context=context)[0]\n if line.planned_amount+line.total_operation < line.cash_total_operation + r.amount:\n raise orm.except_orm(_('Error!'),\n _(\"Cash budget (%s) can't be more than planned budget (%s)!\") % \\\n ( line.cash_total_operation+ r.amount,line.planned_amount+line.total_operation ,))\n if line.cash_residual_balance + r.amount <=0:\n raise orm.except_orm(_('Error!'),\n _(\"The amount you try to transfer (%s) is more than %s residual (%s)!\") % \\\n (r.amount, line.name, line.cash_residual_balance,))\n for e in r.line_ids:\n if line.planned_amount+line.total_operation < line.cash_total_operation - r.amount:\n raise orm.except_orm(_('Error!'),\n _(\"Cash budget (%s) can't be more than planned budget (%s)!\") % \\\n ( e.cash_total_operation- r.amount,line.planned_amount+line.total_operation ,))\n if e.line_id.cash_residual_balance - e.amount <=0:\n raise orm.except_orm(_('Error!'),\n _(\"The amount you try to transfer (%s) is more than %s residual (%s)!\") % \\\n (e.amount, e.line_id.name, e.line_id.cash_residual_balance,))\n return self.write(cr, uid, ids,{'state':'complete','name': r.name == '/' and \n self.pool.get('ir.sequence').get(cr, uid, 'account.budget.operation') or \n r.name, 'amount': r.type=='increase' and r.amount or sum([l.amount for l in r.line_ids])}, context=context)\n \n return super(account_budget_operation, self).complete(cr, uid, ids, context=context)", "def copy(self, cr, uid, id, default=None, context=None):\n if default is None:\n default = {}\n default.update({'name': self.pool.get('ir.sequence').get(cr, 
uid, 'payment.enrich'),'state':'draft','month':None,'enrich_lines':None,\n 'expenditure_voucher_id':None,'voucher_id':None,'approved_date':None})\n return super(payment_enrich, self).copy(cr, uid, id, default, context)", "def lines(self, form, ids=None, done=None):\n moveline_obj = self.pool.get('account.move.line')\n cr,uid = self.cr,self.uid\n ctx = self.context.copy()\n ctx['fiscalyear'] = form['fiscalyear_id']\n if form['filter'] == 'filter_period':\n ctx['period_from'] = form['period_from']\n ctx['period_to'] = form['period_to']\n elif form['filter'] == 'filter_date':\n ctx['date_from'] = form['date_from']\n ctx['date_to'] = form['date_to']\n ctx['state'] = form['target_move']\n\n account_ids = self.pool.get('account.account')._get_children_and_consol(cr, uid, [form['account_id'][0]], context=ctx)\n if not account_ids: return []\n move_query = moveline_obj._query_get(cr, uid, obj='l', context=ctx)\n\n cr.execute(\"\"\"\n select\n min(l.id) as id,\n to_char(date,'MONTH') as name,\n sum(l.debit-l.credit) as balance,\n sum(l.debit) as debit,\n sum(l.credit) as credit \n from\n account_move_line l\n left join\n account_account a on (l.account_id=a.id)\n where \n l.account_id in %s \n AND \"\"\"+move_query+\"\"\"\n group by\n to_char(date,'MONTH'),to_char(date,'MM') \n ORDER BY to_char(date,'MM')\n \"\"\", (tuple(account_ids),))\n\n self.data = cr.dictfetchall()\n return self.data", "def create_invoice(self, order): # noqa:max-complexity=18\n\n if len(order['order_lines']) == 0:\n raise RuntimeError(\n \"Expected 1 order_lines in order {}, got: {}\".format(\n order['order_id'],\n order['order_lines']\n )\n )\n\n order_id = order['order_id']\n\n refund = False\n if order['state'] == 'REFUND':\n refund = True\n self.stdout.write(self.style.WARNING(\"Refunded order: {}\".format(order_id)))\n elif order['state'] == 'PAID':\n pass\n else:\n self.stdout.write(self.style.WARNING(\"Not processing unknown order state {} for: {}\".format(order['state'], order_id)))\n return\n\n if self.only_known and order_id not in billy.TICKETBUTLER_IGNORE_LIST:\n self.stdout.write(self.style.WARNING(\"Only processing known invoices, skipping {}\".format(order_id)))\n return\n\n # Object containing all created tickets, to have an invoice relation\n # appended later\n ticketbutler_tickets = []\n\n for ticket in order['tickets']:\n\n sprints = list(filter(\n lambda q: q['question'] == 148,\n ticket['answers']\n ))[0]\n\n if any(filter(lambda c: c['choice_heading'].lower() == 'no', sprints['answered_choices'])):\n sprints = models.TicketbutlerTicket.SPRINTS_NO\n elif any(filter(lambda c: c['choice_heading'].lower() == 'maybe', sprints['answered_choices'])):\n sprints = models.TicketbutlerTicket.SPRINTS_MAYBE\n elif any(filter(lambda c: c['choice_heading'].lower() == 'yes', sprints['answered_choices'])):\n sprints = models.TicketbutlerTicket.SPRINTS_YES\n\n ticketbutler_ticket = models.TicketbutlerTicket.get_or_create(\n ticket['email'],\n ticket['full_name'],\n order_id,\n sprints,\n ticket['ticket_type_name'],\n )\n if refund:\n self.stdout.write(self.style.WARNING(\"This ticket was marked refunded: {}\".format(order_id)))\n ticketbutler_ticket.refunded = True\n ticketbutler_ticket.save()\n else:\n ticketbutler_ticket.refunded = False\n ticketbutler_ticket.save()\n\n ticketbutler_tickets.append(ticketbutler_ticket)\n\n if refund:\n self.stdout.write(self.style.WARNING(\"Skipping refunded order: {}\".format(order_id)))\n return\n\n # If an email is changed on a TicketButler ticket and an old user exists without any 
other tickets,\n # then disable this user's account and delete the ticket from the system\n all_order_tickets = models.TicketbutlerTicket.objects.filter(ticketbutler_orderid=order_id)\n\n for ticket in order['tickets']:\n\n for verify_ticket in all_order_tickets:\n # Check if the ticket is active in the current order, if it is\n # then skip it.\n if any(active.id == verify_ticket.id for active in ticketbutler_tickets):\n continue\n # Yeah, it's not active anymore, so delete it and potentially\n # disable the user account\n inactive_ticket = verify_ticket\n self.stdout.write(self.style.WARNING(\"Going to remove ticket for {}, order_id: {}\".format(inactive_ticket.user.email, order_id)))\n if inactive_ticket.user.tickets.all().exclude(id=inactive_ticket.id).exists():\n # Just remove the ticket\n self.stdout.write(self.style.WARNING(\"Found another ticket for user {} and deleted the inactive ticket in question but not the user\".format(inactive_ticket.user.email)))\n if inactive_ticket.pk:\n inactive_ticket.delete()\n continue\n else:\n # Remove the user account too if there are no submissions and it's not a superuser\n if not inactive_ticket.user.is_superuser and not inactive_ticket.user.submissions.all().exists():\n if inactive_ticket.user.is_active:\n self.stdout.write(self.style.WARNING(\"Also disabling user account for: {}\".format(inactive_ticket.user.email)))\n inactive_ticket.user.is_active = False\n inactive_ticket.user.save()\n else:\n self.stdout.write(self.style.WARNING(\"User was already inactive: {}\".format(inactive_ticket.user.email)))\n # In case the user had several tickets, and one of them was already deleted\n if inactive_ticket.pk:\n inactive_ticket.delete()\n\n if 'discount' in order:\n if order['discount']['amount'] == 100:\n\n for ticket in ticketbutler_tickets:\n ticket.free_ticket = True\n ticket.save()\n\n self.stdout.write(self.style.SUCCESS(\"Skipping invoice for free ticket for order id: {}\".format(order_id)))\n return\n else:\n self.stdout.write(self.style.ERROR(\"!!! 
Order id {} will have an invoice generated with missing information, Ticketbutler said the discount was: {}\".format(order_id, order['discount']['amount'])))\n\n for ticketbutler_order_line_no, order_line in enumerate(order['order_lines']):\n\n self.process_order_line(order, order_line, ticketbutler_tickets, ticketbutler_order_line_no=ticketbutler_order_line_no)", "def process(invoices, output):\n with open(output[\"fname\"], 'w', encoding=output[\"encoding\"]) as f:\n for invoice in invoices.values():\n flag = False\n repeating_entries = set()\n\n for key, entry in invoice.entries.items():\n if invoice.number_of_repeats[entry.name] > 1:\n flag = True\n repeating_entries.add((key, entry))\n\n if flag:\n f.write(str(invoice))\n for key, entry in sorted(repeating_entries, key=key_func):\n f.write(f'\\t{str(entry)}\\t{str(key)}\\n')", "def refund(self, cr, uid, ids, context=None):\n clone_list = []\n line_obj = self.pool.get('pos.order.line')\n \n for order in self.browse(cr, uid, ids, context=context):\n current_session_ids = self.pool.get('pos.session').search(cr, uid, [\n ('state', '!=', 'closed'),\n ('user_id', '=', uid)], context=context)\n if not current_session_ids:\n raise osv.except_osv(_('Error!'), _('To return product(s), you need to open a session that will be used to register the refund.'))\n\n clone_id = self.copy(cr, uid, order.id, {\n 'name': order.name + ' REFUND', # not used, name forced by create\n 'session_id': current_session_ids[0],\n 'date_order': time.strftime('%Y-%m-%d %H:%M:%S'),\n 'parent_id': order.id,\n }, context=context)\n clone_list.append(clone_id)\n\n for clone in self.browse(cr, uid, clone_list, context=context):\n for order_line in clone.lines:\n print order_line.available_qty\n line_obj.write(cr, uid, [order_line.id], {\n 'return_qty': 0.0,\n 'qty': -(order_line.parent_id.available_qty),\n }, context=context)\n\n abs = {\n 'name': _('Return Products'),\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'pos.order',\n 'res_id':clone_list[0],\n 'view_id': False,\n 'context':context,\n 'type': 'ir.actions.act_window',\n 'nodestroy': True,\n 'target': 'current',\n }\n return abs", "def _create_normalBom(self, idd, processedIds=[]):\n default = {}\n \n if idd in processedIds:\n return False\n checkObj=self.browse(idd)\n if not checkObj:\n return False\n bomType = self.env['mrp.bom']\n objBoms = bomType.search([('product_tmpl_id', '=', checkObj.product_tmpl_id.id), ('type', '=', 'normal'), ('active', '=', True)])\n idBoms = bomType.search([('product_tmpl_id', '=', checkObj.product_tmpl_id.id), ('type', '=', 'ebom'), ('active', '=', True)])\n\n if not objBoms:\n if idBoms:\n default={'product_tmpl_id': idBoms[0].product_tmpl_id.id,\n 'type': 'normal', 'active': True, }\n if idBoms[0].product_id:\n default.update({'product_id': idBoms[0].product_id.id})\n processedIds.append(idd)\n newidBom = idBoms[0].with_context({'internal_writing':True}).copy(default)\n if newidBom:\n newidBom.with_context({'internal_writing':True}).write(default)\n ok_rows = self._summarizeBom(newidBom.bom_line_ids)\n for bom_line in list(set(newidBom.bom_line_ids) ^ set(ok_rows)):\n bom_line.unlink()\n for bom_line in ok_rows:\n bom_line.with_context({'internal_writing':True}).write(\n { 'type': 'normal', 'source_id': False, \n 'product_qty': bom_line.product_qty, } )\n self._create_normalBom(bom_line.product_id.id, processedIds=processedIds)\n else:\n for bom_line in bomType.browse(objBoms[0].id).bom_line_ids:\n self._create_normalBom(bom_line.product_id.id, 
processedIds=processedIds)\n return False", "def action_done(self):\n result = super(StockPicking, self).action_done()\n if self.picking_type_code == 'outgoing' and self.group_id.sale_id:\n for line in self.move_line_ids.filtered(\n lambda x: x.lot_id and x.lot_id.plan_type == 'sim'):\n line.lot_id.write({'partner_id': self.partner_id.id})\n\n return result", "def batch_transfer(self):\n ticket_range = self.zendesk.ticket_range()\n for i in range(1, ticket_range):\n tickets = self.zendesk.get_list_of_tickets(i)\n for ticket in tickets[\"tickets\"]:\n ticket_id = ticket[\"id\"]\n self.transfer_ticket(ticket_id)", "def merge_object(self, obj):\n for key, value in obj.lines.items():\n if key not in self.lines:\n self.lines[key] = value\n self.lines[key] = self.lines[key] + value", "def _prepare_invoice(self):\n self.ensure_one()\n result = super(SaleOrder, self)._prepare_invoice()\n result.update({\n 'cost_center_id': self.cost_center_id and self.cost_center_id.id or False\n })\n return result", "def prep_incorporation_correction_filing(session, business, original_filing_id, payment_id, option):\n filing_template = copy.deepcopy(CORRECTION_INCORPORATION)\n filing_template['filing']['business'] = {'identifier': business.identifier}\n for party in filing_template['filing']['correction']['parties']:\n for role in party['roles']:\n if role['roleType'] == 'Completing Party':\n party['officer']['email'] = 'comp_party@email.com'\n filing_template['filing']['correction']['contactPoint']['email'] = 'test@test.com'\n filing_template['filing']['correction']['correctedFilingId'] = original_filing_id\n filing = create_filing(token=payment_id, filing_json=filing_template, business_id=business.id)\n filing.payment_completion_date = filing.filing_date\n filing.save()\n if option in ['COMPLETED']:\n uow = versioning_manager.unit_of_work(session)\n transaction = uow.create_transaction(session)\n filing.transaction_id = transaction.id\n filing.save()\n return filing", "def receive_voucher(self, cr, uid, ids, context=None):\n super(account_voucher, self).receive_voucher(cr, uid, ids, context=context)\n if self.browse(cr, uid, ids, context=context)[0].operation_type == 'treasury':\n self.action_move_line_create(cr, uid, ids, context=context)\n return self.write(cr, uid, ids, {'state': 'receive'}, context=context)", "def merge_recs(part_one, part_two, output):\n start_op_length = 28\n with open(part_one, 'rb') as a_handle, \\\n open(part_two, 'rb') as b_handle, \\\n open(output, 'wb') as merged:\n\n a_data = a_handle.read()\n b_data = b_handle.read()\n\n postgame_pos, _ = find_postgame(a_data, len(a_data))\n if postgame_pos:\n a_data_end = postgame_pos - LOOKAHEAD\n else:\n a_data_end = len(a_data)\n b_header_len, = struct.unpack('<I', b_data[:4])\n chapter = mgz.body.operation.build({\n 'type': 'action',\n 'op': 1,\n 'length': 2,\n 'action': {\n 'type': 'chapter',\n 'player_id': 0xff # our merge marker\n }\n })\n\n # part A with no postgame struct\n merged.write(a_data[:a_data_end])\n # chapter action\n merged.write(chapter)\n # offset to start of part B operations\n merged.write(struct.pack('<I', a_data_end + len(chapter) + b_header_len))\n # part B header (now a \"saved chapter\")\n merged.write(b_data[4:b_header_len])\n # part B operations with no start operation\n merged.write(b_data[b_header_len + start_op_length:])", "def _reset_invoices_and_references_to_created(cls, routing_slip: RoutingSlipModel):\n invoices: List[InvoiceModel] = db.session.query(InvoiceModel) \\\n .filter(InvoiceModel.routing_slip == 
routing_slip.number) \\\n .filter(InvoiceModel.invoice_status_code == InvoiceStatus.PAID.value) \\\n .all()\n for inv in invoices:\n # Reset the statuses\n inv.invoice_status_code = InvoiceStatus.CREATED.value\n inv_ref = InvoiceReferenceModel.find_by_invoice_id_and_status(\n inv.id, InvoiceReferenceStatus.COMPLETED.value\n )\n inv_ref.status_code = InvoiceReferenceStatus.ACTIVE.value\n # Delete receipts as receipts are reversed in CFS.\n for receipt in ReceiptModel.find_all_receipts_for_invoice(inv.id):\n db.session.delete(receipt)", "def update_invoice(cls, invoice_id: int, payment_request: Tuple[Dict[str, Any]], is_apply_credit: bool = False):\n current_app.logger.debug('<update_invoice')\n\n invoice: Invoice = Invoice.find_by_id(invoice_id, skip_auth_check=False)\n # If the call is to apply credit, apply credit and release records.\n if is_apply_credit:\n credit_balance = Decimal('0')\n payment_account: PaymentAccount = PaymentAccount.find_by_id(invoice.payment_account_id)\n invoice_balance = invoice.total - (invoice.paid or 0)\n if (payment_account.credit or 0) >= invoice_balance:\n pay_service: PaymentSystemService = PaymentSystemFactory.create_from_payment_method(\n invoice.payment_method_code)\n # Only release records, as the actual status change should happen during reconciliation.\n pay_service.apply_credit(invoice)\n credit_balance = payment_account.credit - invoice_balance\n invoice.paid = invoice.total\n invoice.save()\n elif (payment_account.credit or 0) <= invoice_balance:\n invoice.paid = (invoice.paid or 0) + (payment_account.credit or 0)\n invoice.save()\n\n payment_account.credit = credit_balance\n payment_account.save()\n else:\n payment_method = get_str_by_path(payment_request, 'paymentInfo/methodOfPayment')\n\n is_not_currently_on_ob = invoice.payment_method_code != PaymentMethod.ONLINE_BANKING.value\n is_not_changing_to_cc = payment_method not in (PaymentMethod.CC.value, PaymentMethod.DIRECT_PAY.value)\n # can patch only if the current payment method is OB\n if is_not_currently_on_ob or is_not_changing_to_cc:\n raise BusinessException(Error.INVALID_REQUEST)\n\n # check if it has any invoice references already created\n # if there is any invoice ref , send them to the invoiced credit card flow\n\n invoice_reference = InvoiceReference.find_active_reference_by_invoice_id(invoice.id)\n if invoice_reference:\n invoice.payment_method_code = PaymentMethod.CC.value\n else:\n pay_service: PaymentSystemService = PaymentSystemFactory.create_from_payment_method(\n PaymentMethod.DIRECT_PAY.value)\n payment_account = PaymentAccount.find_by_id(invoice.payment_account_id)\n pay_service.create_invoice(payment_account, invoice.payment_line_items, invoice,\n corp_type_code=invoice.corp_type_code)\n\n invoice.payment_method_code = PaymentMethod.DIRECT_PAY.value\n invoice.save()\n current_app.logger.debug('>update_invoice')\n return invoice.asdict()", "def action_process(self):\n self.check_difference_balance()\n for statement in self:\n statement_lines = statement.credit_move_line_ids + statement.debit_move_line_ids\n for statement_line in statement_lines:\n if statement_line.move_line_id:\n statement_line.move_line_id.write({'cleared_bank_account': statement_line.cleared_bank_account,\n 'bank_acc_rec_statement_id': statement_line.cleared_bank_account and statement.id or False\n })\n statement.write({'state': 'done',\n 'verified_by_user_id': self._uid,\n 'verified_date': time.strftime('%Y-%m-%d')\n })\n for debit_line in statement.debit_move_line_ids:\n if 
debit_line.move_line_id.cleared_bank_account:\n debit_line.move_line_id.move_id.is_reconciled = True\n return True", "def _inline_draft(request):\n # TODO(guido): turn asserts marked with XXX into errors\n # Don't use @login_required, since the JS doesn't understand redirects.\n if not request.user:\n # Don't log this, spammers have started abusing this.\n return HttpTextResponse('Not logged in')\n snapshot = request.POST.get('snapshot')\n assert snapshot in ('old', 'new'), repr(snapshot)\n left = (snapshot == 'old')\n side = request.POST.get('side')\n assert side in ('a', 'b'), repr(side) # Display left (a) or right (b)\n issue_id = int(request.POST['issue'])\n issue = models.Issue.get_by_id(issue_id)\n assert issue # XXX\n patchset_id = int(request.POST.get('patchset') or\n request.POST[side == 'a' and 'ps_left' or 'ps_right'])\n patchset = models.PatchSet.get_by_id(int(patchset_id), parent=issue.key)\n assert patchset # XXX\n patch_id = int(request.POST.get('patch') or\n request.POST[side == 'a' and 'patch_left' or 'patch_right'])\n patch = models.Patch.get_by_id(int(patch_id), parent=patchset.key)\n assert patch # XXX\n text = request.POST.get('text')\n lineno = int(request.POST['lineno'])\n message_id = request.POST.get('message_id')\n comment = _add_or_update_comment(user=request.user, issue=issue, patch=patch,\n lineno=lineno, left=left,\n text=text, message_id=message_id)\n issue.calculate_draft_count_by_user()\n issue_fut = issue.put_async()\n\n query = models.Comment.query(\n models.Comment.patch_key == patch.key, models.Comment.lineno == lineno,\n models.Comment.left == left).order(models.Comment.date)\n comments = list(c for c in query if not c.draft or c.author == request.user)\n if comment is not None and comment.author is None:\n # Show anonymous draft even though we don't save it\n comments.append(comment)\n issue_fut.get_result()\n if not comments:\n return HttpTextResponse(' ')\n for c in comments:\n c.complete()\n return render_to_response('inline_comment.html',\n {'user': request.user,\n 'patch': patch,\n 'patchset': patchset,\n 'issue': issue,\n 'comments': comments,\n 'lineno': lineno,\n 'snapshot': snapshot,\n 'side': side,\n },\n context_instance=RequestContext(request))", "def action_view_invoice_salon(self):\n return {\n 'name': 'Invoices',\n 'domain': [('invoice_origin', '=', self.name)],\n 'res_model': 'account.move',\n 'view_id': False,\n 'view_mode': 'tree,form',\n 'type': 'ir.actions.act_window',\n }", "def test_duplicate_invoices(self):\n biling_info = baker.make(BillingInfo)\n for i in range(20): # Try this more times to increase chance of failure\n order = baker.make(Order, user=biling_info.user)\n Invoice.objects.all().delete()\n call_concurrently(3, complete_concrete_order, order_id=order.id)\n self.assertEqual(Invoice.objects.count(), 1)", "def get_partial_books(self, fb_brw):\n cr, uid = self.cr, self.uid\n fb_obj = self.pool.get('fiscal.book')\n if not fb_brw.fbl_ids:\n return [{'init': [], 'lines': [], 'partial_total': []}]\n\n line_groups = self.get_line_groups(fb_brw)\n last_page = len(line_groups)\n if last_page == 1:\n # self._print_book(\n # [], line_groups[0].get('report_lines'), [], line_groups[0])\n return [{'init': [],\n 'lines': line_groups[0].get('report_lines'),\n 'partial_total': []}]\n res = []\n for subgroup in line_groups:\n cr.execute('SAVEPOINT report_original_fb_' + str(\n subgroup.get('page')))\n inv_ids = [line.get('invoice_id')[0]\n for line in subgroup.get('report_lines')\n if line.get('invoice_id')]\n self.context.update(\n 
call_from_report=1, report_group_inv_ids=inv_ids)\n fb_obj.update_book(cr, uid, [fb_brw.id], context=self.context)\n fb_brw.refresh()\n begin_line = self.get_begin_line(res)\n partial_total = (subgroup.get('page') != last_page and\n self.get_partial_total(fb_brw, begin_line) or [])\n res.append({'init': begin_line,\n 'lines': subgroup.get('report_lines'),\n 'partial_total': partial_total})\n # self._print_book(\n # begin_line, subgroup.get('report_lines'), partial_total,\n # subgroup)\n cr.execute('ROLLBACK TO SAVEPOINT report_original_fb_' + str(\n subgroup.get('page')))\n return res", "def _prepare_invoice_line(self, cr, uid, group, picking, move_line, invoice_id,\n invoice_vals, context=None):\n result = super(stock_picking, self)._prepare_invoice_line(cr, uid, group, picking, move_line, invoice_id,\n invoice_vals, context=None)\n \n result['discount'] = self._get_discount_invoice(cr, uid, move_line)\n result['discount2'] = self._get_discount2_invoice(cr, uid, move_line)\n result['price_unit'] = self._get_price_unit_invoice(cr, uid, move_line, invoice_vals['type'], context=None)\n return result", "def UpdateInvoice(self):\n self.builder.get_object('GuiInvProd').get_buffer().set_text(\"Product:\\n\")\n self.builder.get_object('GuiInvPrice').get_buffer().set_text(\"Price:\\n\")\n self.amount = 0\n for items in self.prod_list:\n self.builder.get_object('GuiInvProd').get_buffer().insert_at_cursor(\n u\"%s\\n\" % items['name'])\n if self.is_member:\n self.builder.get_object('GuiInvPrice').get_buffer().insert_at_cursor(\n config.CURRENCY_SYMBOL + u\"%.2f\\n\" % items[2])\n self.amount = self.amount + items[2]\n else:\n self.builder.get_object('GuiInvPrice').get_buffer().insert_at_cursor(\n config.CURRENCY_SYMBOL + u\"%.2f\\n\" % items[3])\n self.amount = self.amount + items[3]\n if self.is_member:\n self.builder.get_object('GuiInvProd').get_buffer().insert_at_cursor(\n u\"\\nYou are a member.\")\n self.builder.get_object('GuiTotal').set_text(config.CURRENCY_SYMBOL + u\"%.2f\" % self.amount)\n self.builder.get_object('GuiInput').set_text(\"\")", "def unlink(self, cr, uid, ids, context=None):\n payenrich = self.read(cr, uid, ids, ['state'], context=context)\n for s in payenrich:\n if s['state'] not in ['draft', 'cancel']:\n raise osv.except_osv(_('Invalid Action Error'), _('In Order To Delete A Service Request Order(s), It Must Be Cancelled First!'))\n return super(payment_enrich, self).unlink(cr, uid, ids, context=context)", "def _prepare_invoice(self):\n # get current logged in user's timezone\n local = pytz.timezone(self.env['res.users'].browse(self._uid).tz) or pytz.utc\n\n self.ensure_one()\n journal_id = self.env['account.journal'].search([('type', '=', 'purchase')], limit=1).id\n if not journal_id:\n raise UserError(_('Please define an accounting purchase journal for this company.'))\n invoice_vals = {\n 'name': self.partner_ref or '',\n 'origin': self.name,\n 'type': 'in_invoice',\n 'account_id': self.partner_id.property_account_payable_id.id,\n 'partner_id': self.partner_id.id,\n 'journal_id': journal_id,\n 'currency_id': self.currency_id.id,\n 'comment': self.notes,\n 'payment_term_id': self.payment_term_id.id,\n 'fiscal_position_id': self.fiscal_position_id.id or self.partner_id.property_account_position_id.id,\n 'company_id': self.company_id.id,\n 'purchase_id': self.id,\n 'date_invoice':pytz.utc.localize(datetime.datetime.now()).astimezone(local).strftime('%Y-%m-%d'),\n }\n return invoice_vals", "def generate_eob(\n self, date_of_service, date_of_eob, insured, invoice_id, cpt_code, 
charge_amount\n ):\n if insured == \"insured\":\n # first copayments\n copay_amount = np.random.choice(\n self.distributions[\"copay_amounts\"],\n 1,\n p=self.distributions[\"copay_distribution\"],\n )[0]\n if copay_amount > 0:\n invoicelineitemeob = pd.DataFrame(\n {\n \"invoice_id\": [invoice_id],\n \"cpt_code\": [cpt_code],\n \"created_on\": [date_of_service],\n \"copay_amount\": [copay_amount],\n \"adjustment_amount\": [0],\n \"paid_amount\": [0],\n }\n )\n self.InvoiceLineItemsEob = self.InvoiceLineItemsEob.append(\n invoicelineitemeob\n )\n remaining_charge = charge_amount - copay_amount\n else:\n remaining_charge = charge_amount\n # next eob discounts\n eob_discount_percent = np.random.choice(\n self.distributions[\"eob_discount_percentages\"],\n 1,\n p=self.distributions[\"eob_discount_distribution\"],\n )[0]\n if eob_discount_percent > 0:\n insurance_adjustment = remaining_charge * eob_discount_percent / 100\n remaining_charge = remaining_charge - insurance_adjustment\n invoicelineitemeob = pd.DataFrame(\n {\n \"invoice_id\": [invoice_id],\n \"cpt_code\": [cpt_code],\n \"created_on\": [date_of_eob],\n \"copay_amount\": [0],\n \"adjustment_amount\": [insurance_adjustment],\n \"paid_amount\": [0],\n }\n )\n self.InvoiceLineItemsEob = self.InvoiceLineItemsEob.append(\n invoicelineitemeob\n )\n # next handle eob payments where relevant\n eob_payment_percentage = np.random.choice(\n self.distributions[\"eob_payment_percentages\"],\n 1,\n p=self.distributions[\"eob_payment_distribution\"],\n )[0]\n eob_payment_amount = remaining_charge * (eob_payment_percentage / 100.0)\n if eob_payment_amount > 0:\n invoicelineitemeob = pd.DataFrame(\n {\n \"invoice_id\": [invoice_id],\n \"cpt_code\": [cpt_code],\n \"created_on\": [date_of_eob],\n \"copay_amount\": [0],\n \"adjustment_amount\": [0],\n \"paid_amount\": [eob_payment_amount],\n }\n )\n self.InvoiceLineItemsEob = self.InvoiceLineItemsEob.append(\n invoicelineitemeob\n )\n remaining_charge = remaining_charge - eob_payment_amount\n else:\n remaining_charge = charge_amount\n return remaining_charge" ]
[ "0.6526457", "0.60499185", "0.6039584", "0.5971947", "0.5949904", "0.5877273", "0.58699375", "0.5868027", "0.5864572", "0.57496387", "0.56165034", "0.5592037", "0.55525774", "0.5516271", "0.5495002", "0.5483541", "0.5412485", "0.5412102", "0.53373694", "0.53041404", "0.5290604", "0.52692235", "0.5256279", "0.5249647", "0.5236856", "0.5236116", "0.52341986", "0.52206016", "0.5200139", "0.51388955", "0.51388496", "0.5122479", "0.51047635", "0.5051272", "0.50419605", "0.502181", "0.50170165", "0.5007705", "0.4998799", "0.49985367", "0.49881557", "0.49792898", "0.4976857", "0.49700484", "0.49641338", "0.49258026", "0.4922015", "0.49101573", "0.4909953", "0.4897923", "0.48913023", "0.48867247", "0.48777372", "0.4866811", "0.48238346", "0.481972", "0.48188823", "0.48106176", "0.48021778", "0.4791925", "0.47826585", "0.4778795", "0.4778554", "0.47749498", "0.4773191", "0.4770609", "0.47567505", "0.47523278", "0.4747633", "0.47409788", "0.4739125", "0.46946192", "0.46899083", "0.46645722", "0.46645567", "0.46550095", "0.46446663", "0.46387267", "0.46279615", "0.46250018", "0.46225634", "0.4588386", "0.45874342", "0.45742422", "0.4564728", "0.45570424", "0.45518747", "0.4543182", "0.45393336", "0.45365804", "0.45352572", "0.45351407", "0.45314208", "0.45305064", "0.45167252", "0.4512364", "0.45113882", "0.45081094", "0.45001996", "0.44958174" ]
0.7914905
0
r"""Return the standard path to the shared area on the current platform.
def shared_area_path() -> str:
    try:
        return os.environ["OITG_SHARED_AREA"]
    except KeyError:
        pass

    if os.name == "nt":  # Windows
        return "Z:\\"
    if os.name == "unix" or os.name == "posix":  # Linux / OSX / ...
        return os.path.expanduser("~/steaneShared/")
    raise Exception("Unknown OS")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_share_path():\n cwd = os.path.dirname(__file__)\n share = os.path.join(cwd, '../share')\n return os.path.abspath(share)", "def path_share(self) -> Path:\n return self.path_supervisor / SHARE_DATA", "def get_path(self):\n\t\treturn call_sdk_function('PrlShare_GetPath', self.handle)", "def path_extern_supervisor(self) -> PurePath:\n return PurePath(os.environ[ENV_SUPERVISOR_SHARE])", "def path_extern_mounts(self) -> PurePath:\n return self.path_extern_supervisor / MOUNTS_FOLDER", "def localPath(self):\n return self.home", "def _get_mount_path(self, connection_info):\n share = self._normalize_export(connection_info['data']['export'])\n return os.path.join(self._get_mount_point_base(),\n utils.get_hash_str(share))", "def path_extern_share(self) -> PurePath:\n return PurePath(self.path_extern_supervisor, SHARE_DATA)", "def location(self):\n\n p = os.path.abspath(__file__)\n pathSP = os.path.split(p)\n return pathSP", "def path_config(self):\n return HOMEASSISTANT_CONFIG.format(HASSIO_SHARE_INT)", "def _getSocketPath():\n return f'{CmdSsh._getSshDir()}/soos-%r@%h-%p'", "def _GetSystemPath():\n return encoding_util.GetEncodedValue(os.environ, \"PATH\")", "def path(cls):\n from os.path import sep, join, exists\n from os import environ\n return join(environ.get(\"SystemRoot\", join(\"C:\", sep, \"Windows\")), \"System32\", \"mpclaim.exe\")", "def get_box_pathway():\n import os\n import sys\n sys.dont_write_bytecode = True\n user_env = os.environ['USERPROFILE']\n os.chdir(user_env)\n directory_list = os.listdir(user_env)\n Box_boolean = 'Box' in directory_list\n Box_Sync_boolean = 'Box Sync' in directory_list\n if Box_boolean is False and Box_Sync_boolean is False:\n raise ValueError('Box or Box Sync is not in your pathway')\n elif Box_boolean is True and Box_Sync_boolean is True:\n raise ValueError('Program does not know whether to distinguish Box or Box Sync')\n else:\n if Box_boolean is True:\n return user_env + '\\Box\\\\'\n elif Box_Sync_boolean is True:\n return user_env + '\\Box Sync\\\\'", "def get_home_path(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetHomePath', self.handle)", "def getPath(self):\n path = os.path.dirname(os.path.realpath(__file__)) #Finds the path of the application\n path =(os.path.dirname(os.path.realpath(__file__))+ '\\\\Enigma Settings') #Adds to the directory to create a folder\n \n return path #Returns the folders directory", "def get_kernel_path():\n path = \"/\".join(os.path.dirname(os.path.realpath(__file__)).split('/')[:-1])\n return path+'/src/'", "def os_path(self, **kw):\n with_drive = kw.get(\"with_drive\", True)\n if os.name == \"nt\":\n return self.windows_path(with_drive=with_drive)\n return self.posix_path(with_drive=with_drive)", "def _get_os_path(self, name=None, path=''):\n\t\t\n\t\tif self.notebook_dir:\n\t\t\tout_path =os.path.join( self.notebook_dir, path.lstrip('/'))\n\t\telse:\n\t\t\tout_path = path\n\t\t\n\t\tif name:\n\t\t\tout_path = os.path.join(out_path, name.lstrip('/'))\n\t\t\n\t\treturn out_path", "def _get_local_share_path(self, share):\n local_vol_path = self._get_mount_point_for_gluster_vol()\n if not os.access(local_vol_path, os.R_OK):\n raise exception.GlusterfsException('share path %s does not exist' %\n local_vol_path)\n return os.path.join(local_vol_path, share['name'])", "def system_path(self, path):\n return os.path.join(self.prefix, path.lstrip('/'))", "def getPath(self):\n path = '/'.join(self.getPhysicalPath())\n return path", "def get_home():\n try:\n return str(Path.home())\n except Exception:\n return None", 
"def get_absolute_pathname(self):\n return os.path.join(settings.PRIVATE_STORAGE_ROOT, self.get_relative_pathname())", "def path(self):\r\n return self.chroot", "def path(self):\r\n return self.chroot", "def get_standard_directory(self):\n dirpath = (c.c_char * 256)()\n self._lib.StGetDirectory(c.c_int(1), c.byref(dirpath), 256)\n return dirpath.value.decode('ascii')", "def _get_mount_point_for_share(self, nfs_share):\n nfs_share = nfs_share.encode('utf-8')\n return os.path.join(self.configuration.nexenta_mount_point_base,\n md5(nfs_share, usedforsecurity=False).hexdigest())", "def GetPath(self):\n sdk_root = config.Paths().sdk_root\n if not sdk_root:\n raise NoSdkRootError()\n return os.path.join(sdk_root, self.name)", "def realPath(self):\n \n return (self.useLink and [self.linkPath] or [self.installPath])[0]", "def get_plato_path():\n\treturn \"/tsi/\"", "def get_path() -> str:\n return os.path.dirname(os.path.realpath(__file__))", "def getRootPath()->str:\n if '--develop' in sys.argv:\n return eel._get_real_path('public') + '/'\n\n return eel._get_real_path('build') + '/'", "def full_path(self) -> str:\n return self.workspace.get_full_path(self)", "def _GetLibraryPath(self, platform, backupPlatform=''):\n if platform == Environment.GetPlatform() or \\\n (backupPlatform and backupPlatform == Environment.GetPlatform()):\n return os.path.split(self._libraryPath)[1]\n return ''", "def home_directory(self):\n out = self._call(\"GETHOMEDIRECTORY\")\n return out.json()[\"Path\"]", "def shared_binary_location(cmd=\"shared\"):\n return os.path.join(BIN_PREFIX, cmd)\n return binary_location(cmd, SHARED_USE_PATH)", "def path(self):\n return self.chroot", "def getConfigPath():\n if sys.platform == 'linux':\n configpath = os.path.normpath(os.path.expanduser('~/.config/phobos'))\n elif sys.platform == 'darwin':\n configpath = os.path.normpath(os.path.expanduser('~/Library/Application Support/phobos'))\n elif sys.platform == 'win32':\n configpath = os.path.normpath(os.path.expanduser('~/AppData/Roaming/phobos'))\n else:\n configpath = 'ERROR: {0} not supported,'.format(sys.platform)\n return configpath", "def get_path(self):\n return self.sync_path", "def absolute_physical_path(self) -> str:\n return self._path", "def work_root(session):\n return session[\"AVALON_WORKDIR\"]", "def CoreDirectory():\n thisDir=WindowsPath(__file__).parent.resolve()\n # print(f\"this dir {thisDir}\")\n coreDir=thisDir.parent/\"MacroSystem/core\"\n return coreDir", "def get_current_path(self):\r\n path_2 = self.path.replace('\\\\', '/')\r\n return self.copy_to_clipboard(path_2)", "def standard_path_from_server_root(self, arg: str) -> str:\n # Remove beginning and ending quotes\n arg = arg.strip(\"'\\\"\")\n\n # Standardize the include argument based on server root\n if not arg.startswith(\"/\"):\n # Normpath will condense ../\n arg = os.path.normpath(os.path.join(self.root, arg))\n else:\n arg = os.path.normpath(arg)\n return arg", "def get_default_mc_loc():\n if platform.system() == 'Windows': # Windows\n return join_path(os.getenv('APPDATA'), '.minecraft') # C:\\Users\\<user name>\\AppData\\Roaming\n elif platform.system() == 'Darwin': # MacOS\n return join_path(os.getenv('HOME'), 'Library', 'Application Support', 'minecraft') # /home/<user name>/Library/Application Support/minecraft\n else: # Linux\n return join_path(os.getenv('HOME'), '.minecraft') # /home/<user name>/.minecraft", "def _get_reporoot():\n from os import path\n import acorn\n medpath = path.abspath(acorn.__file__)\n return 
path.dirname(path.dirname(medpath))", "def _getSshDir():\n return f'{Path.home()}/.ssh'", "def get_full_path(self):\n return self.path_display", "def masterPath(self):\n\t\treturn fl.File( self._path + '/master.data' )", "def get_output_path():\n\n path = rs.DocumentPath()\n name = rs.DocumentName()\n \n if gc.operating_system == \"mac\":\n\n path = path[:-len(name)] + \"_system.dat\"\n\n elif gc.operating_system == \"win\":\n\n i = path.rfind(\"\\\\\")\n\n path = path[:i] + \"/_system.dat\" \n\n return path", "def this_folder():\n if getattr(sys, 'frozen', False):\n # The application is frozen\n return os.path.dirname(sys.executable)\n else:\n # The application is not frozen\n return os.path.dirname(__file__)", "def platform_data_dir():\n if POSIX: # nocover\n dpath_ = os.environ.get('XDG_DATA_HOME', '~/.local/share')\n elif DARWIN: # nocover\n dpath_ = '~/Library/Application Support'\n elif WIN32: # nocover\n dpath_ = os.environ.get('APPDATA', '~/AppData/Roaming')\n else: # nocover\n raise NotImplementedError('Unknown Platform %r' % (sys.platform,))\n dpath = normpath(expanduser(dpath_))\n return dpath", "def _get_mount(self):\n if not self._mount.endswith(os.path.sep):\n return \"%s%s\" % (self._mount, os.path.sep)\n else:\n return self._mount", "def kard_folder_path(self):\n if self._base_path is None:\n if is_running_in_docker():\n container_id = os.popen(\n 'cat /proc/self/cgroup | grep docker | '\n 'grep -o -E \"[0-9a-f]{64}\" | head -n 1').read().rstrip()\n cli = docker.DockerClient(version='auto')\n cont = cli.containers.get(container_id)\n mount = next((\n c for c in cont.attrs['Mounts']\n if c['Destination'] == str(get_kard_root_path())))\n self._base_path = Path(mount['Source'])\n else:\n self._base_path = Path(self.kard.path).parent\n return self._base_path", "def get_path(self, path):\n if path.startswith('/') and not path.startswith('~/'):\n return os.getcwd() + '/' + path\n else:\n return path", "def AppPath(self):\n\t\treturn self.acad.Path", "def path_mounts(self) -> Path:\n return self.path_supervisor / MOUNTS_FOLDER", "def abspath(self, ref):\n \n directory, path = get_location(self.directory, ref.strip(),\n current=dirname(self.relative))\n path = join_fb_root(join(directory, path))\n return path", "def path(self):\n path = os.path.join(self.base_dir, self.store().replace(' ', '_'), self.package_name())\n return os.path.abspath(path)", "def workspace_path(self):\n return self._path_temp", "def serverRootLocation():\n plist = \"/Library/Server/Preferences/Calendar.plist\"\n serverRoot = u\"/Library/Server/Calendar and Contacts\"\n if os.path.exists(plist):\n serverRoot = plistlib.readPlist(plist).get(\"ServerRoot\", serverRoot)\n return serverRoot", "def path(self):\n if self._path:\n return self._path\n path = os.environ[\"PATH\"].split(os.pathsep)\n path = [os.path.expanduser(x) for x in path]\n path = [os.path.abspath(x) for x in path]\n path = [x for x in path if os.path.exists(x)]\n self._path = path\n return self._path", "def get_root_abs_path() -> pathlib.Path:\n return PathManager._ROOT", "def key_path(self):\n keypath = self._get_field('System', 'keypath')\n localpath = \"/\".join(__file__.split('/')[:-1])\n return join(localpath, keypath)", "def path(self):\n return self.lib.path", "def _spdr_engine_location():\n return os.path.realpath(__file__).rpartition('/')[0]", "def abspath(self, *args):\n return os.path.join(self._spool, *args)", "def get_path():\n return path.abspath(path.dirname(path.dirname(__file__)))", "def root_path(self) -> Path:\n return 
ARCHIVES_ROOT / self.source_name / self.key", "def path(self):\n return os.path.join(self.config.get('path', os.getcwd()))", "def path_extern_homeassistant(self) -> PurePath:\n return PurePath(self.path_extern_supervisor, HOMEASSISTANT_CONFIG)", "def server_relative_path(self):\n return self.properties.get(\"ServerRelativePath\", SPResPath(None))", "def fs_sis_shared_space(self):\n return self._fs_sis_shared_space", "def cwd_for_path(self, path):\n os_path = to_os_path(path, self.root_dir)\n # in the case of notebooks and kernels not being on the same filesystem,\n # walk up to root_dir if the paths don't exist\n while not os.path.isdir(os_path) and os_path != self.root_dir:\n os_path = os.path.dirname(os_path)\n return os_path", "def platform_cache_dir():\n if POSIX: # nocover\n dpath_ = os.environ.get('XDG_CACHE_HOME', '~/.cache')\n elif DARWIN: # nocover\n dpath_ = '~/Library/Caches'\n elif WIN32: # nocover\n dpath_ = os.environ.get('LOCALAPPDATA', '~/AppData/Local')\n else: # nocover\n raise NotImplementedError('Unknown Platform %r' % (sys.platform,))\n dpath = normpath(expanduser(dpath_))\n return dpath", "def platform_config_dir():\n if POSIX: # nocover\n dpath_ = os.environ.get('XDG_CONFIG_HOME', '~/.config')\n elif DARWIN: # nocover\n dpath_ = '~/Library/Application Support'\n elif WIN32: # nocover\n dpath_ = os.environ.get('APPDATA', '~/AppData/Roaming')\n else: # nocover\n raise NotImplementedError('Unknown Platform %r' % (sys.platform,))\n dpath = normpath(expanduser(dpath_))\n return dpath", "def _get_filesystem_path(self, request):\n return self._get_path(filesystem_path(self.base_path, request, self.url_base), False)", "def _windows_seps(path: str) -> str:\n\n if not path:\n return None\n elif os.sep != ntpath.sep:\n return path.replace(os.sep, ntpath.sep)\n else:\n return path", "def path_homeassistant(self) -> Path:\n return self.path_supervisor / HOMEASSISTANT_CONFIG", "def path():\n # Exclude path to this script from path.\n this_file = os.path.realpath(__file__)\n this_path = os.path.dirname(this_file)\n return os.pathsep.join(p for p in sys.path if p != this_path)", "def localdir():\n root = __file__\n if os.path.islink(root):\n root = os.path.realpath(root)\n directory = os.path.dirname(os.path.abspath(root))\n return os.path.normpath(os.path.join(directory, \"../settings/\"))", "def local_system_path(self, resource=None, path=None):\n p = os.path.join(config['resources']['data-dir'])\n if resource:\n if resource not in self._resources:\n raise InvalidPath(\"Invalid resource {}\".format(resource))\n p = os.path.join(config['resources']['data-dir'],\n self._resources[resource].local_dir)\n if path is not None:\n p = os.path.join(p, path)\n\n return os.path.expanduser(p)", "def local_path(self):\n if self.repo_path:\n return self.repo_path\n tmpdir = PurePath(tempfile.gettempdir())\n return str(tmpdir.joinpath('harvest', self.org, self.repo))", "def remote_path(self, volume):\n nfs_share = volume['provider_location']\n share = nfs_share.split(':')[1].rstrip('/')\n return '%s/%s/volume' % (share, volume['name'])", "def get_mount_point(path):\n\n path = os.path.abspath(path)\n while path != os.path.sep:\n if os.path.ismount(path):\n return path\n path = os.path.abspath(os.path.join(path, os.pardir))\n return path", "def getRootPath():\n return '/'.join(__file__.split('/')[:-4]) # Path of this file with pagebot/__init__.py(c) removed.", "def get_home_directory(self, user: str) -> str:\n process = self.run(\n \"/\",\n \"root\",\n [\"sh\", \"-c\", f\"realpath ~{user}\"],\n 
encoding=\"utf-8\",\n stdout=subprocess.PIPE,\n )\n return process.stdout.strip()", "def _win_documents_path():\n # Accesses the Windows API via ctypes\n import ctypes\n import ctypes.wintypes\n\n CSIDL_PERSONAL = 0x0005\n dll = ctypes.windll.shell32\n buf = ctypes.create_unicode_buffer(ctypes.wintypes.MAX_PATH + 1)\n if dll.SHGetSpecialFolderPathW(None, buf, CSIDL_PERSONAL, False):\n return buf.value.encode()\n else:\n raise ValueError", "def home():\n if sys.prefix == sys.exec_prefix:\n return sys.prefix\n else:\n return ':'.join((sys.prefix, sys.exec_prefix))", "def _get_home():\n from os.path import expanduser\n home = expanduser('~')\n return home", "def systemdir():\n if platform == 'windows':\n return os.path.join(os.environ['ProgramFiles'], 'automaton')\n else:\n return \"/etc/automaton/\"", "def _get_rc_local_path( self ):\n # might be a symlink but prepend_remote_shell_script doesn't work with symlinks\n return sudo( 'readlink -f /etc/rc.local' )", "def get_singularity_path():\n return os.path.join(get_appdata(), 'CCP\\\\EVE\\\\d_eve_sharedcache_sisi_singularity')", "def get_directory(self):\n mypath = mlblocks.get_primitives_paths()[-1]\n return mypath", "def _get_path(): # THIS IS JUST FOR GETTING THE FILE\n return os.path.dirname(os.path.abspath(__file__)) + '/'", "def _extract_system_path(self, script):\r\n\r\n DEFAULT_PATH = ['code']\r\n\r\n # Separate paths by :, like the system path.\r\n raw_path = script.get('system_path', '').split(\":\") + DEFAULT_PATH\r\n\r\n # find additional comma-separated modules search path\r\n path = []\r\n\r\n for dir in raw_path:\r\n if not dir:\r\n continue\r\n\r\n # path is an absolute path or a path relative to the data dir\r\n dir = os.path.join(self.capa_system.filestore.root_path, dir)\r\n # Check that we are within the filestore tree.\r\n reldir = os.path.relpath(dir, self.capa_system.filestore.root_path)\r\n if \"..\" in reldir:\r\n log.warning(\"Ignoring Python directory outside of course: %r\", dir)\r\n continue\r\n\r\n abs_dir = os.path.normpath(dir)\r\n path.append(abs_dir)\r\n\r\n return path", "def get_root_folder() -> Path:\n return Path(__file__).parent.parent", "def pathtofolder():\n return os.getcwd()", "def site_section(self) -> str:\n match = PATH_FORMAT.match(self.path)\n if match:\n return match.group(\"base_path\")\n return \"\"" ]
[ "0.75354296", "0.6952207", "0.67871875", "0.67391086", "0.67256176", "0.6657467", "0.6635167", "0.661767", "0.66105354", "0.6436675", "0.6340287", "0.6331047", "0.63205075", "0.6297639", "0.62504154", "0.6217222", "0.6186836", "0.6157988", "0.61453235", "0.6119978", "0.61148596", "0.61036146", "0.60967356", "0.6091961", "0.60649556", "0.60649556", "0.60303104", "0.6011863", "0.6002475", "0.60024244", "0.5988673", "0.598336", "0.59639025", "0.5961879", "0.5954032", "0.5944549", "0.5920802", "0.5920613", "0.5919807", "0.5910782", "0.59034544", "0.5899904", "0.58890384", "0.5888895", "0.5888649", "0.58835614", "0.58757544", "0.58746046", "0.58584017", "0.5857375", "0.58552134", "0.58534133", "0.58511883", "0.5850528", "0.5836711", "0.58365226", "0.58319575", "0.58283645", "0.5811796", "0.58033127", "0.58021015", "0.5796344", "0.57751685", "0.5769574", "0.5767842", "0.5766182", "0.57602924", "0.5759437", "0.5755839", "0.5754592", "0.5742297", "0.5740234", "0.5738132", "0.5736668", "0.5732669", "0.5725006", "0.57211155", "0.5719392", "0.57179236", "0.57061726", "0.56993985", "0.5693953", "0.56905967", "0.5686172", "0.56812525", "0.5676247", "0.5670134", "0.5662917", "0.56614316", "0.56512785", "0.5649088", "0.56465447", "0.56461966", "0.56454945", "0.5642816", "0.56415224", "0.56411886", "0.5638341", "0.56371224", "0.56368035" ]
0.85109854
0
Return the path to the given user's analysis directory on the shared area (``/Users/<user>/analysis``).
def analysis_root_path(user: Optional[str] = None) -> str:
    if user is None:
        user = _get_user()
    return os.path.join(shared_area_path(), "Users", user, "analysis")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def todays_analysis_path(day: Optional[str] = None, user: Optional[str] = None) -> str:\n if day is None:\n day = date.today().isoformat()\n if user is None:\n user = _get_user()\n path = os.path.join(analysis_root_path(user=user), day)\n\n if not os.access(path, os.R_OK):\n # If the dir does not exist, create it\n os.mkdir(path)\n\n return path", "def dir_user(assignment, user):\n return os.path.join(repository, assignment, user)", "def dir_results(assignment, user):\n return os.path.join(repository, assignment, user, 'results')", "def get_home_directory(self, user: str) -> str:\n process = self.run(\n \"/\",\n \"root\",\n [\"sh\", \"-c\", f\"realpath ~{user}\"],\n encoding=\"utf-8\",\n stdout=subprocess.PIPE,\n )\n return process.stdout.strip()", "def analysis_path(\n project: str, location: str, conversation: str, analysis: str,\n ) -> str:\n return \"projects/{project}/locations/{location}/conversations/{conversation}/analyses/{analysis}\".format(\n project=project,\n location=location,\n conversation=conversation,\n analysis=analysis,\n )", "def getFSUserDir(self):\n if not self.authorised:\n raise AuthError(401,\"I am sorry, but you are not authorised\")\n\n if self.authJson[\"userInfo\"] and self.authJson[\"userInfo\"][\"screenName\"]:\n fsDir = self.config.get(\"FileMan\",\"homedir\") + self.authJson[\"userInfo\"][\"screenName\"]\n return fsDir\n else: \n raise AuthError(500, \"Cannot determine the working directory - Liferay did not provide user's screenName\")", "def getUserDir() -> str:\n\n if os.name == \"nt\": # Windows system, try to return documents directory\n try:\n import ctypes.wintypes\n CSIDL_PERSONAL = 5 # Documents\n SHGFP_TYPE_CURRENT = 0 # Current value\n\n buf = ctypes.create_unicode_buffer(ctypes.wintypes.MAX_PATH)\n ctypes.windll.shell32.SHGetFolderPathW(0, CSIDL_PERSONAL, 0, SHGFP_TYPE_CURRENT, buf)\n\n return buf.value\n except ImportError:\n pass\n\n return os.path.expanduser(\"~\") # Non-Windows system, return home directory", "def getRelativeRootExperimentPath(self):\n return userId + \"/\" + \\\n self._rootExportPath[self._rootExportPath.rfind(self._properties['export_dir']):]", "def get_share_path():\n cwd = os.path.dirname(__file__)\n share = os.path.join(cwd, '../share')\n return os.path.abspath(share)", "def path_to_data():\n jab = os.expanduser('~/jab')\n return os.path.join(jab, 'local/login_sums.csv')", "def user(path = None):\n if path:\n return \"%s/%s\" % (expanduser(\"~\"), path)\n else:\n return expanduser(\"~\")", "def shared_area_path() -> str:\n\n try:\n return os.environ[\"OITG_SHARED_AREA\"]\n except KeyError:\n pass\n\n if os.name == \"nt\": # Windows\n return \"Z:\\\\\"\n if os.name == \"unix\" or os.name == \"posix\": # Linux / OSX / ...\n return os.path.expanduser(\"~/steaneShared/\")\n raise Exception(\"Unknown OS\")", "def get_user_data_path():\n current_directory = os.path.dirname(os.path.realpath(__file__))\n return os.path.join(current_directory, 'emergency_fund_info.json')", "def get_downloadpath(user_id):\r\n path = settings.DOCUMENT_PATH + str(user_id) + '/'\r\n if not os.path.isdir(path):\r\n os.mkdir(path)\r\n return path", "def getFSUserDir(self):\n\n return self.config.get(\"FileMan\",\"homedir\") + self.getRole()[\"roleName\"]", "def default_output_path():\n\n documents = os.path.join(os.path.expanduser('~'))\n try:\n documents = _xdg_documents_path()\n except: pass\n if platform.system() == 'Windows':\n try:\n documents = _win_documents_path()\n except: pass\n\n return os.path.join(documents, 'Topographica')", "def 
get_folder(self):\n return os.path.join(\n settings.PRIVATE_STORAGE_ROOT, Exam.EXAM_FILES_LOCATION,\n str(self.unique_id)[0:2])", "def get_teamocil_dir() -> pathlib.Path:\n return pathlib.Path(\"~/.teamocil/\").expanduser()", "def get_game_dir(self, username=False):\n if not self.common and not username:\n raise RuntimeError(\"Can't determine this game's directory without username\")\n if self.common:\n subdir = \"common\"\n else:\n subdir = \"username\"\n subsubdir = self.dir\n if WIN32 or CYGWIN:\n subsubdir = subsubdir.lower()\n return os.path.join(subdir, subsubdir)", "def share_directory(self):\n # Get the user to share file/folder with.\n share_user = User.query.filter_by(email = self.email.data).first()\n if not share_user:\n return\n\n # The source to copy to another user.\n filename = os.listdir(self.path.data)[int(self.index.data)]\n src = os.path.join(self.path.data, filename)\n # Get home path for the user to share folder with.\n dst = os.path.join(share_user.get_files_path(), filename)\n # Copy source to destination.\n copytree(src, dst)", "def get_base_data_directory(self):\n user_category = ''\n directory = ''\n\n if self.session_start_date:\n start_time = self.session_start_date.split(' ')[0] #.replace('-', '')\n else:\n # PL. To avoid mixing users directory if they restart the application\n # after midnight but before 8 AM, the directory date doesn't change:\n _local_time = time.localtime()\n if _local_time[3] > 7:\n start_time = time.strftime(\"%Y-%m-%d\")\n else:\n # substract 8 hourse to current date to get yesterday's date. \n _local_time = time.gmtime((time.time() - 8*60*60))\n start_time = time.strftime(\"%Y-%m-%d\", _local_time)\n\n if self.is_inhouse():\n #directory = os.path.join(self.base_directory, self.endstation_name,\n # self.get_user_category(), self.get_proposal(),\n # start_time)\n directory = os.path.join(self.base_directory, start_time, self.proposal_number, self.get_proposal_number())\t \n else:\n #directory = os.path.join(self.base_directory, self.get_user_category(),\n # self.get_proposal(), self.endstation_name,\n # start_time)\n logging.debug(\"SoleilSession self.base_directory %s\" % self.base_directory)\n logging.debug(\"SoleilSession start_time %s\" % start_time)\n logging.debug(\"SoleilSession self.proposal_number %s\" % self.proposal_number)\n logging.debug(\"SoleilSession self.get_proposal_number() %s\" % self.get_proposal_number())\n directory = os.path.join(self.base_directory, start_time, self.get_proposal_number())\n\n return directory", "def user_home_path(self):\n return path.join(env.user_home, self._user_home_path)", "def _get_storage_path(self, stream_name:str=None, version:int=None, user_id:str=None):\n\n dirpath = self.data_path+\"study=\"+self.study_name+\"/\"\n\n if stream_name:\n dirpath += \"stream={0}/\".format(stream_name)\n\n if version:\n if \"stream=\" not in dirpath:\n raise ValueError(\"stream_name argument is missing.\")\n else:\n dirpath += \"version={0}/\".format(str(version))\n\n if user_id:\n if \"stream=\" not in dirpath or \"version=\" not in dirpath:\n raise ValueError(\"stream_name and/or version arguments are missing.\")\n else:\n dirpath += \"user={0}/\".format(user_id)\n\n return dirpath", "def get_storage_directory(username=None):\n\n return os.path.join(get_home_dir(username), '.cfy-agent')", "def get_disassembler_user_directory(self):\n pass", "def get_home_dir(self, username):\n user = connection.User.find_one({'email': str(username) })\n return str(user['_id'])", "def get_path(self, path):\n if 
path.startswith('/') and not path.startswith('~/'):\n return os.getcwd() + '/' + path\n else:\n return path", "def home_directory(self):\n out = self._call(\"GETHOMEDIRECTORY\")\n return out.json()[\"Path\"]", "def getRootDirectory(self):\n if Globals.WORKFLOWS_BASEDIR[0] == '~':\n return os.path.expanduser(Globals.WORKFLOWS_BASEDIR)\n else:\n return os.path.join('', Globals.WORKFLOWS_BASEDIR)", "def get_data_dir():\n return Path(current_app.config[\"USER_DIR\"]) / \"data\"", "def subOfUserFilesDir(subdir: str) -> str:\n return os.path.join(os.getcwd(), _baseUserFilesDir, subdir)", "def getPath(self):\n path = os.path.dirname(os.path.realpath(__file__)) #Finds the path of the application\n path =(os.path.dirname(os.path.realpath(__file__))+ '\\\\Enigma Settings') #Adds to the directory to create a folder\n \n return path #Returns the folders directory", "def pathtofolder():\n return os.getcwd()", "def user_conf_dir(self):\n return os.path.join(BaseDirectory.xdg_config_home, \"speech-dispatcher\")", "def localpath(self, *args):\n return os.path.join(os.path.expanduser(self.serverfiles_dir), *args)", "def personaldir():\n if platform == 'windows':\n return os.path.join(os.environ['APPDATA'], 'automaton')\n else:\n return os.path.expanduser('~/.automaton/')", "def _GetOwnersFilePath(path):\n if _IsWellFormattedFilePath(path):\n # _SRC is removed because the file system on the machine running the code\n # may not have a(n) src directory.\n path_without_src = path[len(SRC):]\n\n return os.path.abspath(\n os.path.join(*(DIR_ABOVE_TOOLS + path_without_src.split(os.sep))))\n\n raise Error(\n 'The given path {} is not well-formatted. Well-formatted paths begin '\n 'with \"src/\" and end with \"OWNERS\"'.format(path))", "def load_analysis_path():\n import json\n import os\n with open(os.path.join(os.path.dirname(__file__), \"analysis_config.json\")) as my_file:\n analysis_paths = json.load(my_file)\n return analysis_paths", "def artiq_results_path(experiment: Optional[str] = None) -> str:\n\n path = os.path.join(shared_area_path(), \"artiqResults\")\n\n if experiment is None:\n try:\n experiment = os.environ[\"OITG_EXPERIMENT\"]\n except KeyError:\n raise Exception(\n \"No experiment supplied, and no OITG_EXPERIMENT environment key\")\n\n return os.path.join(path, experiment)", "def get_home_dir(self, username):\n return self.user_table[username]['home']", "def get_user_homedir():\n return os.path.expanduser(\"~\")", "def working_directory(self):\n project_full_path = \"/\".join(self.file_name.split(\"/\")[:-1])\n file_name = self.file_name.split(\"/\")[-1]\n if \".h5\" in file_name:\n file_name = file_name.split(\".h5\")[0]\n file_name += \"_hdf5\"\n if self.h5_path[0] == \"/\":\n h5_path = self.h5_path[1:]\n else:\n h5_path = self.h5_path\n return posixpath.join(project_full_path, file_name, h5_path)", "def get_absolute_pathname(self):\n return os.path.join(settings.PRIVATE_STORAGE_ROOT, self.get_relative_pathname())", "def _get_project_dir(self):\n return os.path.expanduser(\n self.sqlfluff_config.get_section(\n (self.templater_selector, self.name, \"project_dir\")\n )\n or os.getcwd()\n )", "def get_user_data_dir_name(self):\n return self._user_data_dir.name", "def output_path():\n folder = path.join(path.curdir, \"stages\")\n folder = path.abspath(folder)\n return ensure_path(folder)", "def get_data_folder_path(challenge_short_name):\n return safe_join(settings.MEDIA_ROOT, challenge_short_name)", "def work_root(session):\n return session[\"AVALON_WORKDIR\"]", "def 
profile_directory_path(request, file):\n return directory_path('profile', file)", "def outputFilepath(self, filename):\n return os.path.expanduser('~') + '/' + filename", "def user_directory_path(instance, filename: str) -> str:\n\n # File will be uploaded to MEDIA_ROOT/user_<id>/<filename>\n return 'user_{0}/{1}'.format(instance.profile.user.pk, filename)", "def _get_default_path(self):\n return os.path.join(cfg.DATA_DIR, 'visual_genome')", "def path_share(self) -> Path:\n return self.path_supervisor / SHARE_DATA", "def format_path (in_path):\n return os.path.realpath(os.path.expanduser(in_path))", "def get_data_path():\n\n # Get pathname absolute or relative.\n path = os.path.join(\n os.path.dirname(__file__), __malstor_data_directory__)\n\n abs_data_path = os.path.abspath(path)\n if not os.path.exists(abs_data_path):\n raise project_path_not_found\n\n return abs_data_path", "def _normalized_path(path):\n return os.path.abspath(os.path.expanduser(path))", "def _get_config_path():\n return os.path.join(os.path.expanduser('~'))", "def user_data_path(repo_base, repo='', file_name='', file_format=None):\n if len(repo_base) == 0:\n raise ValueError('Invalid repo_base.')\n if len(file_name) > 0 and len(repo) == 0:\n raise ValueError('Must pass in repo when providing file_name.')\n parts = [repo_base, repo, file_name]\n for p in parts:\n if (not isinstance(p, six.string_types) or p.startswith('.')):\n raise ValueError('Invalid path component.')\n path = os.path.abspath(os.path.join(os.sep, 'user_data', *parts))\n\n if file_format:\n if re.match('[^0-9a-zA-Z_-]', file_format):\n raise ValueError('Invalid file format specified.')\n path = '%s.%s' % (path, file_format)\n\n return path", "def get_upload_path(instance, filename):\n \n userpath = \"{name}/{file}\".format(name=instance.user.username, file=filename)\n mainpath = os.path.join(\"infocomp\",userpath)\n return mainpath", "def get_abs_path(self, value):\n return os.path.abspath(os.path.expanduser(os.path.expandvars(value)))", "def user_data_dir(data_dir, username):\n user_data_dir = os.path.join(data_dir, username)\n if not os.path.exists(user_data_dir):\n os.mkdir(user_data_dir)\n return user_data_dir", "def presentation(self, value):\r\n realpath = os.path.expanduser(value)\r\n if self.auto_create:\r\n if not os.path.exists(realpath):\r\n os.makedirs(realpath)\r\n return realpath", "def Directory(self) -> str:", "def data_abex_results_dir(experiment_name: str) -> Path: # pragma: no cover\n return experiment_dir(experiment_name) / \"Results\"", "def sas_file(self):\n\n return os.path.normpath(self.path +'\\\\'+ cfg_dict['format_pgm'])", "def get_user_directory(username=None):\n home = expanduser(\"~\")\n\n yew_dir = os.path.join(home, STORAGE_DIR, username or get_username())\n if not os.path.exists(yew_dir):\n os.makedirs(yew_dir)\n return yew_dir", "def read_analysis(self,id):\n raw=requests.get(cfg_dict['analysis'] + id, auth=HttpNtlmAuth(cfg_dict['user'], cfg_dict['pass']))\n self.analysis=json.loads(raw.text)\n\n raw = self.analysis['directory'] + cfg_dict['format_loc']\n norm = os.path.normpath(raw)\n if os.path.exists(norm):\n self.path=norm", "def dataset_path(cls):\n basepath = os.path.dirname(__file__)\n filepath = os.path.abspath(\n os.path.join(basepath, \"..\", \"datasets\", get_env('DATA_FILENAME')))\n return filepath", "def get_report_path(self):\n report_path = os.path.join(logPath, \"report.html\")\n return report_path", "def full_path(path):\n return os.path.realpath(os.path.expandvars(os.path.expanduser(path)))", "def 
GetDataDir(self):\r\n \r\n sp = wx.StandardPaths.Get()\r\n return sp.GetUserDataDir()", "def _glob_precip_stats_dirpath(reanalysis):\n fmt = os.path.join( os.path.split( os.path.split( filepath[reanalysis]['path'] )[0] )[0], '????', '??')\n return fmt.format('PRECTOT')", "def get_output_path(self):\n output_path = '%s/%s' % (\n os.path.expanduser(JOB_OUTPUT_PATH), self.get_unique_name())\n return output_path", "def get_user_config_path():\n\n return \"{}/.dapsenv/dapsenv.conf\".format(expanduser(\"~\"))", "def user_directory(sub_directory):\n result = None\n directory = os.path.join(HOME, sub_directory)\n if os.path.exists(directory):\n result = directory\n return result", "def get_user_folder_link_xpath():\n return links['users_folder'].get('folder_xpath')", "def experiment_dir(experiment_name: str) -> Path: # pragma: no cover\n return EXPERIMENTS_DIR / experiment_name", "def localPath(self):\n return self.home", "def get_path():\n\n output_path = None\n while output_path is None:\n print question + \"Please enter the directory where you would like the file saved?\"\n output_path = raw_input()\n if os.path.isdir(os.path.expanduser(output_path)):\n pass\n else:\n os.system('clear')\n print warn + \"%s is not valid, please try again: \" % str(output_path)\n output_path = None\n return os.path.expanduser(output_path)", "def homedir():\n return os.path.expanduser('~')", "def _get_vispy_app_dir():\n # Define default user directory\n user_dir = os.path.expanduser('~')\n\n # Get system app data dir\n path = None\n if sys.platform.startswith('win'):\n path1, path2 = os.getenv('LOCALAPPDATA'), os.getenv('APPDATA')\n path = path1 or path2\n elif sys.platform.startswith('darwin'):\n path = os.path.join(user_dir, 'Library', 'Application Support')\n # On Linux and as fallback\n if not (path and os.path.isdir(path)):\n path = user_dir\n\n # Maybe we should store things local to the executable (in case of a\n # portable distro or a frozen application that wants to be portable)\n prefix = sys.prefix\n if getattr(sys, 'frozen', None): # See application_dir() function\n prefix = os.path.abspath(os.path.dirname(sys.path[0]))\n for reldir in ('settings', '../settings'):\n localpath = os.path.abspath(os.path.join(prefix, reldir))\n if os.path.isdir(localpath):\n try:\n open(os.path.join(localpath, 'test.write'), 'wb').close()\n os.remove(os.path.join(localpath, 'test.write'))\n except IOError:\n pass # We cannot write in this directory\n else:\n path = localpath\n break\n\n # Get path specific for this app\n appname = '.vispy' if path == user_dir else 'vispy'\n path = os.path.join(path, appname)\n return path", "def path(self):\n path = os.path.join(self.base_dir, self.store().replace(' ', '_'), self.package_name())\n return os.path.abspath(path)", "def workspace_path(self):\n return self._path_temp", "def path_extern_supervisor(self) -> PurePath:\n return PurePath(os.environ[ENV_SUPERVISOR_SHARE])", "def get_path(user_file:str):\n\n path: List[UserMeasure] = []\n with open(user_file) as f:\n for line in f.readlines():\n data = line.split(\",\")\n path.append(UserMeasure(\n data[0], # Timestamp\n data[1], # LAC\n data[2], # ID\n data[3], # Zone\n ))\n return path", "def get_working_dir(self):\n return self.role.directory", "def location(self):\n\n p = os.path.abspath(__file__)\n pathSP = os.path.split(p)\n return pathSP", "def localdir():\n root = __file__\n if os.path.islink(root):\n root = os.path.realpath(root)\n directory = os.path.dirname(os.path.abspath(root))\n return 
os.path.normpath(os.path.join(directory, \"../settings/\"))", "def output_path(self) -> str:\n if self._output_path is None:\n if not self._root_folder:\n self._root_folder = self._env.experiments_folder\n folder = os.path.join(self._root_folder, self.key)\n\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n self._output_path = folder\n\n return self._output_path", "def get_default_data_dir(self):\n data_dir_path = os.path.join(self.comicsite.short_name,self.folder_prefix,self.cleantitle)\n return data_dir_path", "def _get_default_path(self):\n # return os.path.join(datasets.ROOT_DIR, 'data', 'MSRC21')\n # set local path\n return u'/Users/danilonunes/workspace/datasets/msrc21/'", "def _fixpath(p):\n return os.path.abspath(os.path.expanduser(p))", "def pwd_expanduser ( fspath, uid ):\n if not fspath or fspath[0] != '~':\n return fspath\n elif len ( fspath ) < 2:\n return get_home_dir ( uid )\n elif fspath[1] == os.sep:\n return get_home_dir ( uid ) + fspath[1:]\n else:\n return fspath", "def user_plugin_dir() -> str:\n return os.path.join(user_data_dir(), 'plugins')", "def get_softfax_output_file_path(request):\n if request.config.getoption(\"--output\"):\n path = os.path.join(ma_misc.get_abs_path(\"/results\", False), request.config.getoption(\"--output\"))\n if not os.path.isdir(path[:path.rfind(\"/\")]):\n os.makedirs(path[:path.rfind(\"/\")])\n return path\n else:\n return os.path.join(pytest.session_result_folder, \"performance/sending_fax_result.xls\")", "def assignments_root_path(self):\n return os.path.join(self.root_path,\n self.assignments.model.__name__.lower())", "def fix_path(path):\n return os.path.abspath(os.path.expanduser(path))", "def _get_organisms_file_path(self, gene_name, gene_id):\n return os.path.join(os.getcwd(), \"src\", \"data\", \"organisms\", \"{}_{}.txt\".format(gene_name, gene_id))", "def get_score_path(cfg):\n return os.path.join(\n get_score_dir(cfg),\n \"ener.csv\")", "def subOfUserConfigFilesDir(subdir: str) -> str:\n return os.path.join(os.getcwd(), _baseUserConfigFilesDir, subdir)" ]
[ "0.6710544", "0.66245717", "0.64282644", "0.61189187", "0.5994886", "0.5977346", "0.5951611", "0.59258217", "0.5919063", "0.5871089", "0.5855553", "0.5822797", "0.5740604", "0.5706992", "0.56850857", "0.5682241", "0.5660873", "0.5612151", "0.5608156", "0.5605027", "0.560351", "0.5599671", "0.55976796", "0.5572536", "0.5572053", "0.55658996", "0.5558749", "0.5538831", "0.5538418", "0.55361325", "0.55134887", "0.5495109", "0.5492528", "0.546889", "0.5468836", "0.54613924", "0.5454739", "0.5449682", "0.54358715", "0.54317933", "0.5404243", "0.54029673", "0.5377288", "0.5374815", "0.5372315", "0.5371117", "0.53704035", "0.5367823", "0.5351231", "0.534481", "0.5337228", "0.5332551", "0.53239787", "0.5308816", "0.5286404", "0.52862597", "0.5269704", "0.52566403", "0.52532613", "0.52476776", "0.52416587", "0.52261436", "0.52209073", "0.52190155", "0.5210911", "0.5209251", "0.52067137", "0.52065885", "0.52064747", "0.5205118", "0.5195897", "0.5191503", "0.5183327", "0.5179935", "0.5175128", "0.51716053", "0.5171361", "0.5171136", "0.51568735", "0.51509935", "0.51495004", "0.51481616", "0.51448977", "0.514441", "0.5138244", "0.5130007", "0.51293945", "0.5129009", "0.5124722", "0.51218593", "0.5120487", "0.5118856", "0.5118308", "0.5110514", "0.5109511", "0.51076853", "0.51072913", "0.5103638", "0.5102426", "0.51005954" ]
0.84479433
0
Return the path to the analysis directory for the given day, defaulting to today. The analysis directory is intended to be used as working space for analysing data while it is taken, so that the code can easily be found again later if the data or conclusions reached are reexamined. If the directory does not exist, it is created.
def todays_analysis_path(day: Optional[str] = None, user: Optional[str] = None) -> str: if day is None: day = date.today().isoformat() if user is None: user = _get_user() path = os.path.join(analysis_root_path(user=user), day) if not os.access(path, os.R_OK): # If the dir does not exist, create it os.mkdir(path) return path
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_day_data_path(self, days_ago=0):\n home = os.environ.get('USERPROFILE').replace('\\\\', '/')\n self.data_dir= os.path.join(home, 'TimeData')\n if not os.path.isdir(self.data_dir):\n mkdir(self.data_dir)\n today_filename = os.path.join(\n self.data_dir,\n (datetime.now()-timedelta(days=days_ago)).strftime('%Y-%m-%d.json'))\n return today_filename", "def filepath(day, ind):\n if ind!=\"TradeReport\" and ind!=\"OrderDetail\" and ind!=\"OrderHistory\":\n raise NameError(' ind must be either TradeReport or OrderDetail')\n \n elif day<1 or day>31 or type(day)!=int:\n raise TypeError('day must be an integer between 1 and 31')\n \n if day<10:\n day=\"0\"+str(day)\n else:\n day=str(day)\n \n path=\"/data/LSE_DATA/raw/T_\" + ind + \"_\"+ day +\"012008.csv/\" + \"t_\" + ind +\".csv\"\n\n return path", "def _get_new_measurement_path() -> pathlib.Path:\n today = strftime(\"%Y%m%d\")\n today_path = DATA_DIR / today\n new_path = get_unique_path(today_path, 'measurement_{:03d}')\n return new_path", "def journal_today():\n today = pendulum.today()\n LOG.debug('Today: %s', today)\n\n path = PATH_FORMAT.format(year=today.year, month=today.month, date=today.to_date_string())\n path = pathlib.Path(path).expanduser()\n LOG.debug('Path of today: %s', path)\n\n parent = path.parent\n if not parent.exists():\n parent.mkdir(parents=True, exist_ok=True)\n LOG.debug('Created new path: %s', parent)\n\n if not path.exists():\n shutil.copyfile(TEMPLATE_PATH, path)\n\n return path", "def prepare_folder(self) -> str:\n base_folder = self.config['info']['folder']\n today_folder = f'{datetime.today():%Y-%m-%d}'\n folder = os.path.join(base_folder, today_folder)\n if not os.path.isdir(folder):\n os.makedirs(folder)\n return folder", "def _set_output_dir(self):\n return os.path.join(self.outputDir,\n datetime.datetime.utcnow().strftime(\"%Y%m%d\"))", "def _get_directory(self):\n directory = os.environ.get(\"EEMETER_WEATHER_CACHE_DIRECTORY\",\n os.path.expanduser('~/.eemeter/cache'))\n if not os.path.exists(directory):\n os.makedirs(directory)\n return directory", "def day_name():\n file_path = os.path.dirname(__file__)\n day_path = os.path.normpath(os.path.join(file_path, '..'))\n return os.path.basename(day_path)", "def create_output_folder(self):\n if not os.path.exists(self.current_path):\n os.mkdir(self.current_path)\n data_dir_by_date = datetime.datetime.now().strftime(\n \"data-%d-%b_%H-%M-%S\")\n self.date_path = os.path.join(self.current_path, data_dir_by_date)\n if not os.path.exists(self.date_path):\n os.mkdir(self.date_path)", "def calc_directory(init_dir, dbg=False):\n dt_str, _ = calc_date_time()\n dt_final = os.sep.join([init_dir, dt_str])\n\n dbc.print_helper((\"Dir: \" + dt_final), dbg=dbg)\n return dt_final, dt_str", "def get_dag_directory(self) -> str:\n if isinstance(self._dag_directory, Path):\n return str(self._dag_directory.resolve())\n else:\n return str(self._dag_directory)", "def _get_default_path(self):\n return os.path.join(cfg.DATA_DIR, 'VOCdevkit' + self._year)", "def _get_default_path(self):\n return os.path.join(cfg.DATA_DIR, 'VOCdevkit' + self._year)", "def tmpdir(self):\n dir_ = os.path.dirname(self.filename)\n try:\n path = At_code_checker.Dir_Map[dir_.lower()]\n if not os.path.isdir(path):\n create_dir(path)\n except KeyError:\n path = self.get_temp_dir()\n At_code_checker.Dir_Map[dir_.lower()] = path\n finally:\n return path", "def _get_new_log_file(self, date, hour):\n\n # Get folder name for this particular date\n dayfolder = str(date)\n\n # Generate new log directory if 
necessary\n if not os.path.exists('/'.join([self._logpath, dayfolder])):\n self._make_log_dir(dayfolder)\n\n # Return the path to the current log file\n filename = str(hour) + '.txt'\n return '/'.join([self._logpath, dayfolder, filename])", "def get_default_data_dir(self):\n data_dir_path = os.path.join(self.comicsite.short_name,self.folder_prefix,self.cleantitle)\n return data_dir_path", "def _join_path(\n year: int, day: int, session: str, file_type: Optional[str] = None\n) -> str:\n cache_location = user_cache_dir(appname=\"advent-of-code\")\n cache_file = os.path.join(cache_location, str(session), str(year), str(day))\n if file_type == \"input_file\":\n cache_file = os.path.join(cache_file, \"input.txt\")\n if file_type == \"submission_file\":\n cache_file = os.path.join(cache_file, \"submission.txt\")\n if file_type == \"last_time_file\":\n cache_file = os.path.join(cache_file, \"time.txt\")\n return cache_file", "def _get_session_dir(self):\n\n fnd = os.path.join(self.config.capture_dir, self.timestamp.date_string(), self.timestamp.time_string())\n if not os.path.isdir(fnd):\n os.makedirs(fnd)\n\n return fnd", "def submission_dir(self):\n submissions_dir = osp.join(self.root, \"submissions\")\n date = '-'.join([\n f'{getattr(datetime.now(), x)}'\n for x in ['year', 'month', 'day']])\n time = '-'.join([\n f'{getattr(datetime.now(), x)}'\n for x in ['hour', 'minute', 'second']])\n submission_name = f'{date}_{time}'\n path = osp.join(submissions_dir, submission_name)\n return path", "def experiment_dir(experiment_name: str) -> Path: # pragma: no cover\n return EXPERIMENTS_DIR / experiment_name", "def working_directory(self):\n project_full_path = \"/\".join(self.file_name.split(\"/\")[:-1])\n file_name = self.file_name.split(\"/\")[-1]\n if \".h5\" in file_name:\n file_name = file_name.split(\".h5\")[0]\n file_name += \"_hdf5\"\n if self.h5_path[0] == \"/\":\n h5_path = self.h5_path[1:]\n else:\n h5_path = self.h5_path\n return posixpath.join(project_full_path, file_name, h5_path)", "def get_run_directory(output_root: Union[str, Path]) -> Path:\n output_root = Path(output_root).resolve()\n launch_time = datetime.datetime.now().strftime(\"%Y_%m_%d\")\n today_runs = [\n int(run_dir.name.split(\".\")[1])\n for run_dir in output_root.iterdir()\n if run_dir.name.startswith(launch_time)\n ]\n run_version = max(today_runs) + 1 if today_runs else 1\n datetime_dir = output_root / f\"{launch_time}.{run_version:0>2}\"\n return datetime_dir", "def init_logs_directory(self):\n \n return self.join_and_init_path(self.get_data_general_directory, PATH_FOR_LOGS)", "def _default_log_dir():\n config_dir = os.path.abspath(os.path.dirname(self.config_filepath))\n log_dir = os.path.join(config_dir, \"logs\")\n if not os.path.isdir(log_dir):\n os.mkdir(log_dir)\n return log_dir", "def get_current_day() -> int:\n return datetime.now().day", "def get_first_of_day(self, folder_before=None, day=datetime.today(), filename='Epikurve.csv'):\n folders = os.listdir(self.data_root_path)\n folders.sort(reverse=True)\n reached = folder_before is not None\n __folder_before = str(folder_before).split('/')[-1]\n for folder in folders:\n if reached:\n path_csv = self.data_root_path / folder / filename\n with open(path_csv) as f:\n first = True\n for x in csv.reader(f, delimiter=';'):\n if first:\n first = False\n continue\n ts = datetime.strptime(x[2], '%Y-%m-%dT%H:%M:%S')\n break\n if ts.date() <= day.date():\n return folder\n else:\n if folder == __folder_before:\n reached = True", "def 
make_experiment_directory(path='',config=None,default_dir='_runs'):\n directory = path\n if not path:\n timestamp = datetime.now().strftime('%Y-%m-%dT%H-%M-%S-%f')\n directory = os.path.join(default_dir,timestamp)\n directory = os.path.abspath(directory) \n if os.path.isdir(directory) and not config.override and not config.cloud:\n raise ValueError(\n 'directory already exists, use --override option: %s'\n % directory)\n elif os.path.isdir(directory) and not config.cloud: \n rmtree(directory)\n if not config.cloud: \n os.makedirs(directory)\n if config:\n config.wdir = directory \n return directory", "def get_denoiser_data_dir():\r\n dir = get_qiime_project_dir() + \"/qiime/support_files/denoiser/Data/\"\r\n return dir", "def simulation_dir(self):\n try:\n return (self.output_directory / self.sim_id).expand()\n except AttributeError:\n return Path()", "def get_counts_dir(cls, event_type):\n if 'rna_events_dir' in cls.global_settings:\n return os.path.join(cls.global_settings['rna_events_dir'],\n event_type)\n return None", "def create_output_dir(self):\n if self.output_dir is None:\n new_path = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\n self.output_dir = os.path.expanduser(os.path.join(self.input_dir, new_path))\n try:\n os.makedirs(self.output_dir)\n except OSError:\n pass", "def generate_subdir(channel, energy=13):\n return os.path.join('%dTeV' % energy, channel, strftime(\"%d_%b_%y\"))", "def current_day():\n now = pytz.timezone('America/Los_Angeles').localize(datetime.now())\n return now.strftime('%m/%d')", "def _get_station_filename():\n output_dir = os.path.join(output, state, station)\n if not os.path.isdir(output_dir):\n logger.debug(\"Creating directory %s\", output_dir)\n os.makedirs(output_dir)\n return os.path.join(output_dir, \"%s.%s\" % (c_time, format))", "def _directory_subdir(hemi, month_num):\n month_dir = '{:02}_{}'.format(month_num, cal.month_abbr[month_num])\n return os.path.join(nt.by_name(hemi)['long_name'],\n 'monthly', month_dir)", "def make_save_dir(self, dir_string='', date='auto', date_first=True):\n # Make a new unique identifier when making a new directory for data to be stored\n if self.update_ID:\n self.set_exp_id()\n\n # add the ID to the subdirectory, if prepend_ID is true\n if dir_string is not '' and self.prepend_ID:\n dir_string = rf'{self.exp_ID}_{dir_string}'\n\n # add the date automatically\n if date is 'auto':\n date = datetime.datetime.now().strftime('%Y_%m_%d')\n\n # decide ordering which to put the date\n if date_first:\n save_dir = rf'{self.base_directory}/{date}/{dir_string}'\n else:\n save_dir = rf'{self.base_directory}/{dir_string}/{date}'\n\n # or don't add the date\n if date is None: # if you want to make a directory that doesn't specify date, for saving more general stuff\n save_dir = rf'{self.base_directory}/{dir_string}'\n\n if not os.path.exists(save_dir):\n print(\"Data storage dir does not exist,\"\n \" creating it at {}\".format(save_dir))\n os.makedirs(save_dir)\n\n return save_dir", "def get_day_today() -> str:\n day = datetime.now().strftime(\"%w\")\n if day == '0': # Sunday\n return '6'\n elif day == '6': # Saturday\n return '5'\n elif day == '1': # Monday\n return '0'\n elif day == '2': # Tuesday\n return '1'\n elif day == '3': # Wednesday\n return '2'\n elif day == '4': # Thursday\n return '3'\n elif day == '5': # Friday\n return '4'", "def currentDay(self):\n day = datetime.datetime.today().day\n return day", "def get_directory(self):\n path = os.path.join(settings.SUBMISSION_DIR, \n str(self.course.semester), \n 
str(self.course.code), \n \"Sec.%d\" % self.course.section,\n self.code)\n return path.replace(\" \", \"_\")", "def outdir_str(d):\n f = folder_str(d)\n logs_dir = os.path.join(f, 'logs')\n try:\n if not os.path.exists(logs_dir):\n os.makedirs(logs_dir)\n except OSError:\n raise argparse.ArgumentTypeError('could not create \"%s\" directory' % logs_dir)\n return f", "def get_full_dir(self, path):\r\n full_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),\r\n path)\r\n if not os.path.exists(full_dir):\r\n os.makedirs(full_dir)\r\n return full_dir", "def create_summary_folder_name():\n dir_str = '' # ADD PATH\n time_stamp_str = time.strftime(\"%a, %d %b %Y %H:%M:%S/\", time.gmtime())\n param_str = ''\n return dir_str + time_stamp_str + param_str", "def _create_dir_name(date, dir_structure='ymd', is_exif=True):\n if is_exif:\n date_split = date.split(' ')[0].split(':')\n else:\n date_split = date.split(' ')[0].split('-')\n dir_name = '\\\\'\n if 'y' in dir_structure:\n dir_name += date_split[0] + '\\\\'\n if 'm' in dir_structure:\n dir_name += '_'.join(d for d in date_split[:2]) + '\\\\'\n if 'd' in dir_structure:\n dir_name += '_'.join(d for d in date_split[:3]) + '\\\\'\n return dir_name", "def get_log_dir():\n base_dir = os.path.realpath(cfg.CONF.ruiner.log_dir.rstrip('/'))\n return os.path.join(base_dir, test_start_time_tag())", "def get_data_dir() -> str:\n os.makedirs(DEFAULT_OUTPUT_DIR, exist_ok=True)\n return DEFAULT_OUTPUT_DIR", "def data_loader_new_directory_name(self, working_dir: Path):\n\n wd = SnowfakeryWorkingDirectory(working_dir)\n key = wd.index\n if key not in self.cached_counts:\n self.cached_counts[key] = wd.get_record_counts()\n\n if not self.run_until.sobject_name:\n return working_dir\n\n count = self.cached_counts[key][self.run_until.sobject_name]\n\n path, _ = str(working_dir).rsplit(\"_\", 1)\n new_working_dir = Path(path + \"_\" + str(count))\n return new_working_dir", "def get_base_data_directory(self):\n user_category = ''\n directory = ''\n\n if self.session_start_date:\n start_time = self.session_start_date.split(' ')[0] #.replace('-', '')\n else:\n # PL. To avoid mixing users directory if they restart the application\n # after midnight but before 8 AM, the directory date doesn't change:\n _local_time = time.localtime()\n if _local_time[3] > 7:\n start_time = time.strftime(\"%Y-%m-%d\")\n else:\n # substract 8 hourse to current date to get yesterday's date. 
\n _local_time = time.gmtime((time.time() - 8*60*60))\n start_time = time.strftime(\"%Y-%m-%d\", _local_time)\n\n if self.is_inhouse():\n #directory = os.path.join(self.base_directory, self.endstation_name,\n # self.get_user_category(), self.get_proposal(),\n # start_time)\n directory = os.path.join(self.base_directory, start_time, self.proposal_number, self.get_proposal_number())\t \n else:\n #directory = os.path.join(self.base_directory, self.get_user_category(),\n # self.get_proposal(), self.endstation_name,\n # start_time)\n logging.debug(\"SoleilSession self.base_directory %s\" % self.base_directory)\n logging.debug(\"SoleilSession start_time %s\" % start_time)\n logging.debug(\"SoleilSession self.proposal_number %s\" % self.proposal_number)\n logging.debug(\"SoleilSession self.get_proposal_number() %s\" % self.get_proposal_number())\n directory = os.path.join(self.base_directory, start_time, self.get_proposal_number())\n\n return directory", "def get_data_dir(dataset_name: str):\n # Get home directory\n home_dir = os.getenv(\"HOME\")\n\n # Merge home directory with results, project, dataset and directories\n data_dir = os.path.join(\n home_dir, \"data\", \"simple-einet-diff-sampling\", dataset_name\n )\n\n # Create directory if it does not exist\n os.makedirs(data_dir, exist_ok=True)\n return data_dir", "def get_dynamic_data(self, today, settings): # DOY=None, year_doy=None\n\n name_key = 'name_fmt'\n loc_key = 'dir_loc'\n dt_key = 'dt_fmt'\n clim_key = 'climatology'\n doy = today.timetuple().tm_yday\n\n print('settings', settings)\n\n if settings[clim_key]:\n # for climatology then we expect a DOY format\n if settings[dt_key] == 'doy':\n dynamic_key = '{:03d}'.format(doy)\n else:\n print('{} is set to climatology but date format from config_dict is {}'.format(settings[name_key],\n settings[dt_key]))\n sys.exit(0)\n elif settings[dt_key] == 'YYYYdoy':\n dynamic_key = '{}{:03d}'.format(today.year, doy)\n else:\n print('Hey user, the format of the dt_fmt configuration you gave: {} is not supported at '\n 'this time'.format(settings[dt_key]))\n sys.exit(0)\n\n fpath = os.path.join(settings[loc_key], settings[name_key].format(dynamic_key))\n return fpath", "def get_archive_filename():\r\n today = datetime.date.today()\r\n return str(today)", "def file_dir_dir_dir() -> str:\n stack_t = inspect.stack()\n ins = inspect.getframeinfo(stack_t[1][0])\n return os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(ins.filename))))", "def _logFile_default(self):\n print \"choosing default log file\"\n return os.path.join(self.rpiADCLogFolder,time.strftime(\"rpiADC-%Y-%m-%d.csv\", self.currentLocalTime))", "def get_day():\n return handle_invalid_inputs(question_4, days)", "def create_estimation_dir(parent_dir, suffix):\n if parent_dir is None:\n return None\n\n estimation_dir = os.path.join(parent_dir, \"estimations\")\n\n # Make a directory of the estimation with current time\n estimation_dir = os.path.join(estimation_dir, \"\".join([datetime.datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S_%f\"),\n suffix]))\n mkdir_p(estimation_dir)\n\n return estimation_dir", "def file_dir_dir() -> str:\n stack_t = inspect.stack()\n ins = inspect.getframeinfo(stack_t[1][0])\n return os.path.dirname(os.path.dirname(os.path.abspath(ins.filename)))", "def get_pycqed_appdata_dir():\n if os.name == 'nt':\n path = os.path.expandvars(r'%LOCALAPPDATA%\\pycqed')\n else:\n path = os.path.expanduser('~/.pycqed')\n os.makedirs(path, exist_ok=True)\n return path", "def path_helper(location, date, time, slc_dir='slc', 
data_dir='/media/bup/Data'):\n\n base_folder = data_dir + '/' + location + '/' + date + '/'\n name = date + '_' + time\n def_path = base_folder + slc_dir + '/' + name\n return def_path", "def pathtofolder():\n return os.getcwd()", "def get_directory(self) -> Path:\n path = Path(str(self.max_nesting_lvl))\n path /= str(self.num_snps)\n path /= self.read_quality\n path /= str(self.coverage)\n path /= str(self.denovo_kmer_size)\n return path", "def datafolder_from_timestamp(ts=make_timestamp(),make=False):\n\tfolder_day = ts.split(\"_\")[0]\n\tfolder_month=folder_day[:-2]\n\tfolder_year=folder_month[:-2]\n\t#Yearly folders\n\tyear_folder = data_root_folder+folder_separator+folder_year\n\tif make & (os.listdir(data_root_folder).count(folder_year)==0):\n\t\tos.mkdir(year_folder)\n\t#\n\t#Monthly folders\n\tmonth_folder = year_folder+folder_separator+folder_month\n\tif (os.listdir(year_folder).count(folder_month)==0) & make:\n\t\tos.mkdir(month_folder)\n\t#\n\t#Daily folders\n\tday_folder = month_folder+folder_separator+folder_day\n\tif (os.listdir(month_folder).count(folder_day)==0) & make:\n\t\tos.mkdir(day_folder)\n\n\treturn day_folder+folder_separator", "def _create_new_experiment_dir(self):\n ls = os.listdir(self.experiment_dir)\n existing_exp_dirs = [d for d in ls if d.startswith('experiment')]\n if len(existing_exp_dirs) == 0:\n out = 'experiment1'\n else:\n inds = [int(d.lstrip('experiment')) for d in existing_exp_dirs]\n out = 'experiment'+str(max(inds)+1)\n\n self.new_experiment_dir = os.path.join(self.experiment_dir, out)\n os.mkdir(self.new_experiment_dir)\n return None", "def create_plot_dir(base_dir: str) -> str:\n time_str = datetime.now().strftime('%Y%b%d-%H%M%S') \n plot_dir = os.path.join(res_dir, 'fig_'+time_str)\n# plot_dir = os.path.join(res_dir, 'plot')\n if not os.path.exists(plot_dir):\n os.makedirs(plot_dir)\n\n return plot_dir", "def logdir(self) -> Path:\n assert (\n self._logdir\n ), \"Log provider has not been tied to a SummaryWriter yet\"\n return self._logdir", "def prerun(timestamp):\r\n if not os.path.isdir('log'):\r\n os.makedirs('log')\r\n if not os.path.isdir('collected'):\r\n os.makedirs('collected')\r\n if not os.path.isdir('done'):\r\n os.makedirs('done')\r\n time_stamped_folder = os.path.join('collected', timestamp)\r\n if not os.path.isdir(time_stamped_folder):\r\n os.makedirs(time_stamped_folder)\r\n return time_stamped_folder", "def path_to_program_dir(self):\n\tpath = sys.argv[0]\n\n\tif not os.path.isdir(path):\n\t path = os.path.dirname(path)\n\n\tif not path: return '.'\n\n\treturn path", "def setup_logdir(self, default_logdir: Union[str, Path]) -> Path:\n self._default_logdir = Path(default_logdir)\n\n if self._create_logdir:\n self.logdir_path.mkdir(parents=True, exist_ok=True)\n\n if not self.logdir_path.is_dir():\n raise ValueError(f\"logdir '{self.logdir_path}' must be a directory.\")\n\n return self.logdir_path", "def daily_talk(corpus_dir: Pathlike, output_dir: Pathlike, num_jobs: int):\n prepare_daily_talk(corpus_dir, output_dir, num_jobs=num_jobs)", "def _get_project_dir(self):\n return os.path.expanduser(\n self.sqlfluff_config.get_section(\n (self.templater_selector, self.name, \"project_dir\")\n )\n or os.getcwd()\n )", "def get_data_folder():\n data_folder = './MedData/'\n\n if not os.path.isdir(data_folder):\n os.makedirs(data_folder)\n\n return data_folder", "def GetCurrentDir(self) -> str:\n ...", "def _infer_output_directory():\n\n # Check if environment variable currently set\n if \"CHIME_DIR\" not in os.environ:\n # Inform user 
that the environment variable is not currently set\n print(\"CHIME_DIR not set please specify an output directory.\")\n # Check if user wishes to use default path\n use_default = input(\"Use default? [y/n]: \")\n\n if use_default.lower() in [\"y\", \"yes\"]:\n # Use default args to create directory\n _mk_output_dir()\n # Remind user to set environment variable in the future\n print(f\"You should set CHIME_DIR={os.environ['CHIME_DIR']}\")\n\n elif use_default.lower() in [\"n\", \"no\"]:\n # Make output directory using user args\n user_selection = input(\"Specify a path for the output directory: \")\n if user_selection:\n _mk_output_dir(directory_path=user_selection)\n # Remind user to set environment variable\n print(f\"You should set CHIME_DIR={user_selection}.\")\n\n # Recurse if invalid selection\n else:\n _infer_output_directory()\n else:\n # Covers edge-case where user reads documentation\n _mk_output_dir(directory_path=os.environ[\"CHIME_DIR\"])", "def test_directory_path_with_calendar():\n downloader = WallpaperDownloader(\"08-2020\")\n directory_path = downloader._get_directory_path(with_calendar=True)\n exist_directory_path = os.path.join(\n downloader.destination_directory_path,\n \"august-2020 (with-calendar)\",\n )\n assert directory_path == exist_directory_path", "def _get_default_path(self):\n return os.path.join(action_datasets.ROOT_DIR, 'data', 'Actions')", "def _output_log_path(name):\n output = Path(\"../Raw Data/\").joinpath(str(date.today()))\n output.mkdir(parents=True, exist_ok=True)\n return output.joinpath(\"000_logging.hdf5\")", "def _find_or_create_dir(dir_name: str) -> str:\r\n # Get the directory of the current file.\r\n parent_dir_path = os.path.dirname(os.path.realpath(__file__))\r\n \r\n # Create a directory if it doesn't exist.\r\n dir_path = os.path.join(parent_dir_path, dir_name)\r\n if not os.path.exists(dir_path):\r\n os.makedirs(dir_path)\r\n\r\n return dir_path", "def get_log_directory(self):\n\n return self.__config_parser__.get('SETTINGS', 'LOGFILE_DIRECTORY')", "def get_ais_path(self, vessel_type: str, simulation_date: datetime.date) -> Path:\n # adjust date to match AIS file naming convention\n file_date = f\"{self.year}{simulation_date.month:02}01\"\n\n for path in self.paths:\n if vessel_type in path.name and file_date in path.name:\n break\n\n return path", "def get_directory() -> str:\n return directory", "def _get_filepath(self) -> str:\n return os.path.join(\n os.sep.join(\n [\n self.period.value,\n 'activities',\n f'activities_{self._dt_string}.json'\n ]\n )\n )", "def create_path_by_date(dest, dt):\n if not os.path.isdir(dest):\n raise FileNotFoundError(f\"dest {dest} must be valid path\")\n yyyy, mm, dd = dt[0:3]\n yyyy = str(yyyy).zfill(4)\n mm = str(mm).zfill(2)\n dd = str(dd).zfill(2)\n new_dest = os.path.join(dest, yyyy, mm, dd)\n if not os.path.isdir(new_dest):\n os.makedirs(new_dest)\n return new_dest", "def get_query_folders(root_dir, today):\n\n pat = r'(\\d*-\\d*-\\d*-\\d\\s\\s)' #regex for the folder of the type that the SERPs are saved in\n query_folder_list = []\n\n for f in os.listdir(root_dir):\n #match = re.search(pat, f) #search for the format in the regex in the folder name\n #if match:\n #dir_name = match.group(1) #extract query term\n # date = match.group(1) #extract date\n\n if os.path.isdir(os.path.join(root_dir, f)) and (f == today):\n query_folder_list.append(os.path.join(root_dir, f))\n else:\n pass\n if not query_folder_list: #if empty, no htmls were collected for that query term for that day\n print(\" does not 
match a recent dir with same query name in {0}\".format(root_dir))\n\n return query_folder_list", "def day(self):\n return 0", "def day(self):\n return 0", "def default_agasc_dir():\n if 'AGASC_DIR' in os.environ:\n out = Path(os.environ['AGASC_DIR'])\n else:\n out = Path(os.environ['SKA'], 'data', 'agasc')\n return out", "def get_today_stat(cls):\n return cls.get_specified_days_stat(0)", "def test_directory_path_without_calendar():\n downloader = WallpaperDownloader(\"08-2020\")\n directory_path = downloader._get_directory_path(with_calendar=False)\n exist_directory_path = os.path.join(\n downloader.destination_directory_path,\n \"august-2020 (without-calendar)\",\n )\n assert directory_path == exist_directory_path", "def get_fifteen_logfile():\n return \"fifteenStrategy\" + get_day() + \".log\"", "def get_dir_path():\n return DIR_PATH", "def dailyanalysis(experiment):\n import os\n for fn in os.listdir('/network/aopp/hera/mad/bakerh/fms_tmp/' +\n experiment):\n if fn.find('exe.fms') == -1 and fn.find('mppnccombine.ifc') == -1:\n storedaily('/network/aopp/hera/mad/bakerh/fms_tmp/' + experiment +\n '/' + fn + '/combine/',\n '/network/aopp/hera/mad/bakerh/data/FMS/output/' +\n experiment + '/' + fn + '/history/')\n print('Completed ' + fn)", "def output_directory(self):\n if self._output_directory is None:\n cache_filename = self._original_cache\n output_directory = settings.cache_folder / cache_filename\n output_directory.makedirs_p()\n self._output_directory = output_directory.expand()\n return Path(self._output_directory)", "def getSteamAppDir(appid: int) -> str:\n\tfor path in libraryFolders():\n\t\ttry:\n\t\t\tlogger.info(f'searching for {appid} in {path}..')\n\t\t\twith open(f'{path}appmanifest_{appid}.acf', 'r') as file:\n\t\t\t\t# found the app!\n\t\t\t\t# get the app's name\n\t\t\t\tinstDir = Property.parse( file, f'appmanifest_{appid}.acf' ).as_dict()[ 'appstate' ][ 'installdir' ]\n\t\t\t\tpath += f'common/{instDir}/'\n\t\t\t\tlogger.info(f'{appid} found! 
path: {path}')\n\t\t\t\treturn path\n\t\texcept FileNotFoundError:\n\t\t\t# if no, just continue\n\t\t\tcontinue\n\traise RuntimeError(f'No path found for app {appid}!')", "def get_log_folder(cls, test_suite_name):\n if not test_suite_name:\n test_suite_name = os.path.splitext(os.path.basename(sys.modules['__main__'].__file__))[0]\n sdk_path = cls.get_sdk_path()\n log_folder = os.path.join(sdk_path, \"TEST_LOGS\",\n test_suite_name +\n time.strftime(\"_%m%d_%H_%M_%S\", time.localtime(LOG_FOLDER_TIMESTAMP)))\n if not os.path.exists(log_folder):\n os.makedirs(log_folder)\n return log_folder", "def output_yesterday(max_past_days=10):\n path = None\n\n for yesterday_path in yesterdays(max_past_days=max_past_days):\n if yesterday_path.is_file():\n path = yesterday_path\n LOG.debug('Found last known journal.')\n break\n else:\n raise ValueError('Could not find previous journal in past {0:d} days.'.format(max_past_days))\n\n return path", "def default_awg_dir():\n path = os.path.join(get_pycqed_appdata_dir(), 'awg')\n os.makedirs(path, exist_ok=True)\n return path", "def create_daily_file(self, output_dir: str,\n day: int, header='Bazin', get_cost=False):\n # Create the output directory if it doesn't exist\n if not os.path.isdir(output_dir):\n os.makedirs(output_dir)\n\n features_file = output_dir + 'day_' + str(day) + '.dat'\n\n if header == 'Bazin':\n # add headers to files\n with open(features_file, 'w') as param_file:\n if get_cost:\n param_file.write(self.bazin_header)\n else:\n self.bazin_header = 'id redshift type code ' + \\\n 'orig_sample queryable ' + \\\n 'last_rmag gA gB gt0 ' + \\\n 'gtfall gtrise rA rB rt0 rtfall rtrise iA ' + \\\n 'iB it0 itfall itrise zA zB zt0 ztfall ztrise\\n'\n param_file.write(self.bazin_header)\n\n else:\n with open(features_file, 'w') as param_file:\n param_file.write(self.header)", "def get_current_day():\n current_day = datetime.now().strftime('%A').lower()\n return current_day", "def getFSDir(self):\n \n # target dir configuration value\n dirname = os.path.abspath(\n self.config.get(\"FileMan\",\"targetdir\")\n )\n\n # exists or not - create\n if not os.path.isdir(dirname):\n try:\n os.mkdir(dirname)\n except OSError,e:\n raise AuthError(500,\"Could not create target directory [%s]:%s\"%\\\n (dirname, e))\n\n # check directory permission\n if not os.access(dirname, 7):\n raise AuthError(500,\"Write access denied for target directory [%s]\"% (dirname))\n\n return dirname", "def _get_default_path(self):\n return os.path.join(cfg.ROOT_DIR, 'data', 'KITTI')", "def getDesDir(self):\n if self.desdir == None:\n cwd = os.getcwd()\n self.desdir = os.path.join(cwd, self.args.dest)\n return self.desdir", "def daily_avg(dacycle,avg):\n \n if avg not in ['transcom','transcom_extended','olson','olson_extended','country','flux1x1']:\n raise IOError,'Choice of averaging invalid'\n\n analysisdir = dacycle['dir.analysis']\n weekdir = os.path.join(analysisdir , 'data_%s_weekly'%avg)\n daydir = os.path.join(analysisdir , 'data_%s_daily'%avg)\n\n if not os.path.exists(daydir):\n print \"Creating new output directory \" + daydir\n os.makedirs(daydir)\n\n files = os.listdir(weekdir)\n files = [f for f in files if '-' in f and f.endswith('.nc')]\n\n fileinfo = {}\n for filename in files:\n date=datetime.datetime.strptime(filename.split('.')[-2],'%Y-%m-%d')\n fileinfo[filename] = date\n \n dt = dacycle['cyclelength']\n\n for k,v in fileinfo.iteritems():\n cycle_file = os.path.join(weekdir,k)\n for i in range(abs(dt.days)):\n daily_file = 
os.path.join(daydir,'%s_fluxes.%s.nc'%(avg,(v+datetime.timedelta(days=i)).strftime('%Y-%m-%d')))\n if not os.path.lexists(daily_file):\n os.symlink(cycle_file,daily_file)\n #print daily_file,cycle_file" ]
[ "0.62076753", "0.5934615", "0.59194785", "0.58207476", "0.57378083", "0.5428975", "0.54136276", "0.53955543", "0.5333741", "0.5282543", "0.5224942", "0.519696", "0.519696", "0.5180282", "0.5148305", "0.51434815", "0.5098279", "0.5076191", "0.5064754", "0.50544584", "0.5054282", "0.5029764", "0.5018956", "0.5011002", "0.5006264", "0.50057095", "0.4997772", "0.4975446", "0.49469227", "0.4939346", "0.4935224", "0.49333403", "0.49302968", "0.49030307", "0.4895709", "0.4894794", "0.48804787", "0.48800293", "0.48771587", "0.48694927", "0.48628643", "0.48620668", "0.48609427", "0.48585984", "0.48503974", "0.4841737", "0.48330587", "0.48327696", "0.48310566", "0.48130727", "0.48052126", "0.4802002", "0.4791357", "0.4789006", "0.4785057", "0.47821715", "0.47819987", "0.47765183", "0.47762534", "0.47747698", "0.47747266", "0.47739616", "0.47721207", "0.47570148", "0.47505736", "0.4743814", "0.47336915", "0.4731094", "0.4729925", "0.47270882", "0.4721733", "0.47176045", "0.4712256", "0.47111875", "0.47102994", "0.47098446", "0.46976063", "0.46963957", "0.4687641", "0.46872878", "0.46804866", "0.46786895", "0.46786895", "0.46781185", "0.46746048", "0.46731442", "0.467312", "0.4670103", "0.46694618", "0.4668328", "0.46648943", "0.4664607", "0.46593451", "0.4656399", "0.4653126", "0.4649628", "0.46474388", "0.46473578", "0.46470696", "0.46453115" ]
0.7822021
0
Return the path to an experiment's ARTIQ results directory. The standard results path is ``<shared area>/artiqResults/<experiment name>``.
def artiq_results_path(experiment: Optional[str] = None) -> str: path = os.path.join(shared_area_path(), "artiqResults") if experiment is None: try: experiment = os.environ["OITG_EXPERIMENT"] except KeyError: raise Exception( "No experiment supplied, and no OITG_EXPERIMENT environment key") return os.path.join(path, experiment)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data_abex_results_dir(experiment_name: str) -> Path: # pragma: no cover\n return experiment_dir(experiment_name) / \"Results\"", "def data_abex_results_iteration_dir(experiment_name: str, iteration: int) -> Path: # pragma: no cover\n return data_abex_results_dir(experiment_name) / iteration_name(iteration)", "def _get_results_path(self):\n # if we already have the results path set, please return it\n if self._results_path is not None:\n return self._results_path\n\n self._validate_results_path()\n\n path = self.results_path\n\n if path is None:\n for i in range(1, 10001):\n name = f\"AutoML_{i}\"\n if not os.path.exists(name):\n self.create_dir(name)\n self._results_path = name\n return name\n # If it got here, could not create, raise expection\n raise AutoMLException(\"Cannot create directory for AutoML results\")\n elif os.path.exists(self.results_path) and os.path.exists(\n os.path.join(self.results_path, \"params.json\")\n ): # AutoML already loaded, return path\n self._results_path = path\n return path\n # Dir does not exist, create it\n elif not os.path.exists(path):\n self.create_dir(path)\n self._results_path = path\n return path\n # Dir exists and is empty, use it\n elif os.path.exists(path) and not len(os.listdir(path)):\n self._results_path = path\n return path\n elif os.path.exists(path) and len(os.listdir(path)):\n raise AutoMLException(\n f\"Cannot set directory for AutoML. Directory '{path}' is not empty.\"\n )\n\n raise AutoMLException(\"Cannot set directory for AutoML results\")", "def dir_results(assignment, user):\n return os.path.join(repository, assignment, user, 'results')", "def experiment_dir(experiment_name: str) -> Path: # pragma: no cover\n return EXPERIMENTS_DIR / experiment_name", "def ml_predict_results_path(self) -> str:\n return join(self.machine_learning_path, 'results')", "def _get_R_script_dir(self):\r\n qiime_dir = get_qiime_project_dir()\r\n script_dir = join(qiime_dir, 'qiime', 'support_files', 'R')\r\n return script_dir", "def _get_R_script_dir(self):\r\n qiime_dir = get_qiime_project_dir()\r\n script_dir = join(qiime_dir, 'qiime', 'support_files', 'R')\r\n return script_dir", "def _get_R_script_dir(self):\r\n qiime_dir = get_qiime_project_dir()\r\n script_dir = join(qiime_dir, 'qiime', 'support_files', 'R')\r\n return script_dir", "def results_dir(filename = None):\n path = 'results'\n if os.path.isdir(path):\n if not os.access(path, os.R_OK | os.W_OK):\n raise EnvironmentError(\"{0} is not readable or writable\".format(os.path.abspath(path)))\n return os.path.join(path, filename) if filename else path\n os.mkdir(path) # raises if it fails\n return os.path.join(path, filename) if filename else path", "def local_results(self):\n\n return self._local_results_path", "def get_result_path(self):\n return logPath", "def results_directory(self, run_config):\n suite_dir_name = '{}_{}'.format(run_config['test_suite_start_time'],\n run_config['test_id'])\n datetime_str = datetime.datetime.now().strftime('%Y%m%dT%H%M%S')\n test_result_dir = '{}'.format(datetime_str)\n result_dir = os.path.join(self.workspace, 'results', suite_dir_name,\n test_result_dir)\n\n # Creates workspace and default log folder\n if not os.path.exists(result_dir):\n print('Making results directory:{}'.format(result_dir))\n os.makedirs(result_dir)\n\n return result_dir", "def get_qiime_temp_dir():\r\n qiime_config = load_qiime_config()\r\n qiime_config_value = qiime_config['temp_dir']\r\n if qiime_config_value is not None:\r\n result = qiime_config_value\r\n else:\r\n result = 
'/tmp/'\r\n return result", "def getRelativeRootExperimentPath(self):\n return userId + \"/\" + \\\n self._rootExportPath[self._rootExportPath.rfind(self._properties['export_dir']):]", "def output_path(self) -> str:\n if self._output_path is None:\n if not self._root_folder:\n self._root_folder = self._env.experiments_folder\n folder = os.path.join(self._root_folder, self.key)\n\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n self._output_path = folder\n\n return self._output_path", "def results(results_dir):\n\n if not isinstance(results_dir, str):\n raise TypeError(\"results_dir must be a string!\")\n\n if not os.path.exists(results_dir):\n os.makedirs(results_dir)\n\n return results_dir", "def get_trial_dir() -> str:\n return logging.root._log_dir # type: ignore", "def index_path(self):\n\t\treturn os.path.normpath(self.output + \"/\" + self.resultset_index)", "def remote_results(self):\n\n return self._remote_results_path", "def config_data_path(experiment_name: str) -> Path: # pragma: no cover\n return experiment_dir(experiment_name) / DATA_CONFIG", "def get_archive_file_path(self,results):\n path = os.path.join(self.archive_path,results.version)\n if not os.path.exists(path):\n os.makedirs(path)\n return os.path.join(path,self.get_archive_filename(results))", "def get_log_dir():\n base_dir = os.path.realpath(cfg.CONF.ruiner.log_dir.rstrip('/'))\n return os.path.join(base_dir, test_start_time_tag())", "def get_qiime_scripts_dir():\r\n script_fp = which('print_qiime_config.py')\r\n\r\n if script_fp is None:\r\n raise ScriptsDirError(\"Could not find the directory containing QIIME \"\r\n \"scripts. QIIME scripts must be accessible via \"\r\n \"the PATH environment variable, and they must \"\r\n \"be executable. Please ensure that you have a \"\r\n \"valid QIIME installation (see the QIIME \"\r\n \"Installation Guide: \"\r\n \"http://qiime.org/install/install.html).\")\r\n\r\n return dirname(script_fp)", "def config_abex_path(experiment_name: str) -> Path: # pragma: no cover\n return experiment_dir(experiment_name) / ABEX_CONFIG", "def test_get_result_directory(self):\n pass", "def get_output_path(self):\n output_path = '%s/%s' % (\n os.path.expanduser(JOB_OUTPUT_PATH), self.get_unique_name())\n return output_path", "def _fetch_results_dir(self, backend=None, results_dir=None):\n if backend is None and results_dir is not None:\n return results_dir\n elif backend is not None and results_dir is None:\n return Config().get_results_dir(backend)\n elif backend is None and results_dir is None:\n return self.nominal_results_dir\n else:\n raise ResultsAttributeError('Invalid combination of attributes!')", "def _getResultsFileName(self, toilPath):\n return os.path.join(toilPath, \"results.txt\")", "def output_path(self):\r\n return '%s/%s' % (os.path.abspath(os.path.dirname(__file__) + 'outputs'),\r\n self.identifier)", "def output_dir(self):\n return os.path.join(self._sandbox, 'output' + os.path.sep)", "def queries_path(tmpdir):\n return Path(tmpdir) / \"queries\"", "def get_output_path():\n return os.getcwd() + \"/output/\"", "def sirv_truth_dir(self):\n return op.join(self.root_dir, \"SIRV\")", "def data_abex_input_dir(experiment_name: str) -> Path: # pragma: no cover\n return experiment_dir(experiment_name) / DATA_DIR / \"ABEX-Inputs\"", "def test_get_qiime_scripts_dir(self):\r\n obs = get_qiime_scripts_dir()\r\n\r\n # We can't do much testing of the observed value, but let's at least\r\n # check that the directory exists.\r\n self.assertTrue(isdir(obs))", "def 
simulation_dir(self):\n try:\n return (self.output_directory / self.sim_id).expand()\n except AttributeError:\n return Path()", "def _get_R_script_path(self):\r\n return join(self._get_R_script_dir(), self._r_script)", "def _get_R_script_path(self):\r\n return join(self._get_R_script_dir(), self._r_script)", "def _get_R_script_path(self):\r\n return join(self._get_R_script_dir(), self._R_script)", "def get_benchmark_file_path(cls, results_dir):\n return os.path.join(results_dir, \"benchmarks.json\")", "def getTradeOutputDir():\n\tglobal config\n\treturn config['directory']['output']", "def data_abex_input_path(experiment_name: str, iteration: int) -> Path: # pragma: no cover\n iteration_csv: str = f\"{iteration_name(iteration)}.csv\"\n return data_abex_input_dir(experiment_name) / iteration_csv", "def get_qiime_project_dir():\r\n # Get the full path of util.py\r\n current_file_path = abspath(__file__)\r\n # Get the directory containing util.py\r\n current_dir_path = dirname(current_file_path)\r\n # Return the directory containing the directory containing util.py\r\n return dirname(current_dir_path)", "def tempdir(self):\n path = tempfile.gettempdir()\n return os.path.join(path, 'parquet-index-test-' + str(uuid.uuid4()))", "def format_result_paths(self):\n if self.file_blacklist == \"ALL\" or self.results_path is None:\n return\n\n # Blacklist the prediction files for any datasets that were not given\n if self.holdout_dataset is None:\n self.file_blacklist.append(\"predictions_holdout\")\n if self.test_dataset is None:\n self.file_blacklist.append(\"predictions_test\")\n\n # Add given `experiment_recorders` to `result_paths`\n for recorder in self.experiment_recorders:\n try:\n recorder, result_path = recorder\n except IndexError:\n raise IndexError(f\"Expected `recorder` to be tuple of (class, str), not {recorder}\")\n\n self.result_paths[recorder.result_path_key] = result_path\n\n # Set full filepath for result files relative to `results_path`, or to None (blacklist)\n for k in self.result_paths.keys():\n if k == \"root\":\n continue\n elif k not in self.file_blacklist:\n # If `k` not in `RESULT_FILE_SUB_DIR_PATHS`, then added via `experiment_recorders`\n self.result_paths[k] = os.path.join(\n self.results_path, RESULT_FILE_SUB_DIR_PATHS.get(k, self.result_paths[k])\n )\n else:\n self.result_paths[k] = None\n # G.debug('Result file \"{}\" has been blacklisted'.format(k))", "def get_resultdir(project_obj, result_folder):\n description = project_obj.description.replace(' ', '') # remove any spaces in the project name\n return 'documents/%s/%s/reports/%s' % (str(project_obj.date.date()), description, result_folder)", "def get_tranquility_path():\n return os.path.join(get_appdata(), 'CCP\\\\EVE\\\\d_eve_sharedcache_tq_tranquility')", "def build_result_folder(timestamp=str(int(time.time()))):\n out_path = os.path.abspath(os.path.join(os.path.curdir, \"runs\", timestamp))\n print(\"Writing to {}\\n\".format(out_path))\n\n data_path = os.path.abspath(os.path.join(out_path, \"data\"))\n evaluation_path = os.path.abspath(os.path.join(out_path, \"evaluation\"))\n\n if not os.path.exists(out_path):\n os.makedirs(data_path)\n os.makedirs(evaluation_path)\n return out_path", "def out_path(dataset, work_dir=consts.WORK_DIR):\r\n return join_path(dataset_path(dataset, work_dir), consts.OUTPUT_DIR)", "def output_abspath(self):\n return os.path.join(self.queue.data_abspath, self.id)", "def get_fuzzer_filestore_path(benchmark_df, fuzzer):\n fuzzer_df = benchmark_df[benchmark_df.fuzzer == fuzzer]\n 
filestore_path = fuzzer_df.experiment_filestore.unique()[0]\n exp_name = fuzzer_df.experiment.unique()[0]\n return posixpath.join(filestore_path, exp_name)", "def get_score_path(cfg):\n return os.path.join(\n get_score_dir(cfg),\n \"ener.csv\")", "def questionDirectoryPath(instance, filename):\n return \"phanluot/{}/{}\".format(instance.questionID, filename)", "def fasta_dir(self):\n return op.join(self.root_dir, 'fasta')", "def ml_evaluate_samples_path(self) -> str:\n return join(self.machine_learning_path, 'evaluate')", "def set_results_path(self):\n\n self.results_input.delete(0, END)\n path = set_path()\n self.results_input.insert(0, path)", "def assure_experiment_dir(self):\n if not os.path.exists(self.experiment_dir):\n os.makedirs(self.experiment_dir)\n experiment_conf_dir = os.path.join(self.experiment_dir, 'conf')\n if not os.path.exists(experiment_conf_dir):\n os.makedirs(experiment_conf_dir)", "def _fetch_run_dir(self, run_id, backend=None, results_dir=None):\n results_dir = self._fetch_results_dir(backend=backend, results_dir=results_dir)\n return os.path.join(results_dir, run_id)", "def get_test_data_path():\n return os.path.abspath(os.path.join(os.path.dirname(__file__), \"data\") + os.path.sep)", "def create_experiment_path(self, experiment):\n # get communications object as link to microscope hardware\n communicatons_object = self._get_control_software().connection\n experiment_path = communicatons_object.create_experiment_path(\n experiment, self.experiment_folder\n )\n return experiment_path", "def default_agasc_dir():\n if 'AGASC_DIR' in os.environ:\n out = Path(os.environ['AGASC_DIR'])\n else:\n out = Path(os.environ['SKA'], 'data', 'agasc')\n return out", "def get_results_directory(self, high_level_directory):\n batch_num_dir_name = \"BN{}\".format(self.config_manager.get(ConfigManager.CONFIG_BATCH_MO_NUMBER))\n serial_num_dir_name = \"SN{}-{}\".format(self.config_manager.get(ConfigManager.CONFIG_BATCH_MO_NUMBER),\n self.config_manager.get(ConfigManager.CONFIG_SERIAL_NUMBER))\n results_path = os.path.join(high_level_directory, batch_num_dir_name, serial_num_dir_name)\n os.makedirs(results_path, exist_ok=True, mode=0o660)\n \n return results_path", "def tests_path(self, version):\n base = self.version_path(version)\n return osp.normpath(osp.join(base, os.pardir, os.pardir,\n \"share\", \"aster\", \"tests\"))", "def resultFilepath(result):\n basePath = RESULTS_PATH+'/'+result['type']+'/'\n if result['type'] == 'artist':\n return basePath + result['name']['ids']['nameId']+'.json'\n else:\n print '__ERR : type de reponse non pris en charge'", "def dataset_path(cls):\n basepath = os.path.dirname(__file__)\n filepath = os.path.abspath(\n os.path.join(basepath, \"..\", \"datasets\", get_env('DATA_FILENAME')))\n return filepath", "def return_output_path(self):\n # Return the path of the output file\n return os.path.join(self._output_file_location, self._output_filename)", "def test_get_qiime_scripts_dir(self):\r\n # get_qiime_scripts_dir will raise an error if it can't find a scripts\r\n # directory.\r\n scripts_dir = get_qiime_scripts_dir()\r\n self.assertTrue(isdir(scripts_dir), \"The QIIME scripts directory does \"\r\n \"not exist: %s\" % scripts_dir)", "def output_path():\n folder = path.join(path.curdir, \"stages\")\n folder = path.abspath(folder)\n return ensure_path(folder)", "def data_characterization_dir(experiment_name: str) -> Path: # pragma: no cover\n return experiment_dir(experiment_name) / DATA_DIR / \"Characterization\"", "def tmp_dir_path(self) -> str:\n 
return self._tmp_dir_path", "def test_data_dir(self):\n return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')", "def _output_path(name):\n output = Path(\"../Analysis Results/\").joinpath(str(date.today()))\n output.mkdir(parents=True, exist_ok=True)\n return output.joinpath(f\"{name}.png\")", "def data_characterization_path(experiment_name: str, iteration: int) -> Path: # pragma: no cover\n iteration_csv: str = f\"{iteration_name(iteration)}.csv\"\n return data_characterization_dir(experiment_name) / iteration_csv", "def get_axebindir():\n import sys\n\n if 'axesrc' in sys.modules:\n modfile = sys.modules['axesrc'].__file__\n axebindir = os.path.abspath(os.path.join(os.path.dirname(modfile),'../bin/'))\n\n else:\n from pyraf import iraf\n\n # just read all variables\n all_variables = iraf.getVarDict()\n\n arch = all_variables['arch']\n stsdas = all_variables['stsdas']\n # compose the bin directory\n axebindir = os.path.join(stsdas, 'bin'+arch)\n #axe = all_variables['axe']\n #axebindir = all_variables['axebin']\n\n # compose the bin directory\n #axebindir = os.path.join(axe, 'bin')\n\n # return the path\n return axebindir", "def get_score_dir(cfg):\n return os.path.join(\n BASE_DATA_DIR,\n \"capt\",\n \"pH\" + str(cfg.pH),\n str(cfg.mut))", "def return_testvideo_path():\r\n\tpath = '{}/Downloads/Test_videos/BigBuckBunny.mp4'.format(os.environ['USERPROFILE'] if os.name == 'nt' else os.environ['HOME'])\r\n\treturn os.path.abspath(path)", "def test_dir():\n return os.path.abspath(os.path.dirname(__file__))", "def get_test_file_path(self):\n xml_file_path_prefix = \"./tests/\"\n return xml_file_path_prefix + self.test_name + \"_data/\"", "def form_results():\n folder_name = \"/{0}_{1}_{2}_{3}_{4}_{5}_autoencoder\". \\\n format(datetime.datetime.now(), z_dim, learning_rate, batch_size, n_epochs, beta1)\n tensorboard_path = results_path + folder_name + '/Tensorboard'\n saved_model_path = results_path + folder_name + '/Saved_models/'\n log_path = results_path + folder_name + '/log'\n if not os.path.exists(results_path + folder_name):\n os.mkdir(results_path + folder_name)\n os.mkdir(tensorboard_path)\n os.mkdir(saved_model_path)\n os.mkdir(log_path)\n return tensorboard_path, saved_model_path, log_path", "def getJobDir(jobName=None):\n if jobName is None:\n jobName = getJobName()\n return os.path.join(os.environ['LCATR_INSTALL_AREA'], jobName,\n os.environ['LCATR_VERSION'])", "def get_report_path(self):\n report_path = os.path.join(logPath, \"report.html\")\n return report_path", "def get_lr_root():\n data_acis_lr = Path(\"data\", \"acis\", \"LoadReviews\")\n path = \"/\" / data_acis_lr\n if not path.exists():\n path = os.environ[\"SKA\"] / data_acis_lr\n if not path.exists():\n raise FileNotFoundError(\"no available ACIS load review directory\")\n return path", "def rdap_info_cache_directory() -> str:\n current_path = Path(__file__).resolve().parent\n return os.path.join(current_path, 'cache', 'rdap')", "def GetResultFile(self):\n\n file_path = self.configfile.map['ResultFilePath']\n\n # Check if several entrie\n if file_path is not None:\n if len(file_path) > 1:\n warning(\n 'Many path for the result file are setted ({}), I will take the first one'\n .format(file_path))\n file_path = file_path[0]\n\n # If the storing file is elsewhere\n if file_path != \"#\":\n sys.path.insert(0, file_path)\n base = DBASE.open('Anna')\n\n if base is not None:\n return base\n else:\n error(\n 'Cannot find Anna file in {}'\n .format(file_path))\n return None\n\n else:\n base = 
DBASE.open('Anna')\n if base is not None:\n return base\n else:\n error(\n 'Cannot find Anna file in {}'\n .format(file_path))\n return None", "def __get_output_dir(self, conf):\n return conf[self.conf_item.get_output_dir()]", "def form_results():\n folder_name = \"/{0}_{1}_{2}_{3}_{4}_{5}_Adversarial_Autoencoder\". \\\n format(time.strftime(\"%Y-%m-%d %Hh%Mm%Ss\", time.localtime()), z_dim,\n learning_rate, batch_size, n_epochs, beta1)\n tensorboard_path = results_path + folder_name + '/Tensorboard'\n saved_model_path = results_path + folder_name + '/Saved_models/'\n log_path = results_path + folder_name + '/log'\n if not os.path.exists(results_path + folder_name):\n os.mkdir(results_path + folder_name)\n os.mkdir(tensorboard_path)\n os.mkdir(saved_model_path)\n os.mkdir(log_path)\n return tensorboard_path, saved_model_path, log_path", "def res_file_directory(username,code,run_number,results_dir=\"results\",res_file_subdir=os.path.join(\"results\",\"res\")):\n\n group_home = os.environ.get(\"GROUP_HOME\")\n if (type(group_home) is not str):\n raise(ValueError(\"Need to set environment variable GROUP_HOME\"))\n\n res_directory = os.path.join(group_home,results_dir,username,code,\"run\"+run_number)\n if (res_file_subdir is not None):\n res_directory = os.path.join(res_directory,res_file_subdir)\n\n return res_directory", "def track_path(self, filename):\n return os.path.join(os.path.dirname(__file__), 'testdata', filename)", "def track_path(self, filename):\n return os.path.join(os.path.dirname(__file__), 'testdata', filename)", "def track_path(self, filename):\n return os.path.join(os.path.dirname(__file__), 'testdata', filename)", "def path(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"path\")", "def get_sample_data_dir():\n \n return resource_filename('cdat_lite.test.test_cdms', 'sample_data')", "def log_path(self):\n return os.path.join(self._sandbox, 'log')", "def get_output_dir(self):\n return self.output_dir", "def getLogPath(self, folder):\n path = join(self.folder,'experimentLog_0001.txt')\n for f_path in os.listdir(folder):\n if re.search('experimentLog_[0-9]*.txt', f_path):\n path = join(self.folder,f_path)\n break\n\n return path", "def get_output_dir(imdb, net):\n path = os.path.abspath(os.path.join(__C.ROOT_DIR, 'output', __C.EXP_DIR, imdb.name))\n if net is None:\n return path\n else:\n return os.path.join(path, net.name)", "def output_path(self):\n\n output_path = stringify(self._output_path)\n if output_path is None:\n with current_context() as ctx:\n output_path_relative = stringify(self.output_path_relative)\n if output_path_relative is not None:\n output_path = join_path(ctx.paths.output, output_path_relative)\n else:\n output_path = ctx.current.project.get_output_path(self.executor.output_type)\n return output_path", "def _get_project_dir(self):\n return os.path.expanduser(\n self.sqlfluff_config.get_section(\n (self.templater_selector, self.name, \"project_dir\")\n )\n or os.getcwd()\n )", "def get_pycqed_appdata_dir():\n if os.name == 'nt':\n path = os.path.expandvars(r'%LOCALAPPDATA%\\pycqed')\n else:\n path = os.path.expanduser('~/.pycqed')\n os.makedirs(path, exist_ok=True)\n return path" ]
[ "0.80512667", "0.7191955", "0.68996114", "0.6845597", "0.68339694", "0.66065645", "0.65667313", "0.65667313", "0.65667313", "0.65351224", "0.63939637", "0.6320295", "0.61587936", "0.6123075", "0.60877836", "0.60146594", "0.5972927", "0.5961885", "0.5882054", "0.58809346", "0.5845682", "0.5831248", "0.5815053", "0.57882553", "0.57843715", "0.574206", "0.5716158", "0.5699615", "0.5683362", "0.5673101", "0.56584567", "0.5619277", "0.56152874", "0.559876", "0.55766433", "0.5568953", "0.5566138", "0.5544834", "0.5544834", "0.55414873", "0.55372864", "0.5533345", "0.55184895", "0.5497518", "0.5493019", "0.5490097", "0.5483502", "0.548034", "0.5464465", "0.5458607", "0.54467773", "0.54435253", "0.5434052", "0.542069", "0.5416532", "0.541214", "0.5388817", "0.5386616", "0.537702", "0.5369012", "0.536298", "0.534746", "0.53414756", "0.53385115", "0.5332305", "0.5331681", "0.53314906", "0.5325333", "0.53203815", "0.53184193", "0.5310373", "0.53062963", "0.5300834", "0.5296873", "0.52886343", "0.5284749", "0.5268316", "0.5265501", "0.5263195", "0.5262423", "0.526051", "0.52584326", "0.5256575", "0.5254926", "0.5252969", "0.52506286", "0.5249883", "0.5246273", "0.522035", "0.522035", "0.522035", "0.5219927", "0.5212773", "0.5211824", "0.5210138", "0.5207362", "0.5201455", "0.5187998", "0.5174206", "0.5168886" ]
0.873001
0
estimate an MxF user factor matrix and an FxN item factor matrix from the MxN rating matrix
def factor_mat(all_dat, f_num, iterations, regularization):

    # get # of users and # of items
    [u_num, i_num] = all_dat.shape

    # init user factors and item factors with random values
    u_fac = np.matrix(np.random.rand(u_num, f_num))  # MxF
    i_fac = np.matrix(np.random.rand(i_num, f_num))  # NxF

    # calculate the preference matrix
    preference = cal_preference(all_dat)

    # calculate the confidence matrix
    confidence = cal_confidence(all_dat)

    # recalculate the user factors and item factors using the alternating least square method
    for itr in range(iterations):
        u_fac = alternate_ls(u_num, i_fac, preference, confidence, regularization)
        #print itr, "u_fac"
        i_fac = alternate_ls(i_num, u_fac, preference.T, confidence.T, regularization)
        #print itr, "i_fac"

    # save the output
    df = pd.DataFrame(u_fac)
    df.to_csv("tmp/u_fac.tmp", index=False, header=False, sep='\t', encoding='utf-8')
    df = pd.DataFrame(i_fac.T)
    df.to_csv("tmp/i_fac.tmp", index=False, header=False, sep='\t', encoding='utf-8')

    # an MxF user factor matrix and an FxN item factor matrix
    return [u_fac, i_fac.T]
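Note: the snippet above calls cal_preference, cal_confidence, and alternate_ls without defining them (and assumes numpy/pandas imports). Below is a minimal sketch of what those helpers might look like, assuming the usual implicit-feedback ALS formulation (preference p_ui = 1 when r_ui > 0, confidence c_ui = 1 + alpha * r_ui, and a ridge-regression solve per row); the bodies are illustrative assumptions, not part of the original snippet.

# Illustrative helper sketches (assumed implementations, not from the original source).
import numpy as np

def cal_preference(all_dat):
    # binary preference: 1 where an interaction/rating is observed, 0 otherwise
    return (np.asarray(all_dat) > 0).astype(float)

def cal_confidence(all_dat, alpha=40.0):
    # confidence grows linearly with the observed value (alpha is a tuning constant)
    return 1.0 + alpha * np.asarray(all_dat)

def alternate_ls(num_rows, other_fac, preference, confidence, regularization):
    # fix the other side's factors Y and solve the regularized least-squares update
    # x_u = (Y^T C_u Y + reg * I)^(-1) Y^T C_u p_u for every row u
    Y = np.asarray(other_fac)
    C = np.asarray(confidence)
    P = np.asarray(preference)
    f_num = Y.shape[1]
    YtY = Y.T @ Y
    reg_eye = regularization * np.eye(f_num)
    new_fac = np.zeros((num_rows, f_num))
    for u in range(num_rows):
        c_u = C[u, :]
        p_u = P[u, :]
        A = YtY + Y.T @ ((c_u - 1.0)[:, None] * Y) + reg_eye
        b = Y.T @ (c_u * p_u)
        new_fac[u, :] = np.linalg.solve(A, b)
    # return as np.matrix to match the factor type used in the snippet
    return np.matrix(new_fac)

With these assumed definitions (plus import numpy as np and import pandas as pd), factor_mat runs end to end on a dense MxN rating array, provided the tmp/ directory exists for the CSV output.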
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_matrix(self):\n\n self.matrix = np.zeros((len(self.users), len(self.items)))\n\n for user in self.train_set['users']:\n for item in self.train_set['feedback'][user]:\n self.matrix[self.user_to_user_id[user]][self.item_to_item_id[item]] = \\\n self.train_set['feedback'][user][item]", "def get_user_item_matrix(datafile, user_index, product_index):\n num_users = len(user_index)\n num_items = len(product_index)\n result = np.zeros((num_users, num_items))\n num_reviews = len(datafile)\n result_dense = np.zeros((num_reviews, 3))\n for line in datafile.iterrows():\n i = line[0]\n user_id = datafile['user_id'][i]\n product_id = datafile['business_id'][i]\n user = user_index[user_id]\n product = product_index[product_id]\n rating = datafile['stars'][i]\n result[user, product] = rating\n result_dense[i, 0] = user\n result_dense[i, 1] = product\n result_dense[i, 2] = rating\n return result, result_dense", "def create_user_item_matrix(data,type='unary'): \n if type == 'unary':\n\n # for unary rating drop duplicates\n data = data.drop_duplicates()\n\n # create sparse matrix\n matrix = csr_matrix((data['rating'], (data['user_id'],data['product_id'])))\n\n # rows and cols with empty values will be dropped (doesnt make any difference in size for sparse matrix, but if later converted to dense, it saves space)\n # get all non empty rows and cols\n rows, cols = matrix.nonzero()\n unique_rows = np.unique(rows)\n unique_cols = np.unique(cols)\n\n # select only rows and cols with values\n matrix = matrix[unique_rows]\n matrix = matrix[:,unique_cols]\n\n return matrix\n\n if type == 'count':\n\n # create sparse matrix with counted ratings\n matrix = csr_matrix((data['rating'], (data['user_id'],data['product_id'])))\n\n # rows and cols with empty values will be dropped (doesnt make any difference in size for sparse matrix, but if later converted to dense, it saves space)\n # get all non empty rows and cols\n rows, cols = matrix.nonzero()\n unique_rows = np.unique(rows)\n unique_cols = np.unique(cols)\n\n # select only rows and cols with values\n matrix = matrix[unique_rows]\n matrix = matrix[:,unique_cols]\n\n '''\n Im Gegensatz zum Memory Based approach, muss beim Model Based Approach noch das Rating angepasst werden. \n Das heisst, dass Produkte die viel eingekauft wurden ein höhers Rating erhalten und solche die weniger \n eingekauft wudren ein tieferes. Gleichzeitig muss das Maximum limitiert werden. Dies wird mittels \n dem max_count integer in der Funktion bewerkstelligt\n '''\n\n # create diagonal Matrix with 1 divided by maximum values per row. 
This needs to be done because there is no divide function for csr matrices implemented\n # source: https://stackoverflow.com/questions/42225269/scipy-sparse-matrix-division\n diag = diags(1/matrix.max(axis=1).A.ravel())\n\n # multipy the matrix with the maximum values to get range from 0-1\n matrix = diag.dot(matrix)\n\n # sort indices; not really needed, just cosmetics\n matrix.sort_indices()\n\n return matrix", "def example():\n num_user, num_item, ratings = build_ticket()\n \n # suffle_data\n np.random.shuffle(ratings)\n\n # split data to training & validation\n train_pct = 0.9\n train_size = int(train_pct * len(ratings))\n train = ratings[:train_size]\n validation = ratings[train_size:]\n\n # params\n num_feature = 5\n bmf_model = BayesianMatrixFactorization(\n num_user, num_item, num_feature, train, validation, max_rating=1, min_rating=0, ratingsMatirx=ratings)\n\n start_time = time.clock()\n bmf_model.estimate(10)\n end_time = time.clock()\n \n mat = np.matrix(bmf_model.item_features)\n with open('../data/ticket/item_feature', 'w') as f:\n for line in mat:\n np.savetxt(f, line, fmt='%.5f')\n\n print \"time spend = %.3f\" % (end_time - start_time)\n\n return bmf_model", "def get_user_feature_matrix_p(user_dict, user_index, aspect_index, N, popularity, A_dense, Polarity):\n result = np.zeros((len(user_index), len(aspect_index)))\n for key in user_dict.keys():\n index_user = user_index[key]\n user_reviews = user_dict[key]\n count_dict = {}\n max = 0\n min = 1000\n for review in user_reviews:\n feature = review[0]\n if feature not in aspect_index:\n continue\n aspect = aspect_index[feature]\n if aspect not in count_dict:\n count_dict[aspect] = 0;\n if Polarity == False:\n count_dict[aspect] += 1\n else:\n count_dict[aspect] += review[1]\n for aspect in count_dict.keys():\n count = count_dict[aspect]\n if count > max:\n max = count\n if count < min:\n min = count\n for aspect in count_dict.keys():\n count = count_dict[aspect]\n result[index_user, aspect] = (((count - min)/(max - min))*5)\n\n if len(popularity) > 0:\n col = np.zeros((len(result), 1))\n result = np.append(result, col, axis=1)\n for i in range(len(result)):\n items = A_dense[A_dense[:, 0] == i][:, 1]\n items = items.astype(int)\n result[i, len(result[1]) - 1] = np.mean(popularity[items, 1])\n return result", "def fit(self, users, items, ratings, test_users=[], test_items=[], test_ratings=[], **kargs):\n global_mean_bias_init = np.float32(np.mean(ratings))\n global_mean_bias_init = 0.01\n self.model = DeepFM_(**self.dfm_params, global_mean_bias_init=global_mean_bias_init, first_half_fit_only_fm=self.first_half_fit_only_fm)\n \n # もし、CTR予測の場合は、y=0のデータをランダム生成する。\n if self.ctr_prediction:\n users = list(users) + list(np.random.choice(list(set(users)), size=len(users)))\n items = list(items) + list(np.random.choice(list(set(items)), size=len(items)))\n ratings = list((np.array(ratings)>0).astype(int)) + [0]*len(ratings)\n test_ratings = list((np.array(test_ratings)>0).astype(int))\n \n Xi, Xv = self.data_manager.transform_users_and_items_to_Xi_Xv(users, items)\n \n if len(test_users)>0:\n test_Xi, test_Xv = self.data_manager.transform_users_and_items_to_Xi_Xv(test_users, test_items)\n self.model.fit(Xi, Xv, ratings, test_Xi, test_Xv, test_ratings, early_stopping=True)\n else:\n self.model.fit(Xi, Xv, ratings, early_stopping=True, **kargs)\n \n # load data\n self.trained_users = list(set(users))\n self.trained_items = list(set(items))\n self.global_mean = self.model.predict(Xi, Xv).mean()", "def calculate_recommendations(self, 
vote_list, itemMatch, itemIgnored):\n #print \"--------------------------------------------------\"\n #print \"calculate_recommendations\"\n #print \"--------------------------------------------------\"\n\n # http://www.quuxlabs.com/blog/2010/09/matrix-factorization-a-simple-tutorial-and-implementation-in-python/\n\n # U = np.array('users')\n # D = np.array('video_games')\n\n # R = |U| cross |D|\n\n # We want to discover K latent features\n\n # Find\n # P(a | |U| corss K matrix)\n # Q(a | |D| cross K matrix)\n # Such that their product approximates R\n # R approx= P cross transpose(Q) = hat(R)\n #\n\n # r[i][j] = transpose(p)[i] * q[j]\n # = sum( 1..k, p[i][k] * q[k][j] )\n\n # e[i][j]**2 = (r[i][j] - hat(r)[i][j])**2\n # = (r[i][j] - sum( 1..K, p[i][k] * q[k][j]))**2\n # squared error, estimated rating can be either higher or lower than the real thing\n\n # find the gradient\n # diff(e[i][j]**2, p[i][k]) = -2*(r[i][j] - hat(r)[i][j]) * (q[k][j]) = -2*e[i][j] * q[k][j]\n # diff(e[i][j]**2, q[k][j]) = -2*(r[i][j] - hat(r)[i][j]) * (p[i][k]) = -2*e[i][j] * p[i][k]\n\n # update rules\n # alpha = settings.alpha # learning_rate\n # alpha = 0.0002 # learning_rate\n # p[i][k]' = p[i][k] + alpha * diff(e[i][j]**2, p[i][k])\n # = p[i][k] + 2 * alpha * e[i][j] * q[k][j]\n # q[k][j]' = q[k][j] + alpha * diff(e[i][j]**2, q[k][j])\n # = q[k][j] + 2 * alpha * e[i][j] * p[i][k]\n\n # training data\n # T = (u[i], d[j], r[i][j])\n # np.array()\n\n # iterate until convergance\n # E = sum((u[i], d[j], r[i][j]) in T, e[i][j])\n # = sum((u[i], d[j], r[i][j]) in T, r[i][j]\n # - sum(1..k, p[i][k]*q[k][j]))**2\n\n # regularization\n # beta = 0.02\n # e[i][j]**2 = (r[i][j] - sum(1..K, p[i][j]*q[k][j]))**2\n # + ((beta/2) * sum(1..K, norm(P)**2 + norm(Q)**2))\n #\n # p[i][k]' = p[i][k] + alpha * (2 * e[i][j] * q[k][j] - beta * p[i][k])\n # q[k][j]' = q[k][j] + alpha * (2 * e[i][j] * p[i][k] - beta * q[k][j])\n\n data = np.array(vote_list)\n\n encoder = OneHotEncoder()\n\n users = data[:,0]\n unique_users = list(set(users))\n for i in range(len(users)):\n users[i] = unique_users.index(users[i])\n\n video_games = data[:,1]\n unique_games = list(set(video_games))\n for i in range(len(video_games)):\n video_games[i] = unique_games.index(video_games[i])\n\n ratings = data[:,2]\n M = len(set(video_games))\n N = len(set(users))\n R = np.zeros((N,M))\n for i in range(len(users)):\n user = users[i]\n game = video_games[i]\n rating = ratings[i]\n R[user][game] = rating\n\n K = 2\n\n P = np.random.rand(N,K)\n Q = np.random.rand(M,K)\n\n nP, nQ = self.matrix_factorization(R, P, Q, K)\n nR = np.dot(nP, nQ.T)\n\n itemMatch = {}\n for i in range(N):\n user = unique_users[i]\n itemMatch[user] = []\n for j in range(M):\n if R[i][j] == 0:\n video_game = unique_games[j]\n recommendation = (video_game, nR[i][j])\n itemMatch[user].append(recommendation)\n itemMatch[None] = []\n print 'pmf recommendations', itemMatch.items()\n print '\\n'\n recommendations = itemMatch.items()\n\n # returns\n # [\n # (<user1>, [\n # (\"<object_identifier1>\", <score>),\n # (\"<object_identifier2>\", <score>),\n # ]),\n # (<user2>, [\n # (\"<object_identifier1>\", <score>),\n # (\"<object_identifier2>\", <score>),\n # ]),\n # ]\n\n return recommendations", "def init_matrix(self, train, num_features):\n user_matrix = np.random.rand(self.num_users, num_features)\n item_matrix = np.random.rand(num_features, self.num_items)\n item_nnz = train.getnnz(axis=0)\n item_sum = train.sum(axis=0)\n item_matrix[0, :] = item_sum / item_nnz\n return user_matrix, 
item_matrix", "def create_adjust_matrix(self, is_sub: bool):\n matrix = None\n if not is_sub:\n ratings = np.ones_like(self._user, dtype=np.float32)\n matrix = sp.csr_matrix(\n (ratings, (self._user, self._item + self.n_users)),\n shape=(self.n_users + self.n_items, self.n_users + self.n_items),\n )\n else:\n if self.type == \"ND\":\n drop_user = self.rand_sample(\n self.n_users,\n size=int(self.n_users * self.drop_ratio),\n replace=False,\n )\n drop_item = self.rand_sample(\n self.n_items,\n size=int(self.n_items * self.drop_ratio),\n replace=False,\n )\n R_user = np.ones(self.n_users, dtype=np.float32)\n R_user[drop_user] = 0.0\n R_item = np.ones(self.n_items, dtype=np.float32)\n R_item[drop_item] = 0.0\n R_user = sp.diags(R_user)\n R_item = sp.diags(R_item)\n R_G = sp.csr_matrix(\n (\n np.ones_like(self._user, dtype=np.float32),\n (self._user, self._item),\n ),\n shape=(self.n_users, self.n_items),\n )\n res = R_user.dot(R_G)\n res = res.dot(R_item)\n\n user, item = res.nonzero()\n ratings = res.data\n matrix = sp.csr_matrix(\n (ratings, (user, item + self.n_users)),\n shape=(self.n_users + self.n_items, self.n_users + self.n_items),\n )\n\n elif self.type == \"ED\" or self.type == \"RW\":\n keep_item = self.rand_sample(\n len(self._user),\n size=int(len(self._user) * (1 - self.drop_ratio)),\n replace=False,\n )\n user = self._user[keep_item]\n item = self._item[keep_item]\n\n matrix = sp.csr_matrix(\n (np.ones_like(user), (user, item + self.n_users)),\n shape=(self.n_users + self.n_items, self.n_users + self.n_items),\n )\n\n matrix = matrix + matrix.T\n D = np.array(matrix.sum(axis=1)) + 1e-7\n D = np.power(D, -0.5).flatten()\n D = sp.diags(D)\n return D.dot(matrix).dot(D)", "def forward(self, user, item):\n item_emb = self.product_factors(item.view(-1)) + self.product_bias(\n item.view(-1)\n )\n user_emb = self.user_factors(user.view(-1)) + self.user_bias(user.view(-1))\n mat_mult = (item_emb * user_emb).sum(1)\n\n return mat_mult", "def affinity_matrix(test_specs):\n\n np.random.seed(test_specs[\"seed\"])\n\n # uniform probability for the 5 ratings\n s = [(1 - test_specs[\"spars\"]) / test_specs[\"ratings\"]] * test_specs[\"ratings\"]\n s.append(test_specs[\"spars\"])\n P = s[::-1]\n\n # generates the user/item affinity matrix. 
Ratings are from 1 to 5, with 0s denoting unrated items\n X = np.random.choice(\n test_specs[\"ratings\"] + 1, (test_specs[\"users\"], test_specs[\"items\"]), p=P\n )\n\n Xtr, Xtst = numpy_stratified_split(\n X, ratio=test_specs[\"ratio\"], seed=test_specs[\"seed\"]\n )\n\n return Xtr, Xtst", "def update_model(self):\n for itemidx, itemid in self._allitems.iterrows():\n self.__itemid2idx[str(itemid['itemid'])] = itemidx\n self.__itemidx2id[itemidx] = str(itemid['itemid'])\n for useridx, userid in enumerate(self._uservisits['userid'].unique()):\n self.__userid2idx[str(userid)] = useridx\n self.__useridx2id[useridx] = str(userid)\n userid = self._uservisits['userid'].values\n itemid = self._uservisits['itemid'].values\n rating = self._uservisits['rating'].values\n useridx = [self.__userid2idx[str(int(uid))] for uid in userid]\n itemidx = [self.__itemid2idx[str(int(iid))] for iid in itemid]\n rating = list(map(numpy.double, rating))\n self.__itemuser = csr_matrix((rating, (useridx, itemidx)), shape=(len(set(useridx)), len(set(itemidx))))\n self.__recommender = implicit.als.AlternatingLeastSquares(factors=self.__numtopics)\n self.__recommender.fit(self.__itemuser)", "def recommend_NMF(new_user,movies_num,movies_ratings):\n list_id_movies = movies_ratings['movieId'].unique()\n nmf,Q = load_NMF_model()\n new_user_vector = pd.DataFrame(new_user, index=list_id_movies).T\n new_user_vector_filled = new_user_vector.fillna(3)\n #calculate Matrix P (Genres)\n P = nmf.transform(new_user_vector_filled)\n #make predictions\n predictions = np.dot(P,Q)\n recommendations = pd.DataFrame(predictions.reshape(-1), index=list_id_movies).T\n #Remove already watched movies:\n not_watched_movies_mask = np.isnan(new_user_vector)\n not_watched = recommendations[not_watched_movies_mask]\n\n top_movies_ids = not_watched.T.sort_values(by=[0], ascending=False).index[:movies_num]\n\n Top_recommended = movieId_to_title(top_movies_ids,movies_ratings) \n return Top_recommended", "def fit(self, df):\n\n # generate continuous indices if this hasn't been done\n if self.index2item is None:\n self.set_index(df)\n\n logger.info(\"Collecting user affinity matrix\")\n if not np.issubdtype(df[self.col_rating].dtype, np.number):\n raise TypeError(\"Rating column data type must be numeric\")\n\n # copy the DataFrame to avoid modification of the input\n temp_df = df[[self.col_user, self.col_item, self.col_rating]].copy()\n\n if self.time_decay_flag:\n logger.info(\"Calculating time-decayed affinities\")\n # if time_now is None use the latest time\n if not self.time_now:\n self.time_now = df[self.col_timestamp].max()\n\n # apply time decay to each rating\n temp_df[self.col_rating] *= exponential_decay(\n value=df[self.col_timestamp],\n max_val=self.time_now,\n half_life=self.time_decay_half_life,\n )\n\n # group time decayed ratings by user-item and take the sum as the user-item affinity\n temp_df = (\n temp_df.groupby([self.col_user, self.col_item]).sum().reset_index()\n )\n else:\n # without time decay use the latest user-item rating in the dataset as the affinity score\n logger.info(\"De-duplicating the user-item counts\")\n temp_df = temp_df.drop_duplicates(\n [self.col_user, self.col_item], keep=\"last\"\n )\n\n logger.info(\"Creating index columns\")\n # map users and items according to the two dicts. 
Add the two new columns to temp_df.\n temp_df.loc[:, self.col_item_id] = temp_df[self.col_item].map(self.item2index)\n temp_df.loc[:, self.col_user_id] = temp_df[self.col_user].map(self.user2index)\n\n # retain seen items for removal at prediction time\n self.seen_items = temp_df[[self.col_user_id, self.col_item_id]].values\n\n # affinity matrix\n logger.info(\"Building user affinity sparse matrix\")\n self.user_affinity = self.compute_affinity_matrix(\n temp_df, self.n_users, self.n_items\n )\n\n # calculate item co-occurrence\n logger.info(\"Calculating item co-occurrence\")\n item_cooccurrence = self.compute_coocurrence_matrix(\n temp_df, self.n_users, self.n_items\n )\n\n # free up some space\n del temp_df\n\n self.item_frequencies = item_cooccurrence.diagonal()\n\n logger.info(\"Calculating item similarity\")\n if self.similarity_type == sar.SIM_COOCCUR:\n self.item_similarity = item_cooccurrence\n elif self.similarity_type == sar.SIM_JACCARD:\n logger.info(\"Calculating jaccard\")\n self.item_similarity = jaccard(item_cooccurrence).astype(\n df[self.col_rating].dtype\n )\n elif self.similarity_type == sar.SIM_LIFT:\n logger.info(\"Calculating lift\")\n self.item_similarity = lift(item_cooccurrence).astype(\n df[self.col_rating].dtype\n )\n else:\n raise ValueError(\n \"Unknown similarity type: {0}\".format(self.similarity_type)\n )\n\n # free up some space\n del item_cooccurrence\n\n logger.info(\"Done training\")", "def fit_data(self, matrix, user_features=None, item_features=None):\r\n matrix.sort_index(inplace=True)\r\n matrix.sort_index(inplace=True, axis=1)\r\n dataset = Dataset()\r\n dataset.fit((x for x in matrix.index),\r\n (x for x in matrix.columns))\r\n interactions = pd.melt(matrix.replace(0, np.nan).reset_index(),\r\n id_vars='index',\r\n value_vars=list(matrix.columns[1:]),\r\n var_name='plu_id',\r\n value_name='rating').dropna().sort_values('index')\r\n interactions.columns = ['crd_no', 'plu_id', 'rating']\r\n self.interactions, self.weights = dataset.build_interactions([tuple(x) for x in interactions.values])\r\n\r\n if user_features is not None:\r\n user_features.sort_index(inplace=True)\r\n dataset.fit_partial(users=user_features.index,\r\n user_features=user_features)\r\n self.user_features = dataset.build_user_features(\r\n ((index, dict(row)) for index, row in user_features.iterrows()))\r\n else:\r\n self.user_features = None\r\n if item_features is not None:\r\n item_features.sort_index(inplace=True)\r\n dataset.fit_partial(items=item_features.index,\r\n item_features=item_features)\r\n self.item_features = dataset.build_item_features(\r\n ((index, dict(row)) for index, row in item_features.iterrows()))\r\n else:\r\n self.item_features = None", "def __init__(self, ratings, rank, user_reg_loss, item_reg_loss):\n self._ratings = ratings\n self._users_num, self._items_num = ratings.shape\n self._rank = rank\n self._user_reg_loss = user_reg_loss\n self._item_reg_loss = item_reg_loss\n \n self._train_function = self._als_partial_step_explicit\n \n # DONE init latent factors for user and item matrix\n # losowo ustalamy inicjalne wartości X i Y\n self._user_factors = np.random.random((self._users_num, rank))\n self._item_factors = np.random.random((self._items_num, rank))", "def predict_rating(user_id,item_id):\n user_preference = latent_user_preferences[user_id]\n item_preference = latent_item_features[item_id]\n return user_preference.dot(item_preference)", "def compute_affinity_matrix(self, df, n_users, n_items):\n\n return sparse.coo_matrix(\n (df[self.col_rating], 
(df[self.col_user_id], df[self.col_item_id])),\n shape=(n_users, n_items),\n ).tocsr()", "def init_MF(train, num_features):\n num_user = train.shape[1]\n num_item = train.shape[0]\n user_features = np.random.rand(num_features,num_user) # user_features shape (20,943)\n item_features = np.random.rand(num_item, num_features) # item_features shape (1152,20)\n return user_features, item_features", "def feature_matrix(df, user_id=None, item_id=None):\n print(\"get feature matrix\")\n df1 = df.drop_duplicates(subset=['user_id'], keep='first', inplace=False)\n user_x = None\n if user_id is not None:\n user_x = int(np.argwhere(df1['user_id'].values == user_id))\n user_features = df1[['average_stars']].values\n csr_user_features = sparse.csr_matrix(user_features)\n\n df2 = df.drop_duplicates(\n subset=['business_id'],\n keep='first',\n inplace=False)\n item_x = None\n if item_id is not None:\n item_x = int(np.argwhere(df2['business_id'].values == item_id))\n item_features = df2.iloc[:, 10:].values\n\n csr_item_features = sparse.csr_matrix(item_features)\n return csr_user_features, csr_item_features, user_x, item_x", "def simMatrix(self, d = 1/5):\n \n self.fit_baseline(d)\n self.evalBaseline()\n \n \n df_mat = np.array(self.df[[\"user ind\", \"item ind\", \"rating\"]].merge(self.r_b, on = [\"user ind\", \"item ind\"]))\n df_ind = df_mat[:,:2].astype(int)\n df_rat = df_mat[:,2] - df_mat[:,3]\n \n \n self.M = np.zeros((self.n_us, self.n_it))\n \n \n widgets = ['Test: ', Percentage(), ' ', Bar(\"#\"), ' ', ETA()]\n pbar = ProgressBar(widgets = widgets, maxval = self.n_us)\n pbar.start()\n \n for us in self.user_ind:\n it = df_ind[np.where(df_ind[:,0] == us)[0], 1]\n rat1 = df_rat[np.where(df_ind[:,0] == us)[0]]\n self.M[us,it] = rat1\n \n pbar.update(us)\n \n pbar.finish()\n \n #self.M = self.UI.toarray()\n pbar = ProgressBar(widgets = widgets, maxval = self.n_it * (self.n_it - 1) / 2)\n pbar.start()\n \n self.S = np.empty((self.n_it, self.n_it)) * np.nan\n \n for i1 in range(self.n_it):\n # self.S[i1,i1] = 1\n x1 = self.M[:,i1]\n for i2 in range(i1+1,self.n_it):\n x2 = self.M[:,i2]\n I = np.logical_and(x1, x2)\n if (len(I) > 1):\n self.S[i1,i2] = self.S[i2,i1] = Sim.cos2(x1.T[I], self.M[:,i2].T[I])\n \n pbar.update((self.n_it)*(i1+1) - (i1+2)*(i1+1)/2)\n \n pbar.finish()\n \n return self.S", "def score_items(X, U, mu,\n scoremethod='lowhigh',\n missingmethod='none',\n feature_weights=[]):\n\n # Use U to model and then reconstruct the data in X.\n # 1. Project all data in X into space defined by U,\n # then reconstruct it.\n if missingmethod.lower() != 'ignore':\n # All missing values should have been replaced with 0,\n # or non-existent.\n # 1a. Subtract the mean and project onto U\n proj = np.dot(U.T, (X - mu))\n # 1b. Reconstruct by projecting back up and adding mean\n reproj = np.dot(U, proj) + mu\n # 1c. Compute the residual\n #print('X:', X.T)\n #print('reproj:', reproj.T)\n err = X - reproj\n #print('err:', err.T)\n #input()\n \n else:\n # Missing method must be 'ignore' (Brand 2002)\n (err, reproj) = compute_error_with_missing(X, U, mu)\n\n # 2. 
Compute reconstruction error\n if scoremethod == 'low': # Blank out all errors > 0\n err[err>0] = 0\n elif scoremethod == 'high': # Blank out all errors < 0\n err[err<0] = 0\n else: # default, count everything\n pass\n \n # Weight features if requested\n if len(feature_weights) > 0:\n for i in range(len(feature_weights)):\n err[i,:] = err[i,:] * feature_weights[i]\n\n if missingmethod.lower() == 'ignore':\n # Only tally error for observed features.\n # This means that items with missing values are not penalized\n # for those features, which is probably the best we can do.\n scores = np.nansum(np.array(np.power(err, 2)), axis=0)\n else:\n scores = np.sum(np.array(np.power(err, 2)), axis=0)\n\n #print('scores:', scores)\n #print('reproj:', reproj)\n #input()\n return (scores, reproj)", "def ratings_to_matrix(ratings_df, user_col, item_col, rating_col, forced_shape=None):\n users_num = ratings_df.user_id.max() + 1\n items_num = ratings_df.item_id.max() + 1\n \n if forced_shape:\n users_num = max(users_num, forced_shape[0])\n items_num = max(items_num, forced_shape[1])\n \n ratings_mat = np.zeros([users_num, items_num])\n for rating in ratings_df.itertuples():\n ratings_mat[rating[user_col], rating[item_col]] = rating[rating_col]\n \n return ratings_mat", "def __init__(self, user_factors, item_factors):\n self._user_factors = np.copy(user_factors)\n self._item_factors = np.copy(item_factors)\n \n self._users_num = user_factors.shape[0]\n self._items_num = item_factors.shape[0]\n\n assert user_factors.shape[1] == item_factors.shape[1]", "def fit(self, ratings):\n # Training proceeds in 2 steps:\n # 1. Normalize item vectors to be mean-centered and unit-normalized\n # 2. Compute similarities with pairwise dot products\n self._timer = util.Stopwatch()\n\n init_rmat, users, items = matrix.sparse_ratings(ratings)\n n_items = len(items)\n _logger.info('[%s] made sparse matrix for %d items (%d ratings from %d users)',\n self._timer, len(items), init_rmat.nnz, len(users))\n\n rmat, item_means = self._mean_center(ratings, init_rmat, items)\n\n rmat = self._normalize(rmat)\n\n _logger.info('[%s] computing similarity matrix', self._timer)\n smat = self._compute_similarities(rmat)\n\n _logger.info('[%s] got neighborhoods for %d of %d items',\n self._timer, np.sum(np.diff(smat.rowptrs) > 0), n_items)\n\n _logger.info('[%s] computed %d neighbor pairs', self._timer, smat.nnz)\n\n self.item_index_ = items\n self.item_means_ = item_means\n self.item_counts_ = np.diff(smat.rowptrs)\n self.sim_matrix_ = smat\n self.user_index_ = users\n self.rating_matrix_ = init_rmat\n\n return self", "def important_factors_based_on_ratings(data: pd.DataFrame) -> np.ndarray:\n # Turn labels into binary classification for equal class distribution\n data = utils.add_ratings_binary(data)\n # Get feature and label data for classifcation from original dataset\n X, y = utils.get_rating_features_labels(data)\n\n # Grab features from feature matrix\n features = X.columns\n\n # split data into train and test set\n X_train, X_test, y_train, y_test = model_selection.train_test_split(X.values, y, test_size=0.2) \n\n # Instantiate and train xgboost model for rating classfication\n xgb_model = xgboost.XGBClassifier()\n xgb_model.fit(X_train, y_train)\n\n # Grab feature importance scores from trained model\n feature_importance = xgb_model.feature_importances_\n # Find indices of top 2 important features\n top_important_features_ind = np.argpartition(feature_importance, -2)[-2:]\n\n print(f\"The top 2 important features are 
{features[top_important_features_ind]}\")\n\n return feature_importance", "def get_user_feature_matrix(user_dict, user_index, aspect_index, N):\n result = np.zeros((len(user_index), len(aspect_index)))\n for key in user_dict.keys():\n index_user = user_index[key]\n user_reviews = user_dict[key]\n count_dict = {}\n for review in user_reviews:\n feature = review[0]\n if feature not in aspect_index:\n continue\n aspect = aspect_index[feature]\n if aspect not in count_dict:\n count_dict[aspect] = 0;\n count_dict[aspect] += 1\n for aspect in count_dict.keys():\n count = count_dict[aspect]\n result[index_user, aspect] = 1 + (N - 1) * (2 / (1 + exp(-count)) - 1)\n return result", "def personalization(prediction, n):\n # prediction\n # n top n recommendation\n\n top_n = get_top_n(prediction, n)\n\n rec_dict = {}\n for uid, user_ratings in top_n.items():\n rec_dict[uid] = [iid for (iid, _) in user_ratings]\n\n rec_user_ls = [pred[0] for pred in prediction]\n rec_item_ls = [pred[1] for pred in prediction]\n\n unique_rec_user_ls = np.unique(rec_user_ls)\n unique_rec_item_ls = np.unique(rec_item_ls)\n\n # assign each item with index number\n unique_rec_item_dict = {item: ind for ind,\n item in enumerate(unique_rec_item_ls)}\n\n n_unique_rec_user = len(unique_rec_user_ls)\n n_unique_rec_item = len(unique_rec_item_ls)\n\n # recommended user item matrix\n rec_matrix = np.zeros(shape=(n_unique_rec_user, n_unique_rec_item))\n\n # represent recommended item for each user as binary 0/1\n for user in range(n_unique_rec_user):\n # get userid\n user_id = unique_rec_user_ls[user]\n # get rec item list\n item_ls = rec_dict[user_id]\n\n for item_id in item_ls:\n # get item index\n item = unique_rec_item_dict[item_id]\n rec_matrix[user, item] = 1\n\n # calculate cosine similarity matrix across all user recommendations\n similarity = cosine_similarity(X=rec_matrix, dense_output=False)\n # calculate average of upper triangle of cosine matrix\n upper_right = np.triu_indices(similarity.shape[0], k=1)\n # personalization is 1-average cosine similarity\n score = 1 - np.mean(similarity[upper_right])\n return score", "def update_item_feature(\n train, item_features, user_features, lambda_item,\n nz_item_userindices, I):\n for n, item_n in enumerate(nz_item_userindices):\n nnz_users_per_item = len(item_n[1]) # Number of users who rated item n\n if (nnz_users_per_item == 0): nnz_users_per_item = 1\n # Least squares solution\n A_n = np.dot(user_features[:,item_n[1]], user_features[:,item_n[1]].T) + lambda_item * nnz_users_per_item * I\n V_n = np.dot(user_features[:,item_n[1]], train.T[item_n[1],item_n[0]].todense())\n #if (n%3 == 0): print(\"item_n: {}\".format(item_n[0]), np.linalg.det(A_n))\n if (np.linalg.det(A_n) != 0): item_features.T[:,item_n[0]] = np.linalg.solve(A_n,V_n)\n else: \n A_n[0,0] += 1; A_n[1,1] += 1; A_n[2,2] += 1; A_n[3,3] += 1; A_n[4,4] += 1; A_n[5,5] += 1 # if matrix A_n is singular, slightly modify several values\n item_features.T[:,item_n[0]] = np.linalg.solve(A_n,V_n)", "def item_user_matrix(X):\n X['user_id'] = X['user_id'].astype(\"category\")\n X['song_id'] = X['song_id'].astype(\"category\")\n\n row = X['song_id'].cat.codes.copy()\n col = X['user_id'].cat.codes.copy()\n\n nrow = len(X['song_id'].cat.categories)\n ncol = len(X['user_id'].cat.categories)\n\n item_user = csr_matrix((X['score'], (row, col)), shape=(nrow, ncol))\n\n user = dict(enumerate(X['user_id'].cat.categories))\n user_index = {u: i for i, u in user.items()}\n\n item = dict(enumerate(X['song_id'].cat.categories))\n item_index = 
{s: i for i, s in item.items()}\n\n return item_user, item_index, user_index", "def fit(self, ratings_mat):\n self.logger.debug(\"starting fit\")\n # self.n = ratings.max()['user']+1\n # self.p = ratings.max()['movie']+1\n self.ratings_mat = ratings_mat\n self.k = ratings_mat.shape[0]//20\n\n #ratings_array = ratings[ratings.columns[:-1].values].values\n\n #self.ratings_mat = np.zeros((self.n, self.p))\n\n #for i, rating in ratings.iterrows():\n # self.ratings_mat[( rating['user'], rating['movie'] )] = rating['rating']\n\n self.cosine_dists = squareform(pdist(ratings_mat, 'cosine'))\n\n #if a user has no ratings data, cosine dist will return a nan. In this case, we assume they are as different as possible, since we cannot predict using those users anyways\n self.cosine_dists = 1 - np.nan_to_num(1 - self.cosine_dists)\n\n self.similarity_ranks = self.cosine_dists.argsort(axis = 1)\n\n # ...\n\n self.logger.debug(\"finishing fit\")\n return(self)", "def FM(user_feature_columns, item_feature_columns, l2_reg_embedding=1e-6, loss_type='softmax', temperature=0.05,\n sampler_config=None, seed=1024,\n ):\n\n embedding_matrix_dict = create_embedding_matrix(user_feature_columns + item_feature_columns, l2_reg_embedding,\n seed=seed,\n seq_mask_zero=True)\n\n user_features = build_input_features(user_feature_columns)\n user_inputs_list = list(user_features.values())\n user_sparse_embedding_list, _ = input_from_feature_columns(user_features,\n user_feature_columns,\n l2_reg_embedding, seed=seed,\n support_dense=False,\n embedding_matrix_dict=embedding_matrix_dict)\n\n item_features = build_input_features(item_feature_columns)\n item_inputs_list = list(item_features.values())\n item_sparse_embedding_list, _ = input_from_feature_columns(item_features,\n item_feature_columns,\n l2_reg_embedding, seed=seed,\n support_dense=False,\n embedding_matrix_dict=embedding_matrix_dict)\n\n user_dnn_input = concat_func(user_sparse_embedding_list, axis=1)\n user_vector_sum = Lambda(lambda x: reduce_sum(x, axis=1, keep_dims=False))(user_dnn_input)\n user_vector_sum = l2_normalize(user_vector_sum)\n\n item_dnn_input = concat_func(item_sparse_embedding_list, axis=1)\n item_vector_sum = Lambda(lambda x: reduce_sum(x, axis=1, keep_dims=False))(item_dnn_input)\n item_vector_sum = l2_normalize(item_vector_sum)\n\n if loss_type == \"logistic\":\n score = inner_product(user_vector_sum, item_vector_sum, temperature)\n output = PredictionLayer(\"binary\", False)(score)\n\n elif loss_type == \"softmax\":\n output = InBatchSoftmaxLayer(sampler_config._asdict(), temperature)(\n [user_vector_sum, item_vector_sum, item_features[sampler_config.item_name]])\n else:\n raise ValueError(' `loss_type` must be `logistic` or `softmax` ')\n\n model = Model(inputs=user_inputs_list + item_inputs_list, outputs=output)\n\n model.__setattr__(\"user_input\", user_inputs_list)\n model.__setattr__(\"user_embedding\", user_vector_sum)\n\n model.__setattr__(\"item_input\", item_inputs_list)\n model.__setattr__(\"item_embedding\", item_vector_sum)\n\n return model", "def getUserItemMatrix(self):\n\t\tdf = self.getrating()\n\n\t\trows_index = df.user_id.unique()\n\t\tcolumn_index = df.venue_id.unique() \n\n\t\trow_len = len(rows_index)\n\t\tcol_len = len(column_index)\n\n\t\tX = lil_matrix((row_len, col_len))\n\t\trow_map = dict(zip(rows_index, range(row_len)))\n\t\tcol_map = dict(zip(column_index, range(col_len)))\n\n\t\t# Get mapping table for rows and columns\n\t\td = {}\n\t\td[\"row\"] = row_map\n\t\td[\"col\"] = col_map\n\n\t\tfor index, row in 
df.iterrows():\n\t\t\tX[d[\"row\"][row[\"user_id\"]], d[\"col\"][row[\"venue_id\"]]] = row[\"Rating\"]\n\n\t\tX = X.tocsr() # Allow efficient row slicing\n\n\t\treturn [d,X]", "def find_predictions(actives, train_rdd_gbitem_dict, train_rdd_gbuser_dict, num_items):\n active_user = actives[0][0]\n active_item = actives[0][1]\n\n # -----------------------------------\n # train_rdd_gbitem_dict = (item, ([(user,r),(user,r)...],avg_of_item))\n # train_rdd_gbuser_dict = (user, [(item,r),(item,r)...]\n\n if active_user not in train_rdd_gbuser_dict and active_item not in train_rdd_gbitem_dict:\n return (active_user, active_item), 2.5\n\n # all user, ratings that have rated active_item\n if active_item in train_rdd_gbitem_dict:\n active_item_avg = train_rdd_gbitem_dict[active_item][1]\n active_item_dict = dict(train_rdd_gbitem_dict[active_item][0]) # {user: rating, user: rating, ...}\n else:\n # item not found in training set\n # new item problem.\n average_of_user_list = train_rdd_gbuser_dict[active_user]\n average_of_user = sum([x[1] for x in average_of_user_list]) / len(average_of_user_list)\n return (active_user, active_item), average_of_user\n\n # user rated items - all (item, ratings) that the user has rated\n if active_user in train_rdd_gbuser_dict:\n active_user_rated_items = train_rdd_gbuser_dict[active_user] # [(item, rating), (item, rating), ...]\n else:\n # user not found in training set\n # new user problem.\n return (active_user, active_item), train_rdd_gbitem_dict[active_item][1]\n\n similarity_list = []\n for item, rating in active_user_rated_items:\n item_dict = dict(train_rdd_gbitem_dict[item][0])\n item_avg = train_rdd_gbitem_dict[item][1]\n similarity = find_similarity(dict(active_item_dict), active_item_avg, dict(item_dict), item_avg)\n similarity_list.append((rating, similarity))\n\n # Have obtained similarity list for active item and item from the above code.\n # Filter according to a top 'N' items and then take avg rating.\n # similarity_list.sort(key=lambda x: x[1], reverse=True)\n # similarity_list = similarity_list[:len(similarity_list) // 4]\n # similarity_list = [(x[0], x[1]*abs(x[1])**1.5) for x in similarity_list]\n # print(similarity_list)\n pred_rating = find_weighted_average(similarity_list, num_items)\n\n # for i in similarity_list:\n # print(i)\n # print(\"Pred-rating: \", pred_rating)\n\n return (active_user, active_item), pred_rating", "def convert_fidelity_matrix(fidelities, fidelity, sx, factor):\n\n matrix = np.zeros((sx, fidelities - 1))\n for idx in range(0, fidelity):\n matrix[:, idx] = np.ones((sx)) * factor[fidelity]\n\n return matrix", "def train_nmf(movies_ratings):\n \n #pivot the dataframe\n movies_ratings = movies_ratings.pivot_table(index='userId', columns='movieId', values='rating')\n #Fill Nan with 3.0 rating:\n movies_ratings.fillna(3.0, inplace=True)\n\n nmf_model = NMF(\n n_components=20,\n init='random',\n random_state=10,\n max_iter=10000\n )\n nmf_model.fit(movies_ratings)\n\n #save nmf model\n pickle.dump(nmf_model,open(\"models/nmf_model.sav\", 'wb'))", "def metric_test(self):\n k = 10\n latent_factor = 10\n n_users = 10\n n_items = 12\n\n interactions, user_features, item_features = util.generate_dummy_data_with_indicator (num_users=n_users, num_items=n_items, interaction_density=.5)\n print (\"interactiosn shape={}\".format( np.shape(interactions) ))\n print (\"user features shape={}\".format( np.shape(user_features.toarray()) ))\n print (\"item features shape={}\".format( np.shape(item_features.toarray()) ))\n\n model = 
TensorRec(n_components=latent_factor)\n\n model.fit(interactions, user_features, item_features, epochs=19)\n\n ranks = model.predict_rank(user_features=user_features, item_features=item_features)\n\n print (\"Ranks shape={}\".format(np.shape(ranks)))\n\n self.assertTrue(np.shape(interactions) == np.shape(ranks))\n\n tr_recall_result = eval.recall_at_k(predicted_ranks=ranks, test_interactions=interactions, k=k, preserve_rows=False)\n # print (tr_recall_result.mean())\n\n tr_precision_result = eval.precision_at_k(predicted_ranks=ranks, test_interactions=interactions, k=k, preserve_rows=False)\n # print(tr_precision_result.mean())\n\n # we need csr for interactions data\n interactions_ = interactions.tocsr()\n recall_result = metrics.recall_at_k(ranks, interactions_, k=k, preserve_rows=False)\n # print(recall_result.mean())\n\n precision_result = metrics.precision_at_k(ranks, interactions_, k=k, preserve_rows=False)\n # print (precision_result.mean())\n\n self.assertTrue (tr_recall_result.mean() == recall_result.mean())\n self.assertTrue (tr_precision_result.mean() == precision_result.mean())", "def update_user_feature(\n train, user_features, item_features, lambda_user,\n nz_user_itemindices, I):\n for d, user_d in enumerate(nz_user_itemindices): # iterate over non zero users\n nnz_items_per_user = len(user_d[1]) # Number of items user d has rated\n if (nnz_items_per_user == 0): nnz_items_per_user = 1\n \n # Least squares solution\n A_d = np.dot(item_features[user_d[1]].T, item_features[user_d[1]]) + lambda_user * nnz_items_per_user * I\n V_d = np.dot(item_features[user_d[1]].T, train[user_d[1],user_d[0]].todense())\n user_features[:,user_d[0]] = np.linalg.solve(A_d,V_d)", "def _estimate_ratings(self, U, V):\n Rhat = np.matmul(U, V.transpose()) # estimate R with UV^T\n Rhat = np.where(self.R_missing, 0, Rhat) # fill in missing values of R with 0s\n return Rhat", "def estimate(self, u, j):\n if not (self.trainset.knows_user(u) and self.trainset.knows_item(j)):\n raise PredictionImpossible('User and/or item is unknown.')\n\n u_ratings = self.trainset.ur[u]\n\n if self.weighting == 'linear':\n weight = sum(self.freqs[i, j, self.to_index(r)] for i, r in u_ratings)\n score = sum(self.sums[i, j, self.to_index(r)] for i, r in u_ratings)\n return score / weight\n\n # self.weighting == 'log' or None\n weights = [self.freqs[i, j, self.to_index(r)] for i, r in u_ratings]\n reciprocals = [1 / w if w else 0 for w in weights]\n scores = [self.sums[i, j, self.to_index(r)] for i, r in u_ratings]\n scores = [s * w for s, w in zip(scores, reciprocals)]\n\n if self.weighting is None:\n return np.mean(scores)\n # self.weighting == 'log'\n logs = [np.log(w + 1) if w >= 1 else 0 for w in weights]\n return np.dot(scores, logs) / np.sum(logs)", "def compute_user_user_sim_base_on_common_items(self):\n self.sim_matrix = {}\n for item in self.items.values():\n # convert to list of tuples for indexing\n users = list(item.covered_users.items())\n item_popularity = len(users)\n # iter through all user pairs\n for i in range(len(users)-1):\n for j in range(i+1, len(users)):\n user_A_info, user_B_info = users[i], users[j]\n # remember to update pair wise!\n self.update_user_user_sim(user_A_info, user_B_info,\n item_popularity)\n self.update_user_user_sim(user_B_info, user_A_info,\n item_popularity)", "def compute_pmf(X_inp, rank, c1, c2):\n\n X_arr = []\n u_arr = []\n v_arr = []\n d_arr = []\n\n X = deepcopy(X_inp)\n\n v_init = np.ones((np.shape(X)[1],1))\n\n for i in range(rank):\n X_arr.append(X)\n (d,u,v) = 
compute_factor(X, v_init, c1, c2)\n\n\n assert abs(npla.norm(u) - 1 ) < 1e-3\n assert abs(npla.norm(v) - 1 ) < 1e-3\n\n d_arr.append(d)\n u_arr.append(u)\n v_arr.append(v)\n\n toSub = np.outer(u,v)\n assert np.shape(toSub) == np.shape(X)\n X -= d * toSub\n\n return (X_arr, u_arr, v_arr, d_arr)", "def item_based_CF(G):\n\n\tcode, pmid = nx.bipartite.sets(G)\n\tX = nx.bipartite.biadjacency_matrix(G,pmid,column_order=code)\n\tmean_X = np.mean(X,axis=0)\n\tadjusted_X = X - mean_X\n\tsimilarity = cosine_similarity(X.T)\n\trating = mean_X + np.dot(adjusted_X,similarity)/np.sum(np.abs(similarity),axis=1)\n\tdf = pd.DataFrame(data=rating,index=pmid,columns=code)\n\n\treturn df", "def predict_rating(self, user_id, item_id):\n user_preference = self.latent_user_preferences[user_id]\n item_feature = self.latent_item_features[item_id]\n return user_preference.dot(item_feature)", "def create_user_item_array(self):\n user_em = self.user_factors.weight.detach()\n item_em = self.product_factors.weight.detach()\n user_b = self.user_bias.weight.detach()\n item_b = self.product_bias.weight.detach()\n\n user_item_array = (item_em + item_b) @ (user_em + user_b).transpose(0, 1)\n preds = self._prob_to_class(user_item_array).numpy()\n\n return preds", "def als(user_ids : numpy.ndarray, item_ids : numpy.ndarray,\n ratings : numpy.ndarray, num_item_factors : int,\n num_users: int, num_items : int, min_r_decrease=0.01,\n max_iterations=200, algorithm=1):\n # allocate \"user_factors\" and \"item_factors\"\n num_user_factors = num_item_factors + 1\n user_factors = numpy.random.uniform(-1, 1, num_users * num_user_factors)\n item_factors = numpy.random.uniform(-1, 1, num_items * num_item_factors)\n\n # argument construction\n user_ids_ptr = user_ids.ctypes.data_as(ctypes.POINTER(ctypes.c_double))\n item_ids_ptr = item_ids.ctypes.data_as(ctypes.POINTER(ctypes.c_double))\n\n ratings_length = len(ratings)\n ratings_ptr = ratings.ctypes.data_as(ctypes.POINTER(ctypes.c_double))\n\n user_factors_length = len(user_factors)\n user_factors_ptr = user_factors.ctypes.data_as(ctypes.POINTER(ctypes.c_double))\n\n item_factors_length = len(item_factors)\n item_factors_ptr = item_factors.ctypes.data_as(ctypes.POINTER(ctypes.c_double))\n\n iterations = _dll.als_from_python(\n user_ids_ptr, item_ids_ptr, ratings_length, ratings_ptr,\n num_item_factors, user_factors_length, user_factors_ptr,\n item_factors_length, item_factors_ptr, ctypes.c_double(min_r_decrease),\n max_iterations, algorithm)\n\n return user_factors, item_factors, iterations", "def NMF(model, maxIter=100, beliefs=None, verbose=False):\n if beliefs is None: beliefs = [Factor([Xi],1.0/Xi.states) for Xi in model.X]\n \n lnZ = sum([beliefs[Xi].entropy() for Xi in model.X])\n for f in model.factors:\n m = f.log()\n for v in f.vars: m *= beliefs[v]\n lnZ += m.sum()\n if verbose: print(\"Iter 0: \"+str(lnZ))\n\n for t in xrange(1,maxIter+1): # for each iteration:\n # Update all the beliefs via coordinate ascent:\n for Xi in model.X: # for each variable, \n bNew = 0.0 # compute E[ log f ] as a function of Xi:\n for f in model.factorsWith(Xi,copy=False): # for each factor f_a, compute:\n m = f.log() # E[log f_a] = \\sum \\log f_a \\prod b_v\n for v in f.vars - [Xi]: m *= beliefs[v]\n bNew += m.marginal([Xi]) # sum them up to get E[log f]\n bNew -= bNew.max() # (numerical issues)\n bNew = bNew.exp()\n bNew /= bNew.sum() # set b(Xi) = exp( E[log f] ) / Z\n beliefs[Xi] = bNew\n #\n # Compute the lower bound on the partition function:\n # E_b [ log f ] + H(b) = \\sum_a E[log f_a] + 
\\sum_i H(b_i) for independent beliefs\n lnZ = sum([beliefs[Xi].entropy() for Xi in model.X])\n for f in model.factors:\n m = f.log()\n for v in f.vars: m *= beliefs[v]\n lnZ += m.sum()\n if verbose: print(\"Iter \"+str(t)+\": \"+str(lnZ))\n return lnZ,beliefs", "def final_recommendation_score_matrix_helper_for_different_knn_types(self, type_of_method, index_matrix, value_matrix):\n if type_of_method == \"user\":\n for i in range(index_matrix.shape[1]):\n temp = index_matrix[:,i].astype(int)\n k_similar_users_profiles = self.utility_matrix[temp]\n k_similar_users_similarity_values = value_matrix[:,i]\n self.final_score_matrix[i,:] = np.dot(k_similar_users_similarity_values,k_similar_users_profiles)\n elif type_of_method == \"item\":\n for i in range(index_matrix.shape[1]):\n temp = index_matrix[:,i].astype(int)\n k_similar_item_profiles = self.utility_matrix.T[temp]\n k_similar_item_similarity_values = value_matrix[:,i]\n self.final_score_matrix[:,i] = np.dot(k_similar_item_similarity_values,k_similar_item_profiles)", "def get_users_features_matrix(games_features_matrix: csr_matrix, users_games_matrix: csr_matrix) -> csr_matrix:\n logging.getLogger(__name__).debug('Users features matrix calculating...')\n users_features_matrix = users_games_matrix * games_features_matrix\n logging.getLogger(__name__).debug('users_features_matrix.shape: ' + str(users_features_matrix.shape))\n return users_features_matrix", "def NMF(X,r):\n nmf_mdl = nmf.NMF(X,num_bases=r)\n nmf_mdl.initialization()\n nmf_mdl.factorize()\n return nmf_mdl.W,nmf_mdl.H,np.dot(nmf_mdl.W,nmf_mdl.H)", "def fit(self, data, num_features, lambda_user, lambda_item, gamma):\n user_matrix, item_matrix = self.init_matrix(data, num_features)\n nnz_users, nnz_items = data.nonzero()\n nnz_data = list(zip(nnz_users, nnz_items))\n for it in tqdm(range(self.num_epochs)):\n gamma /= 1.1\n np.random.shuffle(nnz_data)\n for u, i in nnz_data:\n user = user_matrix[u, :]\n item = item_matrix[:, i]\n err = data[u, i] - user @ item\n user_matrix[u, :] += gamma * (err * item - lambda_user * user)\n item_matrix[:, i] += gamma * (err * user - lambda_item * item)\n\n self.user_matrix = user_matrix\n self.item_matrix = item_matrix", "def evaluate_hmdb51_fusion():\n vlen = 0\n ob_suffix = '-max.feat.npy.gz'\n fv_suffix = '_fv.npy.gz'\n ob_root = '/home/syq/research_final/data/features/ob_hmdb51_pooled_python/'\n fv_root = '/home/syq/research_final/data/dense-traj/fv_hmdb51_python/'\n hmdb_splits = 'testTrainMulti_7030_splits/'\n categories = os.listdir(fv_root)\n weight = 1.0\n weights = [i / 20.0 for i in range(21)]\n acc_to_weights = {}\n\n for weight in weights:\n print \"Weight: %.2f\" % weight\n accs = np.zeros(3)\n for splitnum in range(1,4):\n ts = time.time()\n trainfiles, testfiles = hmdb51_splits.loadsplit(categories,\n hmdb_splits,\n splitnum)\n print 'Have %d train files' % len(trainfiles)\n print 'Have %d test files' % len(testfiles)\n\n if not vlen:\n fp = gzip.open(os.path.join(ob_root,'%s%s'%(trainfiles[0][0][:-4],\n ob_suffix)),\"rb\")\n vlen_ob = len(np.load(fp))\n fp.close()\n print \"OB vector length is %d\" % vlen_ob\n fp = gzip.open(os.path.join(fv_root,'%s%s'%(trainfiles[0][0][:-4],\n fv_suffix)),\"rb\")\n vlen_fv = len(np.load(fp))\n fp.close()\n print \"IDTFV vector length is %d\" % vlen_fv\n\n Dtrain_ob = np.zeros( (len(trainfiles),vlen_ob), np.float32 )\n Dtrain_fv = np.zeros( (len(trainfiles),vlen_fv), np.float32 )\n\n Ytrain = np.ones ( (len(trainfiles) )) * -1000\n\n for fi,f in enumerate(trainfiles):\n fp = 
gzip.open(os.path.join(ob_root,'%s%s'%(f[0][:-4],\n ob_suffix)),\"rb\")\n Dtrain_ob[fi][:] = np.load(fp)\n fp.close()\n Ytrain[fi] = f[1]\n\n fp = gzip.open(os.path.join(fv_root,'%s%s'%(f[0][:-4],\n fv_suffix)),\"rb\")\n Dtrain_fv[fi][:] = np.load(fp)\n fp.close()\n\n Dtest_ob = np.zeros( (len(testfiles),vlen_ob), np.float32 )\n Dtest_fv = np.zeros( (len(testfiles),vlen_fv), np.float32 )\n\n Ytest = np.ones ( (len(testfiles) )) * -1000\n\n for fi,f in enumerate(testfiles):\n fp = gzip.open(os.path.join(ob_root,'%s%s'%(f[0][:-4],\n ob_suffix)),\"rb\")\n Dtest_ob[fi][:] = np.load(fp)\n fp.close()\n Ytest[fi] = f[1]\n\n fp = gzip.open(os.path.join(fv_root,'%s%s'%(f[0][:-4],\n fv_suffix)),\"rb\")\n Dtest_fv[fi][:] = np.load(fp)\n fp.close()\n\n \"\"\"\n Early fusion\n Dtrain = np.hstack((Dtrain_ob, Dtrain_fv))\n Dtest = np.hstack((Dtest_ob, Dtest_fv))\n\n clf = OneVsRestClassifier(estimator=LinearSVC(C=100), n_jobs=8)\n acc = clf.fit(Dtrain, Ytrain).score(Dtest, Ytest)\n \"\"\"\n fv_clf = OneVsRestClassifier(estimator=LinearSVC(C=100), n_jobs=8)\n\n ob_clf = OneVsRestClassifier(estimator=SVC(C=10,\n cache_size=1000,\n kernel='linear',\n probability=True),\n n_jobs=-1)\n\n # Get probabilities for late fusion\n Dtrain_fv = fv_clf.fit(Dtrain_fv, Ytrain).decision_function(Dtrain_fv)\n Dtrain_ob = ob_clf.fit(Dtrain_ob, Ytrain).decision_function(Dtrain_ob)\n Dtest_fv = fv_clf.decision_function(Dtest_fv)\n Dtest_ob = ob_clf.decision_function(Dtest_ob)\n\n # Scale decision values b/w 0 and 1\n Dtrain_fv = preprocessing.normalize(Dtrain_fv)\n Dtrain_ob = preprocessing.normalize(Dtrain_ob)\n Dtest_fv = preprocessing.normalize(Dtest_fv)\n Dtest_ob = preprocessing.normalize(Dtest_ob)\n\n # Late fusion\n scores_train = (Dtrain_fv * weight) + (Dtrain_ob * (1 - weight))\n latefusion_clf = OneVsRestClassifier(estimator=LinearSVC(C=100), n_jobs=-1)\n latefusion_clf.fit(scores_train, Ytrain)\n\n scores_test = (Dtest_fv * weight) + (Dtest_ob * (1 - weight))\n acc = latefusion_clf.score(scores_test, Ytest)\n print 'Fold', splitnum, 'late fusion acc', acc\n print \"Train & testing time %.3f\" % (time.time() - ts)\n accs[splitnum-1] = acc\n acc_to_weights[weight] = accs\n\n print \"Mean accuracy: %.3f\" % accs.mean()\n with open(\"hmdb51_weight_gridsearch.txt\", \"w\") as f:\n for weight, accs in acc_to_weights.items():\n f.write(str(weight) + str(accs) + '\\n')\n return acc_to_weights\n\n \"\"\"\n with open('fv_hmdb51_accs.txt', 'w') as f:\n f.write(\"%s\\nMean:%.3f\" % (str(accs), np.mean(accs)))\n \"\"\"", "def mrr(self):\n _test = self.drop_bad_ratings()\n merged = pd.merge(left=_test, right=self.predict, on=['user', 'item'], how='right')[\n ['user', 'item', 'rating_x', 'rating_y']]\n nott = np.vectorize(lambda x: not x)\n mrrs = []\n for user in merged.user.unique():\n frame = merged[merged.user == user].sort_values(by='rating_y', ascending=False)\n true_ratings = frame.rating_x.values\n positions = np.where(nott(np.isnan(true_ratings)))[0]\n if len(positions) > 0:\n mrrs.append(1 / (positions[0] + 1))\n else:\n mrrs.append(0)\n\n return sum(mrrs) / len(mrrs)", "def feature_processing(array2d):\n new_array2d = np.zeros([array2d.shape[0], 29])\n # items/ orders\n new_array2d[:, 0] = array2d[:, 4] / array2d[:, 3]\n # cancels / orders\n new_array2d[:, 1] = array2d[:, 5] / array2d[:, 3]\n # returns / items\n new_array2d[:, 2] = array2d[:, 6] / array2d[:, 4]\n # voucher / orders\n new_array2d[:, 3] = array2d[:, 10] / array2d[:, 3]\n # female_items / female_items + male_items\n new_array2d[:, 4] = array2d[:, 15] / 
([1 if x == 0 else x for x in (array2d[:, 15] + array2d[:, 16])])\n # male_items / female_items + male_items\n new_array2d[:, 5] = array2d[:, 16] / ([1 if x == 0 else x for x in (array2d[:, 15] + array2d[:, 16])])\n # unisex_items / items\n new_array2d[:, 6] = array2d[:, 17] / array2d[:, 4]\n # wapp_items / items\n new_array2d[:, 7] = array2d[:, 18] / array2d[:, 4]\n # wftw_items / items\n new_array2d[:, 8] = array2d[:, 19] / array2d[:, 4]\n # mapp_items / items\n new_array2d[:, 9] = array2d[:, 20] / array2d[:, 4]\n # wacc_items / items\n new_array2d[:, 10] = array2d[:, 21] / array2d[:, 4]\n # macc_items / items\n new_array2d[:, 11] = array2d[:, 22] / array2d[:, 4]\n # mftw_items / items\n new_array2d[:, 12] = array2d[:, 23] / array2d[:, 4]\n # wspt_items / items\n new_array2d[:, 13] = array2d[:, 24] / array2d[:, 4]\n # mspt_items / items\n new_array2d[:, 14] = array2d[:, 25] / array2d[:, 4]\n # curvy_items / items\n # Curvy item has a strong correlation with gender, however they are very right-skewed use np.power(1/6) to smooth it\n new_array2d[:, 15] = np.power(array2d[:, 26] / array2d[:, 4], 1 / 6)\n # sacc_items / items\n new_array2d[:, 16] = array2d[:, 27] / array2d[:, 4]\n # msite_orders / orders\n new_array2d[:, 17] = array2d[:, 28] / array2d[:, 3]\n # desktop_orders / orders\n new_array2d[:, 18] = array2d[:, 29] / array2d[:, 3]\n # android_orders / orders\n new_array2d[:, 19] = array2d[:, 30] / array2d[:, 3]\n # ios_orders / orders\n new_array2d[:, 20] = array2d[:, 31] / array2d[:, 3]\n # other_device_orders / orders\n new_array2d[:, 21] = array2d[:, 32] / array2d[:, 3]\n # work_orders / orders\n new_array2d[:, 22] = array2d[:, 33] / array2d[:, 3]\n # home_orders / orders\n new_array2d[:, 23] = array2d[:, 34] / array2d[:, 3]\n # parcelpoint_orders / orders\n new_array2d[:, 24] = array2d[:, 35] / array2d[:, 3]\n # other_collection_orders / orders\n new_array2d[:, 25] = array2d[:, 36] / array2d[:, 3]\n # average_discount_onoffer\n new_array2d[:, 26] = array2d[:, 39]\n # average_discount_used\n new_array2d[:, 27] = array2d[:, 40]\n # revenue / order\n new_array2d[:, 28] = array2d[:, 41] / array2d[:, 3]\n\n # normalize by each feature\n new_array2d = normalize(new_array2d, axis=0, norm='max')\n return new_array2d", "def __init__(\n self,\n n_users,\n n_products,\n n_factors=20,\n optimizer=torch.optim.SGD,\n lr=0.001,\n l2=0,\n momentum=0,\n loss_fn=nn.BCEWithLogitsLoss,\n activation=nn.Sigmoid,\n ):\n super(NNMatrixFactorization, self).__init__()\n\n self.l2 = l2\n self.lr = lr\n self.momentum = momentum\n self.user_factors = ScaledEmbedding(n_users, n_factors)\n self.product_factors = ScaledEmbedding(n_products, n_factors)\n self.user_bias = ZeroEmbedding(n_users, 1)\n self.product_bias = ZeroEmbedding(n_products, 1)\n\n self.activation = activation()\n self.loss_fn = loss_fn()\n self.optimizer = optimizer(\n self.parameters(), lr=self.lr, weight_decay=self.l2, momentum=self.momentum\n )", "def get_scores(self):\n hist = self.confusion_matrix\n # hist = [TN,FP;FN,TP]\n acc = np.diag(hist).sum() / hist.sum()\n acc_cls = np.diag(hist) / hist.sum(axis=1)\n acc_cls = np.nanmean(acc_cls)\n iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))\n # iou = iu.sum() / self.n_classes\n mean_iou = np.nanmean(iu) # if classes = 2: iou = miou\n freq = hist.sum(axis=1) / hist.sum()\n fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()\n cls_iou = dict(zip(range(self.n_classes), iu))\n\n ##############################################\n tn = hist[0, 0]\n tp = np.diag(hist).sum() - 
tn\n fp = np.triu(hist, 1).sum()\n fn = np.tril(hist, -1).sum()\n precision = tp / (tp + fp)\n recall = tp / (tp + fn)\n f1 = 2 * precision * recall / (precision + recall)\n\n # for medical img, img_seg \\in [0,1]\n dice = 2 * tp / (tp + tp + fn + fp)\n # dice = f1-score\n dsc = 2 * tp / (tp + fn + fp)\n # dsc = jaccard\n # voe = 2 * abs(fp + fn) / (tp + tp + fn + fp)\n # voe = 1 - dsc\n\n k2 = {\n # \"Overall Acc: \\t\": acc,\n 'Mean Acc': float(judge_nan(acc_cls)),\n # \"FreqW Acc : \\t\": fwavacc,\n 'Mean IoU': float(judge_nan(mean_iou)),\n 'F1-score': float(judge_nan(f1)),\n 'DSC': float(judge_nan(dsc)),\n 'Precision': float(judge_nan(precision)),\n 'Recall': float(judge_nan(recall)),\n }\n\n return k2", "def predict(user_id, item_id, ratings):\n\n # convert long to wide\n ratings_wide = ratings.pivot(index='user', columns='movie', values='rating')\n\n # compute user similarities\n similarities = compute_similarities(user_id, ratings_wide)\n \n prediction = predict_rating(item_id, ratings_wide, similarities, N=N_NEIGHBORS)\n \n return prediction", "def __factor_matrix(self, R, K, alpha, steps, beta, error_limit):\n # Transform regular array to numpy array\n R = numpy.array(R)\n\n # Generate P - N x K\n # Use random values to start. Best performance\n N = len(R)\n M = len(R[0])\n P = numpy.random.rand(N, K)\n\n # Generate Q - M x K\n # Use random values to start. Best performance\n Q = numpy.random.rand(M, K)\n Q = Q.T\n\n error = 0\n\n # iterate through max # of steps\n for step in xrange(steps):\n\n # iterate each cell in r\n for i in xrange(len(R)):\n for j in xrange(len(R[i])):\n if R[i][j] > 0:\n\n # get the eij (error) side of the equation\n eij = R[i][j] - numpy.dot(P[i, :], Q[:, j])\n\n for k in xrange(K):\n # (*update_rule) update pik_hat\n P[i][k] = P[i][k] + alpha * (2 * eij * Q[k][j] - beta * P[i][k])\n\n # (*update_rule) update qkj_hat\n Q[k][j] = Q[k][j] + alpha * ( 2 * eij * P[i][k] - beta * Q[k][j] )\n\n # Measure error\n error = self.__error(R, P, Q, K, beta)\n\n # Terminate when we converge\n if error < error_limit:\n break\n\n # track Q, P (learned params)\n # Q = Products x feature strength\n # P = Users x feature strength\n self.Q = Q.T\n self.P = P\n\n self.__print_fit_stats(error, N, M)", "def recommend(self, user_id, N=10):\n scores = self.user_factors[user_id] @ self.product_factors.T\n best = np.argpartition(scores, -N)[-N:]\n return sorted(zip(best, scores[best]), key=lambda x: -x[1])", "def weighted_majority_vote(c_pred,m_pred,f_pred,acc_c,acc_m,acc_f, dataset):\n c,m,f = np.argmax(c_pred),np.argmax(m_pred),np.argmax(f_pred)\n coarse = np.zeros(2)\n middle = np.zeros(4)\n fine = np.zeros(10)\n\n if dataset == 'cifar10':\n middle = np.zeros(5)\n coarse[c] = 1\n middle[m] = 1\n fine[f] = 1\n res = np.zeros(10)\n w1 = np.log(acc_c/(1.-acc_c))\n w2 = np.log(acc_m/(1.-acc_m))\n w3 = np.log(acc_f/(1.-acc_f))\n if dataset == 'cifar10':\n for i in range(10):\n if i <2:\n res[i] = w1*coarse[0] + w2*middle[0] + w3*fine[i]\n elif 2<=i <4:\n res[i] = w1*coarse[0] + w2*middle[1] + w3*fine[i]\n elif 4 <=i<6:\n res[i] = w1*coarse[1] + w2*middle[2] + w3*fine[i]\n elif 6<=i<8:\n res[i] = w1*coarse[1] + w2*middle[3] + w3*fine[i]\n else:\n res[i] = w1*coarse[1] + w2*middle[4] + w3*fine[i]\n else :\n for i in range(10):\n if i <3:\n res[i] = w1*coarse[0] + w2*middle[0] + w3*fine[i]\n elif 3<=i <5:\n res[i] = w1*coarse[0] + w2*middle[1] + w3*fine[i]\n elif 5 <=i<8:\n res[i] = w1*coarse[1] + w2*middle[2] + w3*fine[i]\n else:\n res[i] = w1*coarse[1] + w2*middle[3] + w3*fine[i]\n 
index = np.argmax(res)\n return(index)", "def _compute_mu_factor2(*input_mols):\n mu_factor = 1\n for mol in input_mols:\n mu_factor *= np.prod(fact(mol))\n return mu_factor", "def update_model(self):\n temp_uservisits = self._uservisits\n unique_users = temp_uservisits['userid'].unique()\n for itemidx, itemid in self._allitems.iterrows():\n self.__itemid2idx[str(itemid['itemid'])] = itemidx\n self.__itemidx2id[itemidx] = str(itemid['itemid'])\n for useridx, userid in enumerate(unique_users):\n self.__userid2idx[str(userid)] = useridx\n self.__useridx2id[useridx] = str(userid)\n useritem = set(temp_uservisits[temp_uservisits['userid'] == userid]['itemid'].astype('str').values)\n allitem = set(self.__itemid2idx.keys())\n itemsnotinuser = allitem - useritem\n self.__useridx2nvitems[useridx] = list(itemsnotinuser)\n temp = pandas.DataFrame([{\"userid\": userid, \"itemid\": t, \"rating\": 0, \"timestamp\": \"NA\"} for t in itemsnotinuser])\n temp_uservisits = pandas.concat([temp_uservisits, temp])\n userid = temp_uservisits['userid'].values\n itemid = temp_uservisits['itemid'].values\n rating = temp_uservisits['rating'].values\n useridx = [self.__userid2idx[str(int(uid))] for uid in userid]\n itemidx = [self.__itemid2idx[str(int(iid))] for iid in itemid]\n model = self.__get_model(num_users=len(temp_uservisits['userid'].unique()), num_items=len(self._allitems),\n mf_dim=self.__numtopics)\n if self.__learner.lower() == \"adagrad\":\n model.compile(optimizer=Adagrad(lr=self.__learningrate), loss='binary_crossentropy')\n elif self.__learner.lower() == \"rmsprop\":\n model.compile(optimizer=RMSprop(lr=self.__learningrate), loss='binary_crossentropy')\n elif self.__learner.lower() == \"adam\":\n model.compile(optimizer=Adam(lr=self.__learningrate), loss='binary_crossentropy')\n else:\n model.compile(optimizer=SGD(lr=self.__learningrate), loss='binary_crossentropy')\n\n for epoch in range(self.__numiterations):\n t1 = time.time()\n hist = model.fit([numpy.array(useridx), numpy.array(itemidx)], numpy.array(rating),\n batch_size=self.__batchsize, nb_epoch=1, verbose=0, shuffle=True)\n t2 = time.time()\n self.__recommender1 = model\n rating = list(map(numpy.double, rating))\n self.__itemuser = csr_matrix((rating, (useridx, itemidx)), shape=(len(set(useridx)), len(set(itemidx))))\n self.__recommender2 = implicit.als.AlternatingLeastSquares(factors=self.__numtopics)\n self.__recommender2.fit(self.__itemuser)", "def create_matrix(ratings_df, jokes_df):\r\n \"\"\" note: empty entries are populated with zeros \"\"\"\r\n\r\n matrix_handler = matrix_object()\r\n\r\n num_joke_features = 5\r\n\r\n ''' add all joke features '''\r\n for row_idx in range(0, jokes_df.shape[0]):\r\n joke_idx = int(jokes_df.iloc[row_idx][\"Idx\"])\r\n isAggressive = jokes_df.iloc[row_idx][\"isAggressive\"]\r\n isIncongruence = jokes_df.iloc[row_idx][\"isIncongruence\"]\r\n generation = jokes_df.iloc[row_idx][\"Generation\"]\r\n isMillenial = (generation == \"Millenial\")\r\n isGenX = (generation == \"Gen X\")\r\n isGenZ = (generation == \"Gen Z\")\r\n\r\n if(int(isMillenial) == 1.0 and int(isGenX) == 1.0):\r\n raise Valueerror()\r\n\r\n matrix_handler.add_value(joke_idx - 1, 0, int(isAggressive))\r\n matrix_handler.add_value(joke_idx - 1, 1, int(isIncongruence))\r\n matrix_handler.add_value(joke_idx - 1, 2, int(isMillenial))\r\n matrix_handler.add_value(joke_idx - 1, 3, int(isGenX))\r\n matrix_handler.add_value(joke_idx - 1, 4, int(isGenZ))\r\n\r\n ''' add all ratings '''\r\n for row_idx in range(0, ratings_df.shape[0]):\r\n for 
joke_idx in range(1, 122):\r\n col_name = \"joke\" + str(joke_idx)\r\n matrix_handler.add_value(joke_idx - 1, row_idx + num_joke_features, ratings_df.iloc[row_idx][col_name])\r\n\r\n matrix = matrix_handler.compile_matrix()\r\n new_df = matrix_handler.to_df(matrix)\r\n\r\n return matrix, new_df", "def feature_selection_classifier_3(array2d):\n newArray2d = np.zeros([array2d.shape[0], 18])\n # female_items / female_items + male_items\n newArray2d[:, 0] = array2d[:, 4]\n # male_items / female_items + male_items\n newArray2d[:, 1] = array2d[:, 5]\n # wapp_items / items\n newArray2d[:, 2] = array2d[:, 7]\n # wftw_items / items\n newArray2d[:, 3] = array2d[:, 8]\n # mapp_items / items\n newArray2d[:, 4] = array2d[:, 9]\n # wacc_items / items\n newArray2d[:, 5] = array2d[:, 10]\n # macc_items / items\n newArray2d[:, 6] = array2d[:, 11]\n # mftw_items / items\n newArray2d[:, 7] = array2d[:, 12]\n # curvy_items / items\n newArray2d[:, 8] = array2d[:, 15]\n # msite_orders / orders\n newArray2d[:, 9] = array2d[:, 17]\n # desktop_orders / orders\n newArray2d[:, 10] = array2d[:, 18]\n # android_orders / orders\n newArray2d[:, 11] = array2d[:, 19]\n # ios_orders / orders\n newArray2d[:, 12] = array2d[:, 20]\n # other_device_orders / orders\n newArray2d[:, 13] = array2d[:, 21]\n # home_orders / orders\n newArray2d[:, 14] = array2d[:, 23]\n # other_collection_orders / orders\n newArray2d[:, 15] = array2d[:, 25]\n # average_discount_onoffer\n newArray2d[:, 16] = array2d[:, 26]\n # average_discount_used\n newArray2d[:, 17] = array2d[:, 27]\n return newArray2d", "def fit(self):\n\n rmse_old = .0\n for epoch in range(self.epochs):\n error_final = .0\n\n for user, item, feedback in self.feedback_triples:\n pu = self.p[user] + self.y_sum_rows(user)\n\n # Calculate error\n eui = feedback - self._predict_svd_plus_plus_score(user, item, pu, False)\n error_final += (eui ** 2.0)\n\n # update bu and bi\n self.bu[user] += self.bias_learn_rate * (eui - self.delta_bias * self.bu[user])\n self.bi[item] += self.bias_learn_rate * (eui - self.delta_bias * self.bi[item])\n\n # Adjust the factors\n norm_eui = eui / self.n_u[user]\n\n i_f = self.q[item]\n\n # Compute factor updates\n delta_u = np.subtract(np.multiply(eui, i_f), np.multiply(self.delta, self.p[user]))\n self.p[user] += np.multiply(self.learn_rate, delta_u)\n\n delta_i = np.subtract(np.multiply(eui, pu), np.multiply(self.delta, i_f))\n self.q[item] += np.multiply(self.learn_rate, delta_i)\n\n # update y (implicit factor)\n common_update = norm_eui * i_f\n\n for j in self.items_id_seen_by_user[user]:\n delta_y = np.subtract(common_update, self.delta * self.y[j])\n self.y[j] += self.learn_rate * delta_y\n\n rmse_new = np.sqrt(error_final / self.train_set[\"number_interactions\"])\n\n if np.fabs(rmse_new - rmse_old) <= self.stop_criteria:\n break\n else:\n rmse_old = rmse_new", "def ndpm(self):\n\n merged = pd.merge(left=self.test, right=self.predict, on=['user', 'item'], how='inner')[\n ['user', 'rating_x', 'rating_y']]\n ndpms = []\n for user in merged.user.unique():\n frame = merged[merged.user == user]\n if frame.shape[0] <= 1:\n continue\n C_plus = self.num_of_ordered_positive(frame, 'rating_x', 'rating_y')\n C_minus = self.num_of_ordered_negative(frame, 'rating_x', 'rating_y')\n C_u = self.num_of_ordered(frame, 'rating_x')\n if C_u == 0:\n continue\n C_s = self.num_of_ordered(frame, 'rating_y')\n C_u0 = C_u - (C_plus + C_minus)\n ndpms.append(1 - (C_minus + 0.5 * C_u0) / C_u)\n\n return sum(ndpms) / len(ndpms)", "def gen_W(users, items, ratings):\n\n # 
initiate graph\n user = users.tolist()\n item = items.tolist()\n rating = ratings.tolist()\n B = nx.Graph()\n B.add_nodes_from(user, bipartite=0)\n B.add_nodes_from(item, bipartite=1)\n\n # create edges\n for i in range(len(user)):\n B.add_edges_from([(user[i], item[i])])\n B[user[i]][item[i]]['weight'] = rating[i]\n\n users_unique = users.unique() \n items_unique = items.unique()\n\n # BiAdjacency matrix - for bipartite network\n W = biadjacency_matrix(B, users_unique,items_unique).toarray()\n\n # sparce form of Biadjacency matrix\n W = spa.csr_matrix(W)\n print('Shape of W: '+str(W.shape))\n\n return W, users_unique, items_unique", "def alternate_ls (u_num, Y, P, C, reg):\n\n\t# get # of items/users and # of latent factors\n\t[i_num, f_num] = Y.shape\n\n\t# output buffer\n\tX = np.zeros((u_num, f_num))\n\n\t# precalculate YtY to improve the performance\n\tYtY = Y.T * Y\n\n\t# iterate over each user/item\n\tfor u in range(u_num):\n\n\t\t# store the diagonal elements of the matrix Cu discussed in the paper in a vector\n\t\tCu = C[u,:]\n\n\t\t# store the coresponding row/column of the preference matrix\n\t\tPu = P[u,:]\n\n\t\t# compute Cu-I\n\t\tCu_I = Cu - 1\n\n\t\t# calculate Yt(Cu-I)Y\n\t\tYtCu_IY = np.zeros((f_num, f_num))\n\t\tCuIY = np.multiply(Y, Cu_I.T) # weight each row of Y with Cu-I\n\t\tfor row in range(f_num):\n\t\t\tfor col in range(f_num):\n\t\t\t\tYtCu_IY[row,col] = Y[:,row].T * CuIY[:,col]\n\t\t\n\t\t# left term : ((YtCuY + regI)^(-1)) = (YtY + Yt(Cu-I)Y + regI)^(-1)\n\t\tleft_inv = YtY + YtCu_IY + reg*np.eye(f_num)\n\t\tleft = np.linalg.inv(left_inv)\n\n\t\t# right term : YtCuPu\n\t\tright = Y.T * np.multiply(Cu.T, Pu.T)\n\n\t\t# compute the latent factor of the user/item\n\t\tx = left * right\n\t\t\n\t\t# store it in a matrix\n\t\tX[u,:] = x.T\n\n\t# return an MxF or NxF matrix\n\treturn np.matrix(X)", "def calc_score(self, user_id, item_id): \n p = np.dot(self.U[user_id], self.V[item_id])\n if self.trunc_score_rule==None:pass\n else: p=self.trunc_score_rule(p)\n \n return p", "def evaluate_ucf50_fusion():\n accs = np.zeros(3)\n ob_suffix = '-max.feat.npy.gz'\n fv_suffix = '_fv.npy.gz'\n ob_root = '/home/syq/research_final/data/features/ob_ucf50_pooled_python/'\n fv_root = '/home/syq/research_final/data/features/fv_ucf50_python/'\n fv_groups, full, sets = utility.split_data(fv_root,\n suffix=fv_suffix,\n useLooCV=False)\n\n ob_groups, _, _ = utility.split_data(ob_root,\n suffix=ob_suffix,\n useLooCV=False)\n weights = [i / 20.0 for i in range(8, 13)]\n acc_to_weights = {}\n\n for weight in weights:\n print \"Weight: %.2f\" % weight\n accs = np.zeros(2)\n for i in xrange(2):\n ts = time.time()\n Dtrain_fv, Dtest_fv, Ytrain, Ytest = utility.load_groups(\n fv_groups, np.setdiff1d(full, sets[i]),\n sets[i], scale=False, verbose=False)\n\n Dtrain_ob, Dtest_ob, Ytrain, Ytest = utility.load_groups(\n ob_groups, np.setdiff1d(full, sets[i]),\n sets[i], scale=False, verbose=False)\n\n fv_clf = OneVsRestClassifier(estimator=LinearSVC(C=100), n_jobs=8)\n\n ob_clf = OneVsRestClassifier(estimator=SVC(C=10,\n cache_size=1000,\n kernel='linear',\n probability=True),\n n_jobs=-1)\n\n # Get probabilities for late fusion\n Dtrain_fv = fv_clf.fit(Dtrain_fv, Ytrain).decision_function(Dtrain_fv)\n Dtrain_ob = ob_clf.fit(Dtrain_ob, Ytrain).decision_function(Dtrain_ob)\n Dtest_fv = fv_clf.decision_function(Dtest_fv)\n Dtest_ob = ob_clf.decision_function(Dtest_ob)\n\n # Scale decision values b/w 0 and 1\n Dtrain_fv = preprocessing.normalize(Dtrain_fv)\n Dtrain_ob = 
preprocessing.normalize(Dtrain_ob)\n Dtest_fv = preprocessing.normalize(Dtest_fv)\n Dtest_ob = preprocessing.normalize(Dtest_ob)\n\n # weighted averaging\n scores_train = (Dtrain_fv * weight) + (Dtrain_ob * (1 - weight))\n latefusion_clf = OneVsRestClassifier(estimator=LinearSVC(C=100), n_jobs=-1)\n latefusion_clf.fit(scores_train, Ytrain)\n\n scores_test = (Dtest_fv * weight) + (Dtest_ob * (1 - weight))\n latefusion_acc = latefusion_clf.score(scores_test, Ytest)\n print 'Fold', i, 'late fusion acc', latefusion_acc\n print \"Train & testing time %.3f\" % (time.time() - ts)\n accs[i] = latefusion_acc\n\n print \"Mean accuracy: %.3f\" % accs.mean()\n with open(\"fv_ucf50_accs_5fold_gridsearch.txt\", \"w\") as f:\n for weight, accs in acc_to_weights.items():\n f.write(str(weight) + str(accs) + '\\n')\n return acc_to_weights", "def _predict_user_item(self, user, item):\n if not isinstance(user, int):\n user = self._user_to_ndx[user]\n if not isinstance(item, int):\n item = self._item_to_ndx[item]\n\n try:\n rating_mean = self._averages[user]\n except AttributeError:\n raise RuntimeError('Must fit before predicting')\n\n other_users = [other for other in self._users if other != user and\n np.isfinite(self._votes[other][item])]\n weights = np.array([self._weight(user, other)\n for other in other_users])\n deviations = np.array([self._votes[other][item] - self._averages[other]\n for other in other_users])\n\n weight_sum = np.sum(np.absolute(weights))\n if weight_sum < _EPSILON:\n return rating_mean # No similar users, so guess their avg rating\n\n norm_const = 1 / weight_sum\n\n weighted_avg = np.sum(weights * deviations)\n return rating_mean + norm_const * weighted_avg", "def construct_classifier_for_user(user_indexed_reviews, restaurant_indexed_reviews):\t\n # compute all_reviews\n all_reviews = []\n for restaurant in restaurant_indexed_reviews:\n reviews_content = ''\n for user in restaurant_indexed_reviews[restaurant]:\n reviews = restaurant_indexed_reviews[restaurant][user]\n for review in reviews:\n reviews_content += review['text'][0:len(review['text'])-1]\n all_reviews.append(reviews_content)\n\n print 'extract feature...'\n # construct classifier\n X_total, y_total, restaurant_feature = extracttfidf_user(user_indexed_reviews, all_reviews, restaurant_indexed_reviews)\n\n print 'construct classifier...'\n i = 0\n for user in user_indexed_reviews:\n print i\n i += 1\n classifier = MultinomialNB(alpha=.01)\n X_train = X_total[user]\n y_train = y_total[user]\n if X_train != None:\n try:\n classifier.fit(X_train, y_train)\n update_rating(user, restaurant_feature, classifier, user_indexed_reviews, restaurant_indexed_reviews)\n except:\n continue", "def model_frequency(ref, alt, bases_all_reads, f_vector, error_rates):\n # model Mx: NO alt allele\n if ref == alt:\n return np.array([error_rates[ref][eb] for eb in \"ACGT-\"])\n # model Mxy: two alleles\n mat = np.zeros((K, 5))\n for b, base in enumerate(\"ACGT-\"):\n for i, observed_base in enumerate(bases_all_reads):\n mat[i][b] = error_rates[base][observed_base]\n\n m2 = (mat * f_vector)[:, [bases_index[alt], bases_index[ref]]]\n alt_ref_f_vector = (m2 / m2.sum(axis=1)[:, np.newaxis]).sum(axis=0) / K\n\n l = [alt_ref_f_vector[0], ]*5\n l[bases_index[ref]] = alt_ref_f_vector[1]\n updated_f_vector = np.array(l)\n\n return updated_f_vector", "def __init__(self, customer_vendor_full, valid_rating_mean):\r\n super(User_CF, self).__init__()\r\n self.customer_vendor_full = customer_vendor_full\r\n self.customer_vendor_ratings = 
self.select_features()\r\n self.customer_vendor_matrix = self.customer_vendor_ratings.pivot(\r\n index='customer_id', columns='vendor_id', values='mean_rating') # (26779, 100)\r\n self.rating_matrix = self.customer_vendor_matrix.fillna(0).values.astype(np.float32)\r\n self.valid_rating_mean = valid_rating_mean\r\n self.vendor2rating = self.get_vendors_mean()\r\n self.customer_similarity, = self.get_similarity()", "def __init__(self, dim=20, nIter=5, lamb=0.05, alph=40,\n user_features=[\"user\"], item_features=[\"item\"]):\n self.setParams(dim,nIter, lamb, alph)\n self.user_features = {}\n self.item_features = {}\n self.factors = {}\n\n self.user_column_names = user_features\n self.item_column_names = item_features", "def evaluate_model(test_user_item_matrix, user_mat, portfolio_mat):\r\n n = np.count_nonzero(~np.isnan(test_user_item_matrix))\r\n\r\n # keep track of the sum of squares\r\n sse = 0\r\n\r\n for user_id in test_user_item_matrix.index:\r\n for offer_id in test_user_item_matrix.columns.values:\r\n if ~np.isnan(test_user_item_matrix.loc[user_id, offer_id]):\r\n pred = predict(test_user_item_matrix, user_mat, portfolio_mat, user_id, offer_id)\r\n if pred:\r\n diff = test_user_item_matrix.loc[user_id, offer_id] - pred\r\n sse += diff ** 2\r\n return sse / n", "def make_predictions(movies, ratings_train, ratings_test):\n ###TODO\n \n user_result = [] \n \n for index,row in ratings_test.iterrows():\n userid_test = row['userId']\n #print(\"userid_test::\",userid_test) \n movieid_test = row['movieId'] \n #print(\"movieid_test::\",movieid_test) \n x = list(movies[movies.movieId==movieid_test]['features'])[0]\n #print(\"CSR_GOTT+X::\",x)\n #print(\"TYPE of CSR_GOTT_X::\",type(x))\n subset_train = ratings_train[ratings_train.userId == userid_test]\n #print(\"SUB MOVIE SET::\",subset_train)\n #print(\"TYPE of SUB MOVIE SET::\",type(x))\n total_if_zero=0\n rating_if_zero=0\n sum_main_result=0\n sum_cosine=0 \n for index1,row1 in subset_train.iterrows():\n userid_train = row1['userId']\n #print(\"userid_train::\",userid_train) \n if(userid_test == userid_train ):\n #print(\"HII IN IFFF:::\")\n movieid_train = row1['movieId']\n #print(\"movieid_train::\",movieid_train)\n rating_train = row1['rating']\n #print(\"rating_train::\",rating_train)\n total_if_zero = total_if_zero + 1 \n rating_if_zero = rating_if_zero + rating_train\n y = list(movies[movies.movieId==movieid_train]['features'])[0]\n #print(\"CSR_GOTT_Y::\",y)\n #print(\"TYPE of CSR_GOTT_Y::\",type(y))\n result_cos = cosine_sim(x,y)\n sum_main_result += result_cos * rating_train\n sum_cosine += result_cos \n \n if(sum_main_result != 0):\n user_result.append(sum_main_result/sum_cosine)\n #print(\"user_result::\",user_result) \n else:\n user_result.append(rating_if_zero / total_if_zero)\n #print(\"user_result::\",user_result) \n \n return_result_arr = np.array(user_result) \n \n return return_result_arr\n \n pass", "def predict(self, user_id, item_id):\n # DONEreturn prediction for given pair\n return self._user_factors[user_id, : ].dot(self._item_factors[item_id, :])", "def get_user_vector(self, user_id):\r\n if user_id in self.df_ratmat.index:\r\n # user from base dataset\r\n return np.array([self.df_ratmat.loc[user_id]])\r\n # construct a vector out of app user rating data\r\n movies_rated = self.get_valid_user_ratings(user_id)\r\n movie_size = self.df_ratmat.shape[1]\r\n cols = [str(i) for i in range(1, movie_size + 1)]\r\n df = pd.DataFrame(columns=cols)\r\n new_row = {}\r\n for i, r in movies_rated[['item_id', 
'rating']].iterrows():\r\n new_row[str(int(r['item_id']))] = int(r['rating'])\r\n df = df.append(new_row, ignore_index=True)\r\n # mark 0 (=not rated) if not rated by the user\r\n return df.fillna(0)", "def _factorsX(self, inputs):\n return tensor.dot(inputs[0], self.wxf)", "def print_maxes(mat):\r\n u_to_likes = load_or_create(\"/Matrix/UserIdToLikes.matrix\", create_matrix_user_likes)\r\n dict_names = load_or_create('/DICT/MovieIdToName.dict', create_dict_names)\r\n dict_ecc = load_or_create('/DICT/MovieIdToItemEccentricity.dict', create_dict_ecc)\r\n user_to_ecc = load_or_create('/DICT/UserIdToUserEccentricity.dict',createDictUserIdToUserEccentricity)\r\n dict_userid_to_moviesliked = load_or_create('/DICT/UserIdToLikedMovies.dict', create_dict_user_id_to_liked_items)\r\n\r\n dict_userid_to_recommends = dict()\r\n print(\"STARTING ECC CALC\")\r\n recommends = []\r\n for i in range(int(mat.shape[0]*0.5)):\r\n row = mat.getrow(i)\r\n if len(row.nonzero()[0]) != 0:\r\n # print(u_to_likes.getrow(i).nonzero()[1])\r\n if len(u_to_likes.getrow(i).nonzero()[1])<10 and user_to_ecc[i+1]>0:\r\n # print(\"Amount of recommends:\",len(row.nonzero()[0]))\r\n row = row.toarray()[0].tolist()\r\n max_val = max(val for val in row if str(row.index(val) + 1) not in dict_userid_to_moviesliked[i+1])\r\n print('SUM is:',sum(val for val in row if str(row.index(val) + 1) not in dict_userid_to_moviesliked[i+1]))\r\n print('SUM with all is:',sum(val for val in row))\r\n\r\n index_max=row.index(max_val) + 1\r\n\r\n recommends.append(\r\n [max_val, row.index(max_val) + 1, i + 1, [i + 1 for i in u_to_likes.getrow(i).nonzero()[1]],\r\n [row.index(max_val) + 1],user_to_ecc[i+1]])\r\n\r\n recommends = sorted(recommends, key=itemgetter(0))\r\n\r\n for i in recommends[-100:]:\r\n print(\"MAX id:\", i[1])\r\n print(\"MAX val:\", i[0])\r\n print(\"Users ECC:\",i[5])\r\n print(\"for user:\", i[2])\r\n print(\"MOVIES HE ALREADY LIKED\", 50 * \"=\")\r\n item_names_print(i[3], dict_names, dict_ecc)\r\n print(\"Movie Well recommend:\" + 50 * '*')\r\n item_names_print(i[4], dict_names, dict_ecc)\r\n print(50 * \"#\")", "def test_matrix(self, tol):\n\n res_static = qml.QFT.compute_matrix(2)\n res_dynamic = qml.QFT(wires=[0, 1]).matrix()\n res_reordered = qml.QFT(wires=[0, 1]).matrix([1, 0])\n\n expected = np.array(\n [\n [0.5 + 0.0j, 0.5 + 0.0j, 0.5 + 0.0j, 0.5 + 0.0j],\n [0.5 + 0.0j, 0.0 + 0.5j, -0.5 + 0.0j, -0.0 - 0.5j],\n [0.5 + 0.0j, -0.5 + 0.0j, 0.5 - 0.0j, -0.5 + 0.0j],\n [0.5 + 0.0j, -0.0 - 0.5j, -0.5 + 0.0j, 0.0 + 0.5j],\n ]\n )\n\n assert np.allclose(res_static, expected, atol=tol, rtol=0)\n assert np.allclose(res_dynamic, expected, atol=tol, rtol=0)\n\n expected_permuted = [\n [0.5 + 0.0j, 0.5 + 0.0j, 0.5 + 0.0j, 0.5 + 0.0j],\n [0.5 + 0.0j, 0.5 - 0.0j, -0.5 + 0.0j, -0.5 + 0.0j],\n [0.5 + 0.0j, -0.5 + 0.0j, 0.0 + 0.5j, -0.0 - 0.5j],\n [0.5 + 0.0j, -0.5 + 0.0j, -0.0 - 0.5j, 0.0 + 0.5j],\n ]\n assert np.allclose(res_reordered, expected_permuted, atol=tol, rtol=0)", "def calculate_total(self, calcu_user_n=20, user_n=20, item_n=10, seed=1):\n self._split_data(seed=seed)\n self._set_top(user_n=user_n, item_n=item_n)\n test_user_list = list(set(self.test['userId'].unique()))\n user_list = [test_user_list[random.randint(0, len(test_user_list)) - 1]\n for i in range(calcu_user_n)]\n hit = 0 # Hit score\n all_recom = 0 # num of all recommendations, calculate the accuracy rate\n like_item = 0 # num of the item the user likes in the test set, calculate the recall rate\n all_recom_set = set()\n all_item = 
set(self.train['movieId'].unique())\n item_popular = Counter(self.train['movieId'].values)\n ret = 0\n n = 0\n print('\\nCalculate all evaluation indicators...')\n for user in tqdm(user_list):\n recom_data = self._get_recommend(user, )\n recom_item = set([data[0] for data in recom_data])\n user_item = set(\n self.test[self.test['userId'] == user]['movieId'].values)\n overlap = recom_item & user_item\n hit += len(overlap)\n like_item += len(user_item)\n all_recom += len(recom_item)\n all_recom_set.update(recom_item)\n for rec in set([data[0] for data in recom_data]):\n ret += math.log(1 + item_popular.get(rec))\n n += 1\n print('\\nCalculate over')\n print('Precision is: ', hit / (all_recom * 1.0))\n print('Recall is: ', hit / (like_item * 1.0))\n print('Coverage is: ', len(all_recom_set) / (len(all_item) * 1.0))\n print('Popularity is:', (ret / n * 1.0))", "def prediction(prediction_file_name, clusters_list, svd_use_flag):\n \n coords = misc_functions.getWindowCoords()\n \n test_users = range(coords[0], coords[2] + 1) \n test_items = range(coords[1], coords[3] + 1)\n \n #print \"len(test_users) = \", len(test_users)\n #print \"len(test_items) = \", len(test_items)\n #print \"test_items = \", test_items\n \n # this matrix to be written as result finally\n #misc_functions.step()\n prediction_matrix = zeros((len(test_users), len(test_items)), dtype = float)\n \n training_matrix = scipy.io.mmio.mmread(\"history.mtx\").tocsr()\n \n item_X_meta_matrix = scipy.io.mmio.mmread(\"../../../well_done/items-metas_global.mtx\").toarray()\n \n # getting meta matrices for corresponding using metas\n meta_ctr = 0\n meta_matrices = []\n for meta in METAS_TO_USE:\n if svd_use_flag:\n meta_matrice_file_name = \"users-\" + METAS_TO_USE[meta] + \".svd.mtx\"\n else:\n meta_matrice_file_name = \"users-\" + METAS_TO_USE[meta] + \".mtx\"\n exec(\"meta_matrices.append(scipy.io.mmio.mmread(\\\"\" + meta_matrice_file_name + \"\\\").toarray())\")\n\n #user_counter = 0\n #for user in test_users:\n for cur_cluster in clusters_list:\n \n #print \"cur_cluster[0] = \", cur_cluster[0]\n user = int (cur_cluster[0].split(\"\\t\")[1])\n #print \"user #\", user\n \n #user_metas = {} - changed to list because of problem with dimension\n user_metas = []\n \n values = zeros((len(METAS_TO_USE), len(test_items)), dtype = float)\n meta_ctr = 0\n for meta in METAS_TO_USE:\n \n #print \" meta_matrices = \", meta_matrices\n #print \" meta_matrices[meta_ctr] = \", meta_matrices[meta_ctr]\n user_vector = meta_matrices[meta_ctr][user]\n #print \" user_vector = \", user_vector\n #print \" len(user_metas) = \", len(user_metas)\n #print \" meta_ctr = \", meta_ctr\n #print \"meta = \", meta\n #misc_functions.step()\n \n # normalizing counts of visited metas to use them as weights later\n if max(user_vector) != 0:\n user_metas.append(1.0 * user_vector / max(user_vector))\n else:\n user_metas.append(zeros((len(user_vector), ), dtype = float))\n #print \" user_metas[meta_ctr] = \", user_metas[meta_ctr]\n #print \" user_metas[meta_ctr].shape = \", user_metas[meta_ctr].shape\n \n #for item in test_items:\n for cluster in cur_cluster[1 : ]:\n start_cluster_item = int(cluster.split(\"\\t\")[0])\n stop_cluster_item = int(cluster.split(\"\\t\")[2])\n \n cluster_items = range(start_cluster_item, stop_cluster_item + 1)\n \n for item in cluster_items:\n meta_value = item_X_meta_matrix[item, meta]\n \n # PRICE\n if meta == 8:\n meta_value = priceToPriceCat(meta_value)\n \n # CITY HEURISTIC\n if meta == 11:\n if user_metas[meta_ctr][meta_value - 1] < 
CITY_TRESHOLD:\n values[:, item - coords[1]] *= CITY_COEF\n \"\"\"\n # DAYTIME\n if meta == 17:\n meta_value = dayTime(meta_value)\n \"\"\"\n \n #print \" meta_value = \", meta_value\n #print \" item = \", item\n #step()\n values[meta_ctr][item - coords[1]] = (user_metas[meta_ctr])[meta_value - 1]\n \n \"\"\"HEURISTICS \"\"\"\n \n \n \n \n \n \"\"\"\\\\ HEURISTICS \"\"\"\n\n meta_ctr += 1\n #print \"values[:, 0:10] = \", values[:, 0:10]\n prediction_vector = numpy.sum(META_WEIGHTS * values, axis = 0)\n #print \"prediction_vector[0:10] = \", prediction_vector[0:10]\n #print \"sum(prediction_vector) = \", sum(prediction_vector)\n prediction_matrix[user - coords[0]] = prediction_vector\n \n #step()\n \n# ===== END OF MAIN CYCLE ===== \n\n result_matrix = scipy.sparse.csr_matrix(prediction_matrix)\n scipy.io.mmio.mmwrite(prediction_file_name, result_matrix, field = 'real', precision = 5)", "def test_init_ratings():\n env = FixedRating(num_users=50,\n num_items=50,\n rating_frequency=1.0,\n num_init_ratings=100)\n env.seed(0)\n _, _, ratings = env.reset()\n assert len(ratings) == 100\n for (user_id, item_id), (rating, context) in ratings.items():\n assert context.shape == (0,)\n assert user_id < 50\n assert item_id < 50\n if rating == 5.0:\n assert item_id >= 25\n else:\n assert item_id < 25", "def proba_fm(m_pred,f_pred, dataset):\n p = np.zeros(10)\n if dataset == 'cifar10':\n for i in range(10):\n if i <4:\n if i <2:\n p[i] = (m_pred[0])*(f_pred[i]/np.sum(f_pred[0:2]))\n else:\n p[i] = (m_pred[1])*(f_pred[i]/np.sum(f_pred[2:4]))\n else:\n if i <6:\n p[i] = (m_pred[2])*(f_pred[i]/np.sum(f_pred[4:6]))\n elif i <8:\n p[i] = (m_pred[3])*(f_pred[i]/np.sum(f_pred[6:8]))\n else:\n p[i] = (m_pred[4])*(f_pred[i]/np.sum(f_pred[8:]))\n else :\n for i in range(10):\n if i <5:\n if i <3:\n p[i] = (m_pred[0])*(f_pred[i]/np.sum(f_pred[0:3]))\n else:\n p[i] = (m_pred[1])*(f_pred[i]/np.sum(f_pred[3:5]))\n else:\n if i <8:\n p[i] = (m_pred[2])*(f_pred[i]/np.sum(f_pred[5:8]))\n else:\n p[i] = (m_pred[3])*(f_pred[i]/np.sum(f_pred[8:]))\n return(p)", "def _create_sparse_train_and_test(ratings, n_users, n_items):\n \n # pick a random set of data as testing data, sorted ascending\n test_set_size = len(ratings) / TEST_SET_RATIO\n test_set_idx = np.random.choice(xrange(len(ratings)), size=test_set_size, replace=False)\n test_set_idx = sorted(test_set_idx)\n \n # use the remaining data to create a training set\n ts_ratings = ratings[test_set_idx]\n tr_ratings = np.delete(ratings, test_set_idx, axis=0)\n \n # create training and test matrices as coo_matrix\n u_tr, i_tr, r_tr = zip(*tr_ratings)\n tr_sparse = coo_matrix((r_tr, (u_tr, i_tr)), shape=(n_users, n_items))\n u_ts, i_ts, r_ts = zip(*ts_ratings)\n test_sparse = coo_matrix((r_ts, (u_ts, i_ts)), shape=(n_users, n_items))\n \n return tr_sparse, test_sparse", "def prediction(uid, pair, rating_bd, sim_bd, item_bd):\n iid, real_rating = pair[0], pair[1]\n if iid not in sim_bd.value.keys():\n return ()\n iid_neighbors = [\n (i[0], i[1], rating_bd.value[i[0]]) for i in sim_bd.value[iid]]\n average_iid_rating = item_bd.value[iid][0]\n sim_rating = []\n for info in iid_neighbors:\n niid, nsim, ratings = info\n sim_rating += [\n (iid, nsim, rating[1] - item_bd.value[niid][0], rating[2])\n for rating in ratings if uid in rating[0]]\n if len(sim_rating) != 0:\n sim_ratings = [\n (line[1] * line[2], abs(line[1]), line[3])\n for line in sim_rating]\n predicted_rating_no_decay = average_iid_rating + sum(\n map(lambda line: line[0], sim_ratings)) / sum(\n map(lambda line: 
line[1], sim_ratings))\n predicted_rating_decay = \\\n average_iid_rating + add_decay(sim_ratings)\n else:\n predicted_rating_no_decay = average_iid_rating\n predicted_rating_decay = average_iid_rating\n return iid, real_rating, \\\n self.bound_rating(predicted_rating_no_decay), \\\n self.bound_rating(predicted_rating_decay)", "def select_next(X, U, mu,\n scoremethod='lowhigh',\n missingmethod='none',\n feature_weights=[],\n oldscores=[], oldreproj=[]):\n\n print(\"------------ SELECTING --------------\")\n if len(U) == 0:\n printt(\"Empty DEMUD model: selecting item number %d from data set\" % \\\n (opts['iitem']))\n return opts['iitem'], [], []\n\n if X.shape[1] < 1 or len(U) == 0 or len(mu) == 0:\n printt(\"Error: No data in X and/or U and/or mu.\")\n return None, [], []\n\n if X.shape[0] != U.shape[0] or X.shape[0] != mu.shape[0]:\n printt(\"Mismatch in dimensions; must have X mxn, U mxk, mu mx1.\")\n return None, [], []\n\n # If oldscores is empty, compute the score for each item\n if len(oldscores) == 0:\n (scores, reproj) = score_items(X, U, mu, scoremethod, missingmethod)\n elif len(oldreproj) == 0:\n printt(\"Error: oldscores provided, but not oldreproj.\")\n return None, [], []\n else: # both are valid, so use them here\n (scores, reproj) = (oldscores, oldreproj)\n\n # Select and return index of item with max reconstruction error,\n # plus the updated scores and reproj\n m = scores.argmax()\n #print('mu:',mu)\n #print('selected:', X[:,m])\n #print('selected-mu:', (X-mu)[:,m])\n #print('reproj:', reproj[:,m])\n #print('reproj-mu:', (reproj-mu)[:,m])\n #input()\n\n return m, scores, reproj", "def test_conversion():\r\n f1 = factor([0,1],[2,2],scipy.rand(4))\r\n f2 = factor([1,2],[2,2],scipy.rand(4))\r\n f3 = factor([3],[2],scipy.rand(2))\r\n\r\n F = FactorList([f1,f2,f3])\r\n theta = factors2ExpFam(F)\r\n F2 = expfam2Factors(theta)\r\n ratio = F2.JointDistn().val/ (F.JointDistn().val)\r\n ratio = ratio/ratio[0]\r\n print scipy.allclose(ratio,1)", "def get5x5matrix(self): #modified from nxvasc get3x3matrix()\n try:\n i = na.identity(3)\n \n self.d124 = i.copy()\n self.ds124 = na.zeros(124,na.float64)\n \n for k in range(1,124):\n self.d124 = na.concatenate((self.d124,i))\n# print len(self.d124)\n count = 0\n a = []\n for k in range(-2,3):\n for j in range(-2,3):\n for i in range(-2,3):\n if( i != 0 or j != 0 or k != 0 ):\n self.ds124[count] = math.sqrt(i**2+j**2+k**2)\n count += 1\n a.append(i)\n a.append(j)\n a.append(k)\n# print len(a)\n a = na.reshape(na.array(a),(372,1))\n# print len(self.d124)\n self.d124 = na.concatenate((self.d124,a),axis=1)\n except Exception as error:\n print(\"failed in get5x5matrix(): \", error)", "def fit(self, user_x_product, latent_features_guess=2, learning_rate=0.0002, steps=5000, regularization_penalty=0.02, convergeance_threshold=0.001):\n print 'training model...'\n return self.__factor_matrix(user_x_product, latent_features_guess, learning_rate, steps, regularization_penalty, convergeance_threshold)", "def update_factors(self,u,i,j,update_u=True,update_i=True):\n #print(\"session run\")\n loss_v = self.sess.run(self.train_step , feed_dict={\n self.u: u,\n self.i: i,\n self.j: j})\n\n returnText = \"\"\n\n if self.alg_type == \"TFL\" or self.alg_type == \"TFLWM\":\n sum_lambda = 0\n for k in self.sim_matrix_names:\n sum_lambda += abs(self.sim_lambda[k].eval())\n #print(sum_lambda,self.sim_lambda)\n for k in self.sim_matrix_names:\n if math.isnan(sum_lambda):\n print(\"sim_lambda overflow\")\n tf.assign(self.sim_lambda[k], [self.sim_lambda_zero], 
validate_shape=False).eval()\n returnText = \"sim_lambda overflow\"\n else:\n tf.assign(self.sim_lambda[k], self.sim_lambda[k].eval()/sum_lambda).eval()\n else:\n for k in self.sim_matrix_names:\n val = self.sim_lambda[k].eval()\n if math.isnan(val[0]):\n print(\"sim_lambda overflow\")\n tf.assign(self.sim_lambda[k], [self.sim_lambda_zero], validate_shape=False).eval()\n returnText = \"sim_lambda overflow\"\n if val[0] <= 0.0:\n tf.assign(self.sim_lambda[k], [self.delta], validate_shape=False).eval()\n elif val[0] >= 1.0:\n tf.assign(self.sim_lambda[k], [self.one - self.delta], validate_shape=False).eval()\n\n return returnText", "def calc_adv_U(self):\n num_U = 0\n adv_U = numpy.zeros((3,3), float)\n\n for atm in self:\n ## use the atom's U matrix if it exists, otherwise use the\n ## temperature factor\n\n if atm.U is not None:\n adv_U += atm.U\n num_U += 1\n\n return adv_U / num_U", "def calculate_user_similarity_profile(self, ratings_vector):\r\n num_users, num_movies = self.ratings_matrix.get_shape()\r\n\r\n user_similarities = sp.dok_matrix((1, num_users))\r\n for i in range(num_users):\r\n\r\n user_similarities[0, i] = self.calculate_pairwise_user_similarity(self.ratings_matrix.getrow(i), ratings_vector)\r\n\r\n return user_similarities.tocsr()", "def calc_metrics(metric_scores_list):\n\n N_split, N_miss, N_add, Q_P, Q_R, Q_F, N_gt, N_pred = [], [], [], [], [], [], [], []\n Q_rand, Q_jaccard, Q_aggregated_jaccard, Q_ctc, Q_piou = [], [], [], [], []\n tp, fp, fn = [], [], []\n\n for score in metric_scores_list:\n N_split.append(score['N_split']), N_miss.append(score['N_miss']), N_add.append(score['N_add'])\n Q_P.append(score['Q_P']), Q_R.append(score['Q_R']), Q_F.append(score['Q_F'])\n Q_rand.append(score['Q_rand']), Q_jaccard.append(score['Q_jaccard'])\n Q_aggregated_jaccard.append(score['Q_aggregated_jaccard'])\n if \"Q_ctc\" in score:\n Q_ctc.append(score['Q_ctc']), \n Q_piou.append(score['Q_piou'])\n N_gt.append(score['N_gt']), N_pred.append(score['N_pred'])\n tp.append(score['tp']), fp.append(score['fp']), fn.append(score['fn'])\n\n N_split, N_miss, N_add = np.array(N_split), np.array(N_miss), np.array(N_add)\n N_gt, N_pred = np.array(N_gt), np.array(N_pred)\n tp, fp, fn = np.array(tp), np.array(fp), np.array(fn)\n Q_P_macro, Q_R_macro, Q_F_macro = np.mean(np.array(Q_P)), np.mean(np.array(Q_R)), np.mean(np.array(Q_F))\n Q_P_micro = np.sum(tp) / (np.sum(tp) + np.sum(fp)) if (np.sum(tp) + np.sum(fp)) > 0 else 0\n Q_R_micro = np.sum(tp) / (np.sum(tp) + np.sum(fn)) if (np.sum(tp) + np.sum(fn)) > 0 else 0\n Q_rand_macro, Q_jaccard_macro = np.mean(np.array(Q_rand)), np.mean(np.array(Q_jaccard))\n Q_aggregated_jaccard_macro = np.mean(np.array(Q_aggregated_jaccard))\n Q_ctc_macro, Q_piou_macro = np.mean(np.array(Q_ctc)), np.mean(np.array(Q_piou))\n\n metrics = {\n 'Q_split_micro': float(np.sum(N_split) / np.sum(N_gt)),\n 'Q_split_macro': float(np.mean(N_split / N_gt)),\n 'Q_miss_micro': float(np.sum(N_miss) / np.sum(N_gt)),\n 'Q_miss_macro': float(np.mean(N_miss / N_gt)),\n 'Q_add_micro': float(np.sum(N_add) / np.sum(N_gt)),\n 'Q_add_macro': float(np.mean(N_add / N_gt)),\n 'N_gt': int(np.sum(N_gt)),\n 'N_pred': int(np.sum(N_pred)),\n 'Q_rand_macro': float(Q_rand_macro),\n 'Q_jaccard_macro': float(Q_jaccard_macro),\n 'Q_aggregated_jaccard_macro': float(Q_aggregated_jaccard_macro),\n 'Q_ctc_macro': float(Q_ctc_macro),\n 'Q_piou_macro': float(Q_piou_macro),\n 'Q_P_micro': float(Q_P_micro),\n 'Q_P_macro': float(Q_P_macro),\n 'Q_R_micro': float(Q_R_micro),\n 'Q_R_macro': float(Q_R_macro),\n 
'Q_F_macro': float(Q_F_macro),\n 'Q_F_micro': float(2 * Q_P_micro * Q_R_micro / (Q_P_micro + Q_R_micro)) if (Q_P_micro + Q_R_micro) > 0 else 0\n }\n return metrics", "def multiply_fisher_factor(self, vector: jnp.ndarray) -> jnp.ndarray:\n return utils.scalar_mul(\n self.multiply_fisher_factor_unweighted(vector), jnp.sqrt(self.weight))", "def predict(self, u, i, P=None, Q=None, F=None, w=None, user_bias=None, item_bias=None):\n if P is None:\n P = self.P\n if Q is None:\n Q = self.Q\n if F is None:\n F = self.F\n if w is None:\n w = self.w\n if user_bias is None:\n user_bias = self.user_bias\n if item_bias is None:\n item_bias = self.item_bias\n\n known_user = self._known('user', u)\n known_item = self._known('item', i)\n rui_hat = self.mean\n if known_user:\n rui_hat += user_bias[u]\n if known_item:\n rui_hat += item_bias[i]\n if known_user and known_item:\n F_sum = np.sum(F[self.Fr[u], :], axis=0)\n #F_num = np.sqrt(self.Fr[u].shape[0]) # Try without sqrt\n F_num = self.Fr[u].shape[0]\n if F_num > 0:\n F_sum /= F_num\n rui_hat += (F_sum + w * P[u, :]).dot(Q[:, i])\n # Apply potential non-linearity (activation) g\n rui_hat = self.g(rui_hat)\n return rui_hat", "def build_user_user_similarity_matrix(self, event_data):\n self.compute_user_user_sim_base_on_common_items()\n self.standardize_sim_values()", "def test_lu_factor():\n\t#[A, b] = lu_read('test1.txt')\n\t# it is poor form to read an external file into a test function, as above\n\tA = np.array([\n\t\t[ 2., 3., -4., 2.],\n\t\t[-4., -5., 6., -3.],\n\t\t[ 2., 2., 1., 0.],\n\t\t[-6., -7., 14., -4.]])\t\n\tLU,p = lu_factor(A, pivot=False)\n\tLU_soln = np.array([\n\t\t[ 2, 3,-4, 2],\n\t\t[-2, 1,-2, 1],\n\t\t[ 1,-1, 3,-1],\n\t\t[-3, 2, 2, 2]])\t\n\tassert norm(LU - LU_soln) < 1.e-10\t\n\n\n\t# test 2\n\t[A2, b2] = lu_read('test2.txt')\t\t\t\t\t\t# read a matrix and RHS vector\n\tLU2,p2 = lu_factor(A2) \t\t\t\t\t\t\t\t# change display to False when LU_FACTOR working\n\tLU_soln2 = np.array([\n\t\t [0.01, 0., 0., 0., 0., 0., 0., 0., 0., 0., 1],\n\t\t [-100., 0.01, 0., 0., 0., 0., 0., 0., 0., 0., 100],\n\t\t [0., -100., 0.01, 0., 0., 0., 0., 0., 0., 0., 10000],\n\t\t [0., 0., -100., 0.01, 0., 0., 0., 0., 0., 0., 1000000],\n\t\t [0., 0., 0., -100., 0.01, 0., 0., 0., 0., 0., 100000000],\n\t\t [0., 0., 0., 0., -100., 0.01, 0., 0., 0., 0., 10000000000],\n\t\t [0., 0., 0., 0., 0., -100., 0.01, 0., 0., 0., 1000000000000],\n\t\t [0., 0., 0., 0., 0., 0., -100., 0.01, 0., 0., 100000000000000],\n\t\t [0., 0., 0., 0., 0., 0., 0., -100., 0.01, 0., 10000000000000000],\n\t\t [0., 0., 0., 0., 0., 0., 0., 0., -100, 0.01, 1000000000000000000],\n\t\t [0., 0., 0., 0., 0., 0., 0., 0., 0., -100., 100000000000000000000]])\n\tassert norm(LU2 - LU_soln2) < 1.e-10" ]
[ "0.6282623", "0.62621325", "0.60587424", "0.6045789", "0.6040702", "0.6002245", "0.5951851", "0.59179777", "0.59059614", "0.58943605", "0.589419", "0.587945", "0.5858747", "0.58264637", "0.5798391", "0.57919794", "0.574544", "0.5737683", "0.5693354", "0.5668427", "0.56560904", "0.565145", "0.5631024", "0.5595122", "0.55878645", "0.5560108", "0.55080867", "0.5492173", "0.5471623", "0.54714626", "0.5467762", "0.54663336", "0.5445127", "0.5433413", "0.5393641", "0.53686476", "0.53676146", "0.53589565", "0.53349036", "0.53346205", "0.53162575", "0.52985704", "0.52616876", "0.5258938", "0.5252249", "0.52483475", "0.52311176", "0.52225137", "0.5220475", "0.52171636", "0.5198918", "0.51938695", "0.5181593", "0.51795137", "0.517728", "0.51635116", "0.5155167", "0.5149963", "0.51439816", "0.51246136", "0.5115969", "0.5105796", "0.5086189", "0.50842553", "0.5078923", "0.5075725", "0.5066789", "0.5055791", "0.505126", "0.5040315", "0.50370866", "0.5033983", "0.5028434", "0.5021411", "0.5020064", "0.5016048", "0.4992419", "0.49900427", "0.49797973", "0.49795952", "0.49742973", "0.49736246", "0.49720296", "0.49668035", "0.49609035", "0.49598455", "0.49560553", "0.49484748", "0.4937664", "0.49338803", "0.4931834", "0.4930841", "0.49298579", "0.49219868", "0.4919247", "0.49165457", "0.49121004", "0.49035972", "0.49034655", "0.49016014" ]
0.72323316
0
calculate the confidence of each user-item pair
def cal_confidence(dat):
    alpha = 40.0
    confidence = np.zeros(dat.shape)
    confidence = 1 + alpha * dat
    return np.matrix(confidence)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _predict_user_item(self, user, item):\n if not isinstance(user, int):\n user = self._user_to_ndx[user]\n if not isinstance(item, int):\n item = self._item_to_ndx[item]\n\n try:\n rating_mean = self._averages[user]\n except AttributeError:\n raise RuntimeError('Must fit before predicting')\n\n other_users = [other for other in self._users if other != user and\n np.isfinite(self._votes[other][item])]\n weights = np.array([self._weight(user, other)\n for other in other_users])\n deviations = np.array([self._votes[other][item] - self._averages[other]\n for other in other_users])\n\n weight_sum = np.sum(np.absolute(weights))\n if weight_sum < _EPSILON:\n return rating_mean # No similar users, so guess their avg rating\n\n norm_const = 1 / weight_sum\n\n weighted_avg = np.sum(weights * deviations)\n return rating_mean + norm_const * weighted_avg", "def find_predictions(actives, train_rdd_gbitem_dict, train_rdd_gbuser_dict, num_items):\n active_user = actives[0][0]\n active_item = actives[0][1]\n\n # -----------------------------------\n # train_rdd_gbitem_dict = (item, ([(user,r),(user,r)...],avg_of_item))\n # train_rdd_gbuser_dict = (user, [(item,r),(item,r)...]\n\n if active_user not in train_rdd_gbuser_dict and active_item not in train_rdd_gbitem_dict:\n return (active_user, active_item), 2.5\n\n # all user, ratings that have rated active_item\n if active_item in train_rdd_gbitem_dict:\n active_item_avg = train_rdd_gbitem_dict[active_item][1]\n active_item_dict = dict(train_rdd_gbitem_dict[active_item][0]) # {user: rating, user: rating, ...}\n else:\n # item not found in training set\n # new item problem.\n average_of_user_list = train_rdd_gbuser_dict[active_user]\n average_of_user = sum([x[1] for x in average_of_user_list]) / len(average_of_user_list)\n return (active_user, active_item), average_of_user\n\n # user rated items - all (item, ratings) that the user has rated\n if active_user in train_rdd_gbuser_dict:\n active_user_rated_items = train_rdd_gbuser_dict[active_user] # [(item, rating), (item, rating), ...]\n else:\n # user not found in training set\n # new user problem.\n return (active_user, active_item), train_rdd_gbitem_dict[active_item][1]\n\n similarity_list = []\n for item, rating in active_user_rated_items:\n item_dict = dict(train_rdd_gbitem_dict[item][0])\n item_avg = train_rdd_gbitem_dict[item][1]\n similarity = find_similarity(dict(active_item_dict), active_item_avg, dict(item_dict), item_avg)\n similarity_list.append((rating, similarity))\n\n # Have obtained similarity list for active item and item from the above code.\n # Filter according to a top 'N' items and then take avg rating.\n # similarity_list.sort(key=lambda x: x[1], reverse=True)\n # similarity_list = similarity_list[:len(similarity_list) // 4]\n # similarity_list = [(x[0], x[1]*abs(x[1])**1.5) for x in similarity_list]\n # print(similarity_list)\n pred_rating = find_weighted_average(similarity_list, num_items)\n\n # for i in similarity_list:\n # print(i)\n # print(\"Pred-rating: \", pred_rating)\n\n return (active_user, active_item), pred_rating", "def predict(self, user_id, item_id):\n # DONEreturn prediction for given pair\n return self._user_factors[user_id, : ].dot(self._item_factors[item_id, :])", "def predict_rating(user_id,item_id):\n user_preference = latent_user_preferences[user_id]\n item_preference = latent_item_features[item_id]\n return user_preference.dot(item_preference)", "def predict_rating(self, user_id, item_id):\n user_preference = self.latent_user_preferences[user_id]\n 
item_feature = self.latent_item_features[item_id]\n return user_preference.dot(item_feature)", "def calc_score(self, user_id, item_id): \n p = np.dot(self.U[user_id], self.V[item_id])\n if self.trunc_score_rule==None:pass\n else: p=self.trunc_score_rule(p)\n \n return p", "def compute_user_user_sim_base_on_common_items(self):\n self.sim_matrix = {}\n for item in self.items.values():\n # convert to list of tuples for indexing\n users = list(item.covered_users.items())\n item_popularity = len(users)\n # iter through all user pairs\n for i in range(len(users)-1):\n for j in range(i+1, len(users)):\n user_A_info, user_B_info = users[i], users[j]\n # remember to update pair wise!\n self.update_user_user_sim(user_A_info, user_B_info,\n item_popularity)\n self.update_user_user_sim(user_B_info, user_A_info,\n item_popularity)", "def getConfidence(self,LeftTup,RightTup):\n\n tup=LeftTup+RightTup\n _intersection=self.getSupport(tup)\n _LHS=self.getSupport(LeftTup)\n _confidence=_intersection/_LHS\n return (_confidence)", "def precision(self, user_list):\n hit = 0\n all_recom = 0\n print('Calculate precision: ')\n for user in tqdm(user_list):\n recom_data = self._get_recommend(user)\n recom_item = set([data[0] for data in recom_data])\n user_item = set(\n self.test[self.test['userId'] == user]['movieId'].values)\n overlap = recom_item & user_item\n hit += len(overlap)\n all_recom += len(recom_item)\n print('\\nprecision is: ', hit / (all_recom * 1.0))\n return hit / (all_recom * 1.0)", "def inner_product(user_profiles, item_attributes, normalize_users=True, normalize_items=False):\n if normalize_users:\n # this is purely an optimization that prevents numpy from having\n # to multiply huge numbers\n user_profiles = normalize_matrix(user_profiles, axis=1)\n if normalize_items:\n item_attributes = normalize_matrix(item_attributes.T, axis=1).T\n assert user_profiles.shape[1] == item_attributes.shape[0]\n scores = np.dot(user_profiles, item_attributes)\n return scores", "def prediction(uid, pair, rating_bd, sim_bd, item_bd):\n iid, real_rating = pair[0], pair[1]\n if iid not in sim_bd.value.keys():\n return ()\n iid_neighbors = [\n (i[0], i[1], rating_bd.value[i[0]]) for i in sim_bd.value[iid]]\n average_iid_rating = item_bd.value[iid][0]\n sim_rating = []\n for info in iid_neighbors:\n niid, nsim, ratings = info\n sim_rating += [\n (iid, nsim, rating[1] - item_bd.value[niid][0], rating[2])\n for rating in ratings if uid in rating[0]]\n if len(sim_rating) != 0:\n sim_ratings = [\n (line[1] * line[2], abs(line[1]), line[3])\n for line in sim_rating]\n predicted_rating_no_decay = average_iid_rating + sum(\n map(lambda line: line[0], sim_ratings)) / sum(\n map(lambda line: line[1], sim_ratings))\n predicted_rating_decay = \\\n average_iid_rating + add_decay(sim_ratings)\n else:\n predicted_rating_no_decay = average_iid_rating\n predicted_rating_decay = average_iid_rating\n return iid, real_rating, \\\n self.bound_rating(predicted_rating_no_decay), \\\n self.bound_rating(predicted_rating_decay)", "def predict(self, users, items):\n users_t = torch.tensor(users, dtype=torch.int64, device=self.device)\n items_t = torch.tensor(items, dtype=torch.int64, device=self.device)\n with torch.no_grad():\n scores = torch.mul(\n torch.cat(\n (self.user_encode(users_t)[0], self.user_emb(users_t)), dim=1\n ),\n torch.cat(\n (self.item_encode(items_t)[0], self.item_emb(items_t)), dim=1\n ),\n ).sum(dim=1)\n return scores", "def estimate(self, u, i):\n\n if u not in self.user_means:\n return(np.mean([self.global_mean,\n 
self.item_means[i]]))\n\n if i not in self.item_means:\n return(np.mean([self.global_mean,\n self.user_means[u]]))\n\n return(np.mean([self.global_mean,\n self.user_means[u],\n self.item_means[i]]))", "def confidence(self) -> float:\n return float(self.class_scores[self.class_num])", "def cal_hit_gbratio(self):\n full, top_k = self._subjects, self._top_k\n top_k = full[full['rank']<=top_k]\n #print({d['user'].iloc[0]:d['ratings'].to_list() for i,d in top_k.groupby('user')})\n score = 0.0\n # golden items hit in the top_K items\n score_1 = {d['user'].iloc[0]:len(d[(d['item'].isin(self._test_items[d['user'].iloc[0]]))& (d['ratings']==1.0)]) for i,d in top_k.groupby('user')}\n score_2 = {d['user'].iloc[0]:len(d[(d['item'].isin(self._test_items[d['user'].iloc[0]]))& (d['ratings']==0.0)]) for i,d in top_k.groupby('user')} \n score_ratio = [(score_1[d]-score_2[d]/self._test_ratings[d]) if self._test_ratings[d]!=0 else 0 for d in self._test_ratings.keys()]\n\n #print(np.mean(score_ratio))\n #print(score_1)\n #score = score_1 + score_2\n return np.mean(score_ratio)", "def _weight(self, user_a, user_i):\n a_votes = self._votes[user_a]\n i_votes = self._votes[user_i]\n\n a_mean = self._averages[user_a]\n i_mean = self._averages[user_i]\n\n item_ndx = np.logical_and(np.isfinite(a_votes),\n np.isfinite(i_votes))\n\n a_deviations = a_mean - a_votes[item_ndx]\n i_deviations = i_mean - i_votes[item_ndx]\n\n numerator = np.sum(a_deviations * i_deviations)\n denominator = (np.sum(np.power(a_deviations, 2)) *\n np.sum(np.power(i_deviations, 2)))\n\n if denominator < _EPSILON:\n return 0\n return numerator / np.sqrt(denominator)", "def coverage(self, user_list):\n all_recom_set = set()\n all_item = set(self.train['movieId'].values)\n print('\\nCalculated coverage: ')\n for user in tqdm(user_list):\n recom_data = self._get_recommend(user)\n recom_item = set([data[0] for data in recom_data])\n all_recom_set.update(recom_item)\n print('\\nCoverage is: ', len(all_recom_set) / (len(all_item) * 1.0))\n return len(all_recom_set) / (len(all_item) * 1.0)", "def pred_for_user(self,u):\r\n ids=np.where(self.Y_data_n[:,0]==u)[0]\r\n items_rated_by_u=Y_data_n[ids,1].tolist()\r\n pred_ratings=[]\r\n for i in range(self.n_items):\r\n if i not in items_rated_by_u:\r\n pred_ratings.append(self.pred(u,i))\r\n return pred_ratings", "def prediction(self, user, item):\n return self._prob_to_class(self(user, item))", "def score(self, user_idx, item_idx=None):\n if item_idx is None:\n if self.train_set.is_unk_user(user_idx):\n raise ScoreException(\n \"Can't make score prediction for (user_id=%d)\" % user_idx\n )\n\n known_item_scores = self.V.dot(self.U[user_idx, :])\n return known_item_scores\n else:\n if self.train_set.is_unk_user(user_idx) or self.train_set.is_unk_item(\n item_idx\n ):\n raise ScoreException(\n \"Can't make score prediction for (user_id=%d, item_id=%d)\"\n % (user_idx, item_idx)\n )\n\n user_pred = self.V[item_idx, :].dot(self.U[user_idx, :])\n\n user_pred = sigmoid(user_pred)\n if self.train_set.min_rating == self.train_set.max_rating:\n user_pred = scale(user_pred, 0.0, self.train_set.max_rating, 0.0, 1.0)\n else:\n user_pred = scale(\n user_pred,\n self.train_set.min_rating,\n self.train_set.max_rating,\n 0.0,\n 1.0,\n )\n\n return user_pred", "def calc_rmse(self, data):\n res= data.score- data[['userid','itemid']].apply(lambda row:self.calc_score(row[0], row[1]),axis=1)\n res=[el**2 for el in np.array(res)]\n return np.sqrt(np.sum(res)/data.shape[0])", "def calc_confidence(itemsets, support_counts, N, 
separator=-1):\n X = ' & '.join(sorted(itemsets[ : separator]))\n X_and_Y = ' & '.join(sorted(itemsets))\n\n if X_and_Y in support_counts.keys():\n supp_X_to_Y = support_counts[X_and_Y] / N\n conf_X_to_Y = support_counts[X_and_Y] / support_counts[X] # supports[X] is present if supports[key] is\n return (supp_X_to_Y, conf_X_to_Y)\n\n return None", "def cal_hit_ratio(self):\n full, top_k = self._subjects, self._top_k\n top_k = full[full['rank']<=top_k]\n score = 0.0\n # golden items hit in the top_K items\n score_1 = sum([(len(d[(d['item'].isin(self._test_items[d['user'].iloc[0]]))& (d['ratings']==1.0)])) for i,d in top_k.groupby('user')])\n score_2 = sum([(len(d[(d['item'].isin(self._test_items[d['user'].iloc[0]]))& (d['ratings']==0.0)])) for i,d in top_k.groupby('user')])\n score = score_1 - score_2\n return score/full['user'].nunique()", "def estimate(self, u, j):\n if not (self.trainset.knows_user(u) and self.trainset.knows_item(j)):\n raise PredictionImpossible('User and/or item is unknown.')\n\n u_ratings = self.trainset.ur[u]\n\n if self.weighting == 'linear':\n weight = sum(self.freqs[i, j, self.to_index(r)] for i, r in u_ratings)\n score = sum(self.sums[i, j, self.to_index(r)] for i, r in u_ratings)\n return score / weight\n\n # self.weighting == 'log' or None\n weights = [self.freqs[i, j, self.to_index(r)] for i, r in u_ratings]\n reciprocals = [1 / w if w else 0 for w in weights]\n scores = [self.sums[i, j, self.to_index(r)] for i, r in u_ratings]\n scores = [s * w for s, w in zip(scores, reciprocals)]\n\n if self.weighting is None:\n return np.mean(scores)\n # self.weighting == 'log'\n logs = [np.log(w + 1) if w >= 1 else 0 for w in weights]\n return np.dot(scores, logs) / np.sum(logs)", "def item_based_prediction(self, line, rating_bd, sim_bd, item_bd):\n def sort_by_time(pairs):\n \"\"\"For each user, sort its rating records based on its datetime.\n More specifically, if time_a > time_b,\n then: time_a <- x, time_b <- x + 1.\n \"\"\"\n pairs = sorted(pairs, key=lambda line: line[2], reverse=False)\n order = 0\n out = []\n for i in range(len(pairs)):\n if i != 0 and pairs[i][2] == pairs[i - 1][2]:\n out += [(pairs[i][0], pairs[i][1], order)]\n else:\n order += 1\n out += [(pairs[i][0], pairs[i][1], order)]\n return out\n\n def f_decay(cur, t_ui):\n return np.exp(- self.alpha * (cur - t_ui))\n\n def add_decay(pairs):\n \"\"\"add decay rate to the pairs.\n Args:\n pairs: sim * rating, sim, time\n \"\"\"\n new_pairs = sort_by_time(pairs)\n current_time = max(map(lambda line: line[2], new_pairs)) + 1\n final_pairs = [\n (pair[0] * f_decay(current_time, pair[2]),\n pair[1] * f_decay(current_time, pair[2]))\n for pair in new_pairs]\n return sum(map(lambda line: line[0], final_pairs)) / sum(\n map(lambda line: line[1], final_pairs))\n\n def prediction(uid, pair, rating_bd, sim_bd, item_bd):\n \"\"\"do the prediction. 
It can either add decay rate or not,\n which is decided by `method`.\n \"\"\"\n iid, real_rating = pair[0], pair[1]\n if iid not in sim_bd.value.keys():\n return ()\n iid_neighbors = [\n (i[0], i[1], rating_bd.value[i[0]]) for i in sim_bd.value[iid]]\n average_iid_rating = item_bd.value[iid][0]\n sim_rating = []\n for info in iid_neighbors:\n niid, nsim, ratings = info\n sim_rating += [\n (iid, nsim, rating[1] - item_bd.value[niid][0], rating[2])\n for rating in ratings if uid in rating[0]]\n if len(sim_rating) != 0:\n sim_ratings = [\n (line[1] * line[2], abs(line[1]), line[3])\n for line in sim_rating]\n predicted_rating_no_decay = average_iid_rating + sum(\n map(lambda line: line[0], sim_ratings)) / sum(\n map(lambda line: line[1], sim_ratings))\n predicted_rating_decay = \\\n average_iid_rating + add_decay(sim_ratings)\n else:\n predicted_rating_no_decay = average_iid_rating\n predicted_rating_decay = average_iid_rating\n return iid, real_rating, \\\n self.bound_rating(predicted_rating_no_decay), \\\n self.bound_rating(predicted_rating_decay)\n\n uid, pairs = line\n return uid, [\n prediction(uid, pair, rating_bd, sim_bd, item_bd)\n for pair in pairs]", "def prediction(pair, uid_allneighbor_info, user_bd, uid):\n iid, real_rating, time = pair\n average_uid_rating = user_bd.value[uid][0]\n sim_rating = []\n for info in uid_allneighbor_info:\n uid, sim, ratings = info\n sim_rating += [\n (rating[0], sim, rating[1] - average_uid_rating)\n for rating in ratings if iid in rating[0]]\n\n if len(sim_rating) != 0:\n sim_rating = [\n (line[0], line[1] * line[2], abs(line[1]))\n for line in sim_rating]\n predicted_rating = average_uid_rating + sum(\n map(lambda line: line[1], sim_rating)) / sum(\n map(lambda line: line[2], sim_rating))\n else:\n predicted_rating = average_uid_rating\n return iid, real_rating, self.bound_rating(predicted_rating)", "def score(self, user_idx, item_idx=None):\n if self.variant == 'c2pf' or self.variant == 'tc2pf':\n if item_idx is None:\n user_pred = self.Beta * self.Theta[user_idx, :].T + self.Xi * self.Theta[user_idx, :].T\n else:\n user_pred = self.Beta[item_idx, :] * self.Theta[user_idx, :].T + self.Xi * self.Theta[user_idx, :].T\n elif self.variant == 'rc2pf':\n if item_idx is None:\n user_pred = self.Xi * self.Theta[user_idx, :].T\n else:\n user_pred = self.Xi[item_idx,] * self.Theta[user_idx, :].T\n else:\n if item_idx is None:\n user_pred = self.Beta * self.Theta[user_idx, :].T + self.Xi * self.Theta[user_idx, :].T\n else:\n user_pred = self.Beta[item_idx, :] * self.Theta[user_idx, :].T + self.Xi * self.Theta[user_idx, :].T\n # transform user_pred to a flatten array,\n user_pred = np.array(user_pred, dtype='float64').flatten()\n\n return user_pred", "def classify(user, itemName, itemVector):\n # first find nearest neighbor\n nearest = computeNearestNeighbor(itemName, itemVector, items)[0][1]\n rating = users[user][nearest]\n return rating", "def getSupport(item):\n return float(freqSet[item]) / len(transactionList)", "def prediction_item(item_id):\r\n # Data preprosessing\r\n reader = Reader(rating_scale=(0, 5))\r\n load_df = Dataset.load_from_df(ratings_df, reader)\r\n a_train = load_df.build_full_trainset()\r\n\r\n predictions = []\r\n for ui in a_train.all_users():\r\n predictions.append(model.predict(iid=item_id, uid=ui, verbose=False))\r\n return predictions", "def AUC(points):\n\tauc = 0.0\n\tfor point2, point1 in zip(points[1:], points[:-1]):\n\t\t#print(point2, point1)\n\t\tbase = (point2[0] - point1[0]) / 100.0\n\t\theight = ( (point2[1] - 
point1[1])/2.0 + point1[1] ) / 100.0\n\t\tauc += (base*height)\n\treturn auc", "def predict(self, users, items):\n self.eval()\n users_t = torch.tensor(users, dtype=torch.int64, device=self.device)\n items_t = torch.tensor(items, dtype=torch.int64, device=self.device)\n\n with torch.no_grad():\n # scores = torch.mul(\n # self.user_embedding(users_t), self.item_embedding(items_t)\n # ).sum(dim=1)\n ua_embeddings, ia_embeddings = self.forward(self.norm_adj)\n u_g_embeddings = ua_embeddings[users_t]\n i_g_embeddings = ia_embeddings[items_t]\n scores = self.f(torch.mul(u_g_embeddings, i_g_embeddings).sum(dim=1))\n return scores", "def create_user_item_array(self):\n user_em = self.user_factors.weight.detach()\n item_em = self.product_factors.weight.detach()\n user_b = self.user_bias.weight.detach()\n item_b = self.product_bias.weight.detach()\n\n user_item_array = (item_em + item_b) @ (user_em + user_b).transpose(0, 1)\n preds = self._prob_to_class(user_item_array).numpy()\n\n return preds", "def predict(self, user, item):\n return self.user_vectors[user, :].dot(self.item_vectors[item, :].T)", "def calculate_item_relevance_scores(self, user_similarity_profile):\r\n scores = user_similarity_profile.dot(self.ratings_matrix.matrix)\r\n return self.ratings_matrix.normalize_score_vector(scores)", "def _get_recommend(self, user):\n return self.user_cf.calculate(target_user_id=user, user_n=self.user_n,\n item_n=self.item_n, type=2)", "def calc_bpr_loss(\n self, user_emd, item_emd, user_list, pos_item_list, neg_item_list\n ):\n u_e = user_emd[user_list]\n pi_e = item_emd[pos_item_list]\n ni_e = item_emd[neg_item_list]\n p_scores = torch.mul(u_e, pi_e).sum(dim=1)\n n_scores = torch.mul(u_e, ni_e).sum(dim=1)\n\n l1 = torch.sum(-F.logsigmoid(p_scores - n_scores))\n\n u_e_p = self.user_embedding(user_list)\n pi_e_p = self.item_embedding(pos_item_list)\n ni_e_p = self.item_embedding(neg_item_list)\n\n l2 = self.reg_loss(u_e_p, pi_e_p, ni_e_p)\n\n return l1 + l2 * self.reg_weight", "def compute_kappa_score(self, scorer1, scorer2, items, matrix_weights):\r\n #matrice nb items\r\n df_n_items = pd.DataFrame(data=0, index=items, columns=items)\r\n for score1, score2 in zip(scorer1, scorer2):\r\n df_n_items[score1][score2] = df_n_items[score1][score2] + 1\r\n #nb items * total number of scoring items\r\n df_proba = df_n_items / len(scorer1)\r\n #df_proba with weights\r\n df_proba_w = df_proba * matrix_weights\r\n #relative observed agreement among raters\r\n p_a = df_proba_w.sum().sum()\r\n #hypothetical probability of chance agreement\r\n p_e = 0\r\n for lig in df_proba.index:\r\n for col in df_proba.columns:\r\n p_e = p_e + df_proba.loc[lig, :].sum() * \\\r\n df_proba.loc[:, col].sum() * matrix_weights.loc[lig, col]\r\n #kappa score\r\n kappa = 1 - (1 - p_a) / (1 - p_e)\r\n return kappa", "def compute_error(data, user_features, item_features, nz):\n sum_err = 0\n for d, n in nz:\n err = data[d,n] - np.dot(item_features[d,:],user_features[:,n])\n sum_err += err**2\n rmse = 0.5*sum_err/len(nz)\n return rmse", "def estimate(self, item):\n return self.A[item] if item in self.A.keys() else 0", "def predict(self, user, item):\n pred = self.global_bias + self.user_bias[user] + self.item_bias[item]\n pred += self.U[user, :].dot(self.I[:, item])\n return pred", "def test_recommendation_evaluation_3(model, interactions_ds):\n assert recommendation_evaluation(model, interactions_ds[1], cn_test_users=None, k=2, n_pos_interactions=1,\n novelty=False) == \\\n {'HitRatio@2': 0.02, 'NDCG@2': 0.0179, 'Precision@2': 0.01, 
'Recall@2': 0.02}", "def calculate_pairwise_user_similarity(self, user1_preferences, user2_preferences):\r\n\r\n shared_items = set(user1_preferences.indices) & set(user2_preferences.indices)\r\n\r\n all_items = set(user1_preferences.indices) | set(user2_preferences.indices)\r\n\r\n num_agreements = sum(1 for x in shared_items if abs(user1_preferences[0, x] - user2_preferences[0, x]) <= 2)\r\n\r\n return (num_agreements / len(all_items) if len(all_items) > 0 else 0)", "def calculate_total(self, calcu_user_n=20, user_n=20, item_n=10, seed=1):\n self._split_data(seed=seed)\n self._set_top(user_n=user_n, item_n=item_n)\n test_user_list = list(set(self.test['userId'].unique()))\n user_list = [test_user_list[random.randint(0, len(test_user_list)) - 1]\n for i in range(calcu_user_n)]\n hit = 0 # Hit score\n all_recom = 0 # num of all recommendations, calculate the accuracy rate\n like_item = 0 # num of the item the user likes in the test set, calculate the recall rate\n all_recom_set = set()\n all_item = set(self.train['movieId'].unique())\n item_popular = Counter(self.train['movieId'].values)\n ret = 0\n n = 0\n print('\\nCalculate all evaluation indicators...')\n for user in tqdm(user_list):\n recom_data = self._get_recommend(user, )\n recom_item = set([data[0] for data in recom_data])\n user_item = set(\n self.test[self.test['userId'] == user]['movieId'].values)\n overlap = recom_item & user_item\n hit += len(overlap)\n like_item += len(user_item)\n all_recom += len(recom_item)\n all_recom_set.update(recom_item)\n for rec in set([data[0] for data in recom_data]):\n ret += math.log(1 + item_popular.get(rec))\n n += 1\n print('\\nCalculate over')\n print('Precision is: ', hit / (all_recom * 1.0))\n print('Recall is: ', hit / (like_item * 1.0))\n print('Coverage is: ', len(all_recom_set) / (len(all_item) * 1.0))\n print('Popularity is:', (ret / n * 1.0))", "def conf(self, success, total):\n try:\n sp = success / total\n conf = binom_conf_interval(success, total, interval='jeffreys')\n uperr = conf[1] - sp # 1 sigma confidence above mean\n loerr = sp - conf[0] # 1 sigma confidence below mean\n return sp, uperr, loerr, 0.5*(uperr+loerr)\n except ValueError as e:\n return 0, 0, 0, 0", "def getSupport(item):\n # return float(freqSet[item])/len(transactionList)\n return float(freqSet[item]) / len(dateStampSet)", "def robbins_confidence(counts, alpha=0.05):\n s = singles(counts)\n n = counts.sum()\n k = sqrt((n+1)/alpha)\n return (s-k)/(n+1), (s+k)/(n+1)", "def calc_ssl_loss(\n self, user_list, pos_item_list, user_sub1, user_sub2, item_sub1, item_sub2\n ):\n\n u_emd1 = F.normalize(user_sub1[user_list], dim=1)\n u_emd2 = F.normalize(user_sub2[user_list], dim=1)\n all_user2 = F.normalize(user_sub2, dim=1)\n v1 = torch.sum(u_emd1 * u_emd2, dim=1)\n v2 = u_emd1.matmul(all_user2.T)\n v1 = torch.exp(v1 / self.ssl_tau)\n v2 = torch.sum(torch.exp(v2 / self.ssl_tau), dim=1)\n ssl_user = -torch.sum(torch.log(v1 / v2))\n\n i_emd1 = F.normalize(item_sub1[pos_item_list], dim=1)\n i_emd2 = F.normalize(item_sub2[pos_item_list], dim=1)\n all_item2 = F.normalize(item_sub2, dim=1)\n v3 = torch.sum(i_emd1 * i_emd2, dim=1)\n v4 = i_emd1.matmul(all_item2.T)\n v3 = torch.exp(v3 / self.ssl_tau)\n v4 = torch.sum(torch.exp(v4 / self.ssl_tau), dim=1)\n ssl_item = -torch.sum(torch.log(v3 / v4))\n\n return (ssl_item + ssl_user) * self.ssl_weight", "def mrr(self):\n _test = self.drop_bad_ratings()\n merged = pd.merge(left=_test, right=self.predict, on=['user', 'item'], how='right')[\n ['user', 'item', 'rating_x', 'rating_y']]\n 
nott = np.vectorize(lambda x: not x)\n mrrs = []\n for user in merged.user.unique():\n frame = merged[merged.user == user].sort_values(by='rating_y', ascending=False)\n true_ratings = frame.rating_x.values\n positions = np.where(nott(np.isnan(true_ratings)))[0]\n if len(positions) > 0:\n mrrs.append(1 / (positions[0] + 1))\n else:\n mrrs.append(0)\n\n return sum(mrrs) / len(mrrs)", "def predict(self, X):\n \n n_samples = X.shape[0]\n y_pred = self.mean_rating_*np.ones(n_samples)\n for i in np.arange(n_samples):\n if X[i,0] < self.num_user and X[i,1] < self.num_item:\n y_pred[i] += np.dot(self.U[X[i,0],:],self.V[X[i,1],:])\n y_pred[i] = np.clip(y_pred[i],self.min_rating_,\n self.max_rating_)\n \n return y_pred", "def itembase(user_id):\n frame1 = pd.concat([pd.DataFrame(REVIEWS[x]) for x in REVIEWS])\n filtered_data = recommender.filtering_not_city()\n businesses = pd.DataFrame(filtered_data).set_index('business_id')\n frame2 = frame1.drop_duplicates(subset=[\"user_id\", \"business_id\"], keep='last', inplace=False)\n\n utility_matrix = pivot_reviews(frame2)\n\n similarity = create_similarity_matrix_euclid(utility_matrix)\n\n for business in businesses.index:\n neighborhood = select_neighborhood(similarity, utility_matrix, user_id, business)\n prediction = weighted_mean(neighborhood, utility_matrix, user_id)\n businesses.ix[business, 'predicted rating'] = prediction\n\n sorted_prediction = businesses.sort_values(by=['predicted rating'], ascending=False)\n sorted_prediction2 = sorted_prediction.drop(columns=['predicted rating'])\n sorted_prediction2 = sorted_prediction2.reset_index()\n sorted_prediction3 = sorted_prediction.reset_index()\n return sorted_prediction2.to_dict(orient='records'), sorted_prediction3.to_dict(orient='records')", "def single_user_recommendation_vector(self, user_ratings):\r\n\r\n ratings_vector = self.ratings_matrix.get_ratings_vector(user_ratings)\r\n\r\n user_similarity_profile = self.calculate_user_similarity_profile(ratings_vector)\r\n\r\n return self.calculate_item_relevance_scores(user_similarity_profile)", "def compute_user_local_sensitivity(sc, dataset, user_id, num_iters_ls):\n\n res = defaultdict(lambda: 0.0)\n\n original_recs, original_qii = compute_recommendations_and_qii(sc, dataset,\n user_id)\n original_recs = recommendations_to_dd(original_recs)\n\n res[\"recommendee_user_id\"] = user_id\n res[\"recommendee_recs_l1_norm\"] = l1_norm(original_recs)\n res[\"recommendee_qii_l1_norm\"] = l1_norm(original_qii)\n res[\"recommendee_recs_l0_norm\"] = len(original_recs)\n res[\"recommendee_qii_l0_norm\"] = len(original_qii)\n res[\"perturbations\"] = []\n\n all_users = get_user_list(dataset)\n for x in xrange(num_iters_ls):\n if perturb_specific_user:\n other_user_id = perturb_specific_user\n else:\n other_user_id = random.choice(list(set(all_users) - {user_id}))\n print \"Perturbing user\", other_user_id, \"(\", x+1, \"out of\",\\\n num_iters_ls, \")\"\n perturbed_dataset = perturb_user_ratings(sc, dataset, other_user_id)\n start = time.time()\n recs, qii = compute_recommendations_and_qii(sc, perturbed_dataset, user_id)\n stop = time.time()\n recs = recommendations_to_dd(recs)\n rec_ls = calculate_l1_distance(original_recs, recs)\n qii_ls = calculate_l1_distance(original_qii, qii)\n\n report = {}\n report[\"perturbed_user_id\"] = other_user_id\n report[\"perturbed_recs_l1_norm\"] = l1_norm(recs)\n report[\"perturbed_qii_l1_norm\"] = l1_norm(qii)\n report[\"perturbed_recs_l0_norm\"] = len(recs)\n report[\"perturbed_qii_l0_norm\"] = len(qii)\n 
report[\"recs_ls\"] = rec_ls\n report[\"qii_ls\"] = qii_ls\n report[\"recs_ls_norm\"] = rec_ls/float((len(recs)*4))\n report[\"qii_ls_norm\"] = qii_ls/float((len(qii)*4))\n print \"Local sensitivity of recs: \", rec_ls/float((len(recs)*4))\n print \"Local sensitivity of QII: \", qii_ls/float((len(qii)*4))\n report[\"computation_time\"] = stop - start\n\n\n res[\"perturbations\"].append(report)\n\n for per in res[\"perturbations\"]:\n res[\"avg_recs_ls\"] += float(per[\"recs_ls\"])/len(res[\"perturbations\"])\n res[\"max_recs_ls\"] = max(res[\"max_recs_ls\"], per[\"recs_ls\"])\n res[\"avg_recs_ls_norm\"] +=\\\n float(per[\"recs_ls_norm\"])/len(res[\"perturbations\"])\n res[\"max_recs_ls_norm\"] = max(res[\"max_recs_ls_norm\"],\n per[\"recs_ls_norm\"])\n res[\"avg_qii_ls\"] += float(per[\"qii_ls\"])/len(res[\"perturbations\"])\n res[\"max_qii_ls\"] = max(res[\"max_qii_ls\"], per[\"qii_ls\"])\n res[\"avg_qii_ls_norm\"] +=\\\n float(per[\"qii_ls_norm\"])/len(res[\"perturbations\"])\n res[\"max_qii_ls_norm\"] = max(res[\"max_recs_qii_norm\"],\n per[\"qii_ls_norm\"])\n return dict(res)", "def _calculate_tp_confidences(images, test_class):\n confidences = []\n for (response_json, class_name) in predict(images, desc=f\"[{test_class}] inference\"):\n if response_json[\"status\"] != \"ok\":\n raise Exception(f\"Not OK response in {class_name}\")\n if class_name == test_class and response_json[\"response\"] == class_name:\n confidences.append(response_json[\"confidence\"])\n return confidences", "def _calculate_fp_confidences(images, test_classes):\n confidences = []\n for (response_json, class_name) in predict(images, desc=f\"[{BACKGROUND}] inference\"):\n if response_json[\"status\"] != \"ok\":\n raise Exception(f\"Not OK response in {class_name}\")\n if response_json[\"response\"] in test_classes:\n confidences.append(response_json[\"confidence\"])\n return confidences", "def calculate_error_rates(point_to_weight, classifier_to_misclassified):\n ans = {}\n for c in classifier_to_misclassified:\n misclassified = classifier_to_misclassified[c]\n ans[c] = 0\n for p in misclassified:\n ans[c] += point_to_weight[p]\n return ans", "def stdConfidenceTrades(predictions, buy_confidence=1.5, sell_confidence=1.1):\n smooth_preds = pd.Series(predictions).rolling(5).mean()\n buy_thresh = np.mean(smooth_preds) + buy_confidence * np.std(smooth_preds)\n sell_thresh = np.mean(smooth_preds) - sell_confidence * np.std(smooth_preds)\n buy_positions = np.where(predictions > buy_thresh)[0]\n sell_positions = np.where(predictions < sell_thresh)[0]\n \n buys = buy_positions\n sells = []\n curSell = 0\n for curBuy in buys:\n arr = np.where(sell_positions > curBuy)[0]\n if len(arr):\n sells.append(sell_positions[arr[0]])\n tradePairs = list(zip(buys, sells))\n return tradePairs", "def cre_confidence1(df):\r\n func = lambda x: 1 - np.abs(x.mean())\r\n return df.groupby('creline')['ffb_c'].transform(func)", "def obscurity(user):\n reader = open(\"similarities.txt\", \"r\")\n lines = reader.readlines()\n obscurity = 0.0\n count = 0\n for line in lines:\n a, b, sim = line.split(\"\\t\")\n if user == a:\n count += 1\n obscurity += float(sim)\n\n obscurity /= count\n return obscurity", "def probability(self, item):\n count = self.counter.get(item, 0)\n if self.smoothing_dict:\n smooth_count = self.smoothing_dict.get(count, count)\n assert smooth_count > 0\n return smooth_count / self.smooth_total\n else:\n return count / self.total", "def weighted_average(items, weights):\n assert len(items) > 0\n assert len(items) == 
len(weights)\n # declare total as the return value which is a decimal\n total = 0.0\n # for all pairs from two lists\n for i in range(len(items)):\n \t# we increment the total for the product of both value\n \ttotal += items[i] * weights[i]\n # we return the total divided by sum of weights\n return total / sum(weights)", "def recall(self, user_list):\n hit = 0\n like_item = 0\n print('\\nCalculate recall: ')\n for user in tqdm(user_list):\n recom_data = self._get_recommend(user)\n recom_item = set([data[0] for data in recom_data])\n user_item = set(\n self.test[self.test['userId'] == user]['movieId'].values)\n overlap = recom_item & user_item\n hit += len(overlap)\n like_item += len(user_item)\n print('\\nrecall is: ', hit / (like_item * 1.0))\n return hit / (like_item * 1.0)", "def decision_function(self, user_id=0):\n if user_id != 0:\n cls_scores, class_ids = self.__decision_function\n try:\n index = np.where(class_ids == user_id)\n return cls_scores[index]\n except:\n return -self.__decision_nr_samples\n else:\n return self.__decision_function", "def score_method(pairs_true, pairs_test):\n \n set_true = {tuple(e) for e in pairs_true}\n set_test = {tuple(e) for e in pairs_test}\n true_pos, false_pos, false_neg = confusion_stats(set_true, set_test)\n \n total = true_pos + false_pos + false_neg\n true_pos_rate = true_pos / total\n false_pos_rate = false_pos / total\n false_neg_rate = false_neg / total\n \n return true_pos_rate, false_pos_rate, false_neg_rate", "def user_interaction_score(uv, recommended_News, ranked=True):\n\n iv = recommended_News[\"topical_vector\"]\n\n product = simple_doct_product(uv, iv)\n\n epsilon = 10e-5\n\n if (product + epsilon) > 1.0:\n vui = 0.99\n else:\n vui = beta_distribution(product)\n\n # Awared preference\n ita = beta_distribution(0.98)\n pui = vui * ita\n\n return pui", "def user_based_prediction(self, line, rating_bd, sim_bd, user_bd):\n def prediction(pair, uid_allneighbor_info, user_bd, uid):\n \"\"\"do the prediction. 
It can either add decay rate or not,\n which is decided by `method`.\n Args:\n pair: (iid, rating, time)\n uid_allneighbor_info: (uid, sim, rating_record)*\n average_uid: average rating of current uid.\n \"\"\"\n iid, real_rating, time = pair\n average_uid_rating = user_bd.value[uid][0]\n sim_rating = []\n for info in uid_allneighbor_info:\n uid, sim, ratings = info\n sim_rating += [\n (rating[0], sim, rating[1] - average_uid_rating)\n for rating in ratings if iid in rating[0]]\n\n if len(sim_rating) != 0:\n sim_rating = [\n (line[0], line[1] * line[2], abs(line[1]))\n for line in sim_rating]\n predicted_rating = average_uid_rating + sum(\n map(lambda line: line[1], sim_rating)) / sum(\n map(lambda line: line[2], sim_rating))\n else:\n predicted_rating = average_uid_rating\n return iid, real_rating, self.bound_rating(predicted_rating)\n\n uid, pairs = line\n uid_allneighbor_info = [\n (u[0], u[1], rating_bd.value[u[0]]) for u in sim_bd.value[uid]]\n return uid, [prediction(\n pair, uid_allneighbor_info, user_bd, uid) for pair in pairs]", "def estimate_user_biases(self):\n self.user_biases = {}\n for user in self.user_ratings:\n # b_u = sum(r_ui - mu - b_i) / len(R(u))\n user_bias = [rating - self.global_mean - self.movie_biases[movie]\n for movie, rating in self.user_ratings[user].items()]\n user_bias = sum(user_bias) / len(user_bias)\n self.user_biases[user] = user_bias", "def ndpm(self):\n\n merged = pd.merge(left=self.test, right=self.predict, on=['user', 'item'], how='inner')[\n ['user', 'rating_x', 'rating_y']]\n ndpms = []\n for user in merged.user.unique():\n frame = merged[merged.user == user]\n if frame.shape[0] <= 1:\n continue\n C_plus = self.num_of_ordered_positive(frame, 'rating_x', 'rating_y')\n C_minus = self.num_of_ordered_negative(frame, 'rating_x', 'rating_y')\n C_u = self.num_of_ordered(frame, 'rating_x')\n if C_u == 0:\n continue\n C_s = self.num_of_ordered(frame, 'rating_y')\n C_u0 = C_u - (C_plus + C_minus)\n ndpms.append(1 - (C_minus + 0.5 * C_u0) / C_u)\n\n return sum(ndpms) / len(ndpms)", "def evaluate(topk_matches, test_user_products, num_recommendations, brand_dict):\n invalid_users = []\n # Compute metrics\n precisions, recalls, ndcgs, hits, fairness = [], [], [], [], []\n test_user_idxs = list(test_user_products.keys())\n for uid in test_user_idxs:\n if uid not in topk_matches or len(topk_matches[uid]) < num_recommendations:\n invalid_users.append(uid)\n continue\n pred_list, rel_set = topk_matches[uid][::-1], test_user_products[uid]\n if len(pred_list) == 0:\n continue\n\n dcg = 0.0\n hit_num = 0.0\n for i in range(len(pred_list)):\n if pred_list[i] in rel_set:\n dcg += 1. / (log(i + 2) / log(2))\n hit_num += 1\n # idcg\n idcg = 0.0\n for i in range(min(len(rel_set), len(pred_list))):\n idcg += 1. 
/ (log(i + 2) / log(2))\n ndcg = dcg / idcg\n recall = hit_num / len(rel_set)\n precision = hit_num / len(pred_list)\n hit = 1.0 if hit_num > 0.0 else 0.0\n\n ndcgs.append(ndcg)\n recalls.append(recall)\n precisions.append(precision)\n hits.append(hit)\n fairness.append(calculate_fairness(pred_list, brand_dict))\n\n avg_precision = np.mean(precisions) * 100\n avg_recall = np.mean(recalls) * 100\n avg_ndcg = np.mean(ndcgs) * 100\n avg_hit = np.mean(hits) * 100\n avg_fairness = np.mean(fairness)\n print('NDCG={:.3f} | Recall={:.3f} | HR={:.3f} | Precision={:.3f} | Fairness={:.3f} | Invalid users={}'.format(\n avg_ndcg, avg_recall, avg_hit, avg_precision, avg_fairness, len(invalid_users)))", "def confidence(self) -> float:\n return self._confidence", "def get_overall_score(self, user):\n\n quizzes = ['iq', 'math', 'english']\n\n prev_scores = []\n new_scores = []\n\n for quiz in quizzes:\n quiz_obj = self.get_object(quiz)\n queryset = self.get_queryset(user, quiz_obj)\n\n try:\n new_scores.append(queryset[0].marks)\n prev_scores.append(queryset[1].marks)\n except:\n new_scores.append(queryset[0].marks)\n prev_scores.append(0)\n\n import statistics\n\n return statistics.mean(prev_scores), statistics.mean(new_scores)", "def calc_confidence(raw_output, labels_list=None):\n #print(\"raw_output:\", raw_output)\n result = raw_output\n probs = []\n for dc in result:\n for key in dc:\n #print(\"dc:\", dc)\n #print(\"key:\", key)\n logits = dc[key]\n logit = logits[0]\n ps = softmax_probabilities(logit)\n max_ps = max(ps)\n index = np.argmax(ps)\n probs.append(max_ps)\n #print('max_ps:', max_ps)\n #print('index:', index)\n #if labels_list:\n # print('tag:', labels_list[index])\n return probs", "def evaluate(self, dataloader):\n squared_loss = 0\n correct = 0\n total = 0\n\n self.eval()\n with torch.no_grad():\n for user, item, rating in dataloader:\n forward = self(user, item)\n predicted = self._prob_to_class(forward)\n\n squared_loss += self.loss(forward, rating).item() * len(user)\n total += predicted.numel()\n correct += (predicted == rating.view(-1)).sum().item()\n\n mean_loss = squared_loss / total\n\n return mean_loss, f\"{(100 * correct / total):.2f}\"", "def ap(self, result, next_item):\n if next_item in result.index:\n rank = result.index.get_loc(next_item) + 1\n return 1.0 / rank\n else:\n return 0", "def calcUserMeanRating(userRatingGroup):\n userID = userRatingGroup[0]\n ratingSum = 0.0\n ratingCnt = len(userRatingGroup[1])\n if ratingCnt == 0:\n return (userID, 0.0)\n for item in userRatingGroup[1]:\n ratingSum += item[1]\n return (userID, 1.0 * ratingSum / ratingCnt)", "def make_predictions(movies, ratings_train, ratings_test):\n ###TODO\n \n user_result = [] \n \n for index,row in ratings_test.iterrows():\n userid_test = row['userId']\n #print(\"userid_test::\",userid_test) \n movieid_test = row['movieId'] \n #print(\"movieid_test::\",movieid_test) \n x = list(movies[movies.movieId==movieid_test]['features'])[0]\n #print(\"CSR_GOTT+X::\",x)\n #print(\"TYPE of CSR_GOTT_X::\",type(x))\n subset_train = ratings_train[ratings_train.userId == userid_test]\n #print(\"SUB MOVIE SET::\",subset_train)\n #print(\"TYPE of SUB MOVIE SET::\",type(x))\n total_if_zero=0\n rating_if_zero=0\n sum_main_result=0\n sum_cosine=0 \n for index1,row1 in subset_train.iterrows():\n userid_train = row1['userId']\n #print(\"userid_train::\",userid_train) \n if(userid_test == userid_train ):\n #print(\"HII IN IFFF:::\")\n movieid_train = row1['movieId']\n #print(\"movieid_train::\",movieid_train)\n rating_train = 
row1['rating']\n #print(\"rating_train::\",rating_train)\n total_if_zero = total_if_zero + 1 \n rating_if_zero = rating_if_zero + rating_train\n y = list(movies[movies.movieId==movieid_train]['features'])[0]\n #print(\"CSR_GOTT_Y::\",y)\n #print(\"TYPE of CSR_GOTT_Y::\",type(y))\n result_cos = cosine_sim(x,y)\n sum_main_result += result_cos * rating_train\n sum_cosine += result_cos \n \n if(sum_main_result != 0):\n user_result.append(sum_main_result/sum_cosine)\n #print(\"user_result::\",user_result) \n else:\n user_result.append(rating_if_zero / total_if_zero)\n #print(\"user_result::\",user_result) \n \n return_result_arr = np.array(user_result) \n \n return return_result_arr\n \n pass", "def get_correct(self, predicted, actual):\n ret_ratios = [np.sum(predicted == actual) / len(actual)]\n for i in range(2):\n actual_ones = np.where(actual == i)[0]\n should_be_ones = np.take(predicted, actual_ones)\n actual_ones = np.take(actual, actual_ones)\n ret_ratios.append(np.sum(should_be_ones == actual_ones) / len(actual_ones))\n\n return tuple(ret_ratios)", "def compute_average_user_ratings(user_ratings):\n ave_ratings = {}\n \n for user,value in user_ratings.items():\n sum = 0\n movie_num=0\n for movieId, rating in value.items():\n sum += float(rating)\n movie_num += 1\n average = sum / movie_num\n ave_ratings[user]=average\n return ave_ratings", "def CF_item_predict(train_data_matrix: csr_matrix):\n logging.getLogger(__name__).info(\"Begin to calculate similarity of CF item-item based!\")\n train_data_matrix = train_data_matrix.A\n item_similarity = cosine_similarity(train_data_matrix.T)\n logging.getLogger(__name__).info(\"Begin to predict based on CF item-item!\")\n item_prediction = predict(train_data_matrix, item_similarity, type='item')\n return item_prediction", "def predict_proba_confidence(clf, X, y_true):\n class_labels = clf.classes_\n y_pred_proba = clf.predict_proba(X)[:,1]\n ent = [entropy(i) for i in y_pred_proba]\n\n return sum(ent)/len(ent)", "def mutual_info_score(self):\n _, _, I_CK = self._entropies()\n return I_CK / self.grand_total", "def probabilities_score(model_id, test_set_id, rubric_id):\n result = {'true_average_probability': 0, 'false_average_probability': 0}\n # right answers\n answers = db.get_rubric_answers(test_set_id, rubric_id)\n # rubrication results\n rubrication_result = db.get_rubrication_probability(model_id, test_set_id, rubric_id)\n\n true_number = 0\n true_probability = 0\n false_number = 0\n false_probability = 0\n\n for key in rubrication_result:\n if answers[key]:\n true_number += 1\n true_probability += rubrication_result[key]\n else:\n false_number +=1\n false_probability += rubrication_result[key]\n\n if true_number:\n result['true_average_probability'] = true_probability / true_number\n\n if false_number:\n result['false_average_probability'] = false_probability / false_number\n\n return result", "def get_similarity(user1: Rating, user2: Rating) -> float:\n shared = 0.0\n for m_id in user1:\n if m_id in user2:\n shared += user1[m_id] * user2[m_id]\n norm1 = 0.0\n for m_id in user1:\n norm1 = norm1 + user1[m_id] ** 2\n norm2 = 0.0\n for m_id in user2:\n norm2 = norm2 + user2[m_id] ** 2\n return (shared * shared) / (norm1 * norm2)", "def standardize_sim_values(self):\n for user_id_A, row in self.sim_matrix.items(): # row is reference\n lA = len(self.users[user_id_A].covered_items)\n for user_id_B in row.keys():\n lB = len(self.users[user_id_B].covered_items)\n row[user_id_B] /= sqrt(lA*lB)\n assert row[user_id_B] <= 1", "def 
_build_user_model(self, user_ids):\n res_dict = {}\n for user_id in user_ids:\n specific_user_log = self._user_log[self._user_log['user_id'] == user_id]\n log_vecs = pd.merge(specific_user_log, self._item_vector, how='left', on=['item_id'])\n assert (sum(log_vecs['vec'].notnull()) == log_vecs.shape[0]), 'Item vector sheet has null values'\n res_dict[user_id] = ContentBasedAlgo._calc_dim_average(np.array(log_vecs['vec'].values.tolist()))\n return res_dict", "def user_based_recommendation(\n self, test_dataRDD,\n user_based_dict_bd, userbased_sim_pair_dict_bd, user_info_bd):\n sim_pair_dict_keys = set(userbased_sim_pair_dict_bd.value.keys())\n return test_dataRDD.filter(\n lambda line: line[0] in sim_pair_dict_keys).map(\n lambda line: self.user_based_prediction(\n line, user_based_dict_bd,\n userbased_sim_pair_dict_bd, user_info_bd))", "def prec_recall_f1_score(pred_items, gold_items):\n common = Counter(gold_items) & Counter(pred_items)\n num_same = sum(common.values())\n if num_same == 0:\n return 0\n precision = 1.0 * num_same / len(pred_items)\n recall = 1.0 * num_same / len(gold_items)\n f1 = (2 * precision * recall) / (precision + recall)\n return f1", "def calculate_penalty(self):\n if AT.PENALTY not in self.attributes:\n return (0, 1)\n return self.attributes[AT.PENALTY].calculate(self)", "def surprisal(self, item):\n return - math.log(self.probability(item))", "def prediction():\r\n\r\n\r\n\tpredictVal = []\r\n\taccuracy = 0.0\r\n\r\n\t# Calculate accuracy for each class in testData\r\n\tfor item in testData:\r\n\t\tclass0Prediction = posProb / 100\r\n\t\tclass1Prediction = negProb / 100\r\n\t\t\r\n\t\t# Multiply the prior probablities for negative and positive reviews by their feature likelihoods \r\n\t\tfor word in item[2]:\r\n\t\t\tclass0Prediction *= class0Dict[word]\r\n\t\t\tclass1Prediction *= class1Dict[word]\r\n\r\n\t\t# Give every item in testData a predicted value\r\n\t\tif(class0Prediction > class1Prediction):\r\n\t\t\tpredictVal.append('0')\r\n\t\telse:\r\n\t\t\tpredictVal.append('1')\r\n\r\n\tfor i in range(len(testData)):\r\n\t\tif(testData[i][1] == predictVal[i]):\r\n\t\t\taccuracy += 1\r\n\r\n\t\t\t\r\n\taccuracy = 100 * (accuracy / len(testData))\r\n\treturn(predictVal, accuracy)", "def predictRating(toPredict, candidateList):\n\n ratingRelevantCandidates = []\n\n #Remove candidates with no rating specified\n for candidate in candidateList:\n currentCandidate = candidate[1]\n\n if float(currentCandidate['vote_avg']) > 0:\n ratingRelevantCandidates.append((float(currentCandidate['vote_avg']), candidate))\n\n #print(\"ratings::::::::\",currentCandidate['vote_avg'])\n\n #Remove outlier candidates based on rating\n ratingMean = np.mean([x[0] for x in ratingRelevantCandidates])\n print(\"ratingMean\", ratingMean)\n ratingSD = np.std([x[0] for x in ratingRelevantCandidates])\n print(\"ratingSD\", ratingSD)\n\n finalRatings = [x for x in ratingRelevantCandidates if (float(x[0]) < ratingMean + ratingSD)]#1.5 *\n finalRatings = [x for x in finalRatings if (float(x[0]) > ratingMean - ratingSD)]#.75 *\n\n finalRatingCandidatesWithWeight = []\n\n #Weight each candidate based on vote count, direct and actor popularity and matching score from part 1\n for candidate in finalRatings:\n directorPoints = compareDirectorPoints(toPredict['director'], candidate[1][1]['director'])\n actorPoints = compareActorPoints(toPredict['cast'], candidate[1][1]['cast'])\n voteCountPoints = int(candidate[1][1]['vote_count'])\n matchPoints = candidate[1][0] / np.max([float(x[1][0]) for x in 
finalRatings]) * 100\n candidateWeight = PREDICTION_MATCHPOINTS_WEIGHT * matchPoints \\\n + PREDICTION_ACTOR_WEIGHT * actorPoints \\\n + PREDICTION_DIRECTOR_WEIGHT * directorPoints \\\n + PREDICTION_VOTECOUNT_WEIGHT * voteCountPoints\n\n finalRatingCandidatesWithWeight.append((candidateWeight, candidate[0]))\n\n #Calculate the prediction\n sumRatingCandidateWeights = np.sum([float(x[0]) for x in finalRatingCandidatesWithWeight])\n sumRatingTimesCandidateWeight = np.sum([float(x[0]) * float(x[1]) for x in finalRatingCandidatesWithWeight])\n\n ratingPrediction = float(sumRatingTimesCandidateWeight / sumRatingCandidateWeights)\n\n return ratingPrediction", "def calculate_metrics(predictions, expected):\n # type: (np.ndarray, np.ndarray) -> (float, float, float)\n clients_count = predictions.shape[0]\n products_count = predictions.shape[1]\n\n true_positive = 0.0\n true_negative = 0.0\n false_positive = 0.0\n false_negative = 0.0\n\n total = float(clients_count * products_count)\n\n for c in range(0, clients_count):\n for p in range(0, products_count):\n if predictions[c, p] == expected[c, p]:\n if predictions[c, p] == 1:\n true_positive += 1\n else:\n true_negative += 1\n else:\n if predictions[c, p] == 1:\n false_positive += 1\n else:\n false_negative += 1\n\n accuracy = float(true_positive + true_negative) / total\n if true_positive + false_positive == 0:\n precision = 0\n else:\n precision = true_positive / float(true_positive + false_positive)\n\n if true_positive + false_negative == 0:\n recall = 0\n else:\n recall = true_positive / float(true_positive + false_negative)\n\n return accuracy, precision, recall", "def calc_probabilities(applications):\n sum_advantage = sum(app.get_advantage() for app in applications)\n return [app.get_advantage() / sum_advantage for app in applications]", "def precision_and_recall_k(model, user_dict, item_dict, train_user_list, test_user_list, klist, batch=256):\n # get all user emd and item emd\n user_emb, item_emb, umap, imap = prepare_user_item_emd(model, user_dict, item_dict)\n user_emb = user_emb.detach().cpu()\n item_emb = item_emb.detach().cpu()\n\n # Calculate max k value\n max_k = max(klist)\n\n # Compute all pair of training and test record\n result = None\n for i in range(0, user_emb.shape[0], batch):\n # Create already observed mask\n mask = user_emb.new_ones([min([batch, user_emb.shape[0] - i]), item_emb.shape[0]])\n for j in range(batch):\n if i + j >= user_emb.shape[0]:\n break\n items_idx = train_user_list[i + j]\n if (items_idx != []):\n emd_index = [imap[i] for i in items_idx]\n mask[j].scatter_(dim=0, index=t.tensor(list(emd_index)), value=t.tensor(0.0))\n # Calculate prediction value\n\n cur_result = t.mm(user_emb[i:i + min(batch, user_emb.shape[0] - i), :], item_emb.t())\n cur_result = t.sigmoid(cur_result)\n assert not t.any(t.isnan(cur_result))\n # Make zero for already observed item\n cur_result = t.mul(mask, cur_result)\n _, cur_result = t.topk(cur_result, k=max_k, dim=1)\n result = cur_result if result is None else t.cat((result, cur_result), dim=0)\n\n result = result.cpu()\n # Sort indice and get test_pred_topk\n precisions, recalls = [], []\n for k in klist:\n precision, recall = 0, 0\n for i in range(user_emb.shape[0]):\n test = set(test_user_list[i])\n pred = set(result[i, :k].numpy().tolist())\n val = len(test & pred)\n precision += val / max([min([k, len(test)]), 1])\n recall += val / max([len(test), 1])\n precisions.append(precision / user_emb.shape[0])\n recalls.append(recall / user_emb.shape[0])\n return precisions, 
recalls", "def runApriori(data_iter, minSupport, minConfidence):\n itemSet, transactionList = getItemSetTransactionList(data_iter)\n freqSet = defaultdict(int)\n largeSet = dict()\n# Global dictionary which stores (key=n-itemSets,value=support)\n# which satisfy minSupport\n assocRules = dict()\n# Dictionary which stores Association Rules\n oneCSet = returnItemsWithMinSupport(itemSet,\n transactionList,\n minSupport,\n freqSet)\n currentLSet = oneCSet\n k = 2\n while(currentLSet != set([])):\n largeSet[k-1] = currentLSet\n currentLSet = joinSet(currentLSet, k)\n currentCSet = returnItemsWithMinSupport(currentLSet,\n transactionList,\n minSupport,\n freqSet)\n currentLSet = currentCSet\n k = k + 1\n\n\n def getSupport(item):\n \"\"\"local function which Returns the support of an item\"\"\"\n return float(freqSet[item]) / len(transactionList)\n toRetItems = []\n for key, value in largeSet.items():\n toRetItems.extend([(tuple(item), getSupport(item))\n for item in value])\n toRetRules = []\n for key, value in largeSet.items()[1:]:\n for item in value:\n _subsets = map(frozenset, [x for x in subsets(item)])\n for element in _subsets:\n remain = item.difference(element)\n if len(remain) > 0:\n confidence = getSupport(item) / getSupport(element)\n if confidence >= minConfidence:\n toRetRules.append(((tuple(element), tuple(remain)),\n confidence))\n return toRetItems, toRetRules", "def get_final_score(self, user):\n\n iq_subject_score = self.get_overall_score(user=user)\n\n try:\n speech_score = UserQuizMark.objects.filter(user=user, quiz=self.get_object('speech_training')).latest(\n 'timestamp').marks\n drawing_score = UserQuizMark.objects.filter(user=user, quiz=self.get_object('drawing')).latet(\n 'timestamp').marks\n except UserQuizMark.DoesNotExist:\n raise Http404\n\n avg_speech_drawing_score = speech_score + drawing_score\n\n return (iq_subject_score + avg_speech_drawing_score) / 2", "def confidence(self):\n return self._confidence", "def confidence(self):\n return self._confidence", "def calculate_profit(self):", "def classProbs(observation, tree, classes):\n res = classify(observation, tree) #res = results\n total = sum(res.values())\n probs = []\n for c in classes:\n if c in res.keys():\n probs.append(float(res[c])/total)\n else:\n probs.append(0)\n return probs" ]
[ "0.6476335", "0.6331386", "0.6273888", "0.62594044", "0.62203914", "0.6011443", "0.58879673", "0.58871895", "0.58729005", "0.58711815", "0.582148", "0.57926047", "0.57851225", "0.5779903", "0.57435066", "0.5729716", "0.5727403", "0.57262325", "0.56652945", "0.5631243", "0.5606004", "0.55863273", "0.5575825", "0.5567298", "0.55563915", "0.55298156", "0.5524738", "0.5513865", "0.55011195", "0.54940677", "0.5490233", "0.54680383", "0.5458408", "0.5444532", "0.54409534", "0.5428717", "0.53974956", "0.5388125", "0.5382578", "0.5376942", "0.5376332", "0.5352854", "0.53489393", "0.5326687", "0.53149056", "0.52835125", "0.52819353", "0.5281739", "0.52793497", "0.5235703", "0.52348727", "0.5217693", "0.52168936", "0.5207541", "0.52046466", "0.51921827", "0.51912487", "0.5190546", "0.5188543", "0.5187897", "0.5171303", "0.5166009", "0.51519424", "0.5151301", "0.5132136", "0.51312673", "0.5130951", "0.51308686", "0.5130636", "0.511869", "0.51185876", "0.511847", "0.5110344", "0.510806", "0.51024896", "0.5101832", "0.5099396", "0.509473", "0.50895804", "0.50865966", "0.50834215", "0.5083275", "0.5082051", "0.50801057", "0.50781155", "0.5077338", "0.50733185", "0.507178", "0.5065903", "0.5063173", "0.50595987", "0.50484115", "0.50463843", "0.5042974", "0.50404656", "0.50363314", "0.503613", "0.503613", "0.50351405", "0.50321203" ]
0.50412726
94
calculate the preference of each useritem pair
def cal_preference(dat):
    preference = np.ones(dat.shape)
    preference[dat == 0] = 0
    return np.matrix(preference)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _predict_user_item(self, user, item):\n if not isinstance(user, int):\n user = self._user_to_ndx[user]\n if not isinstance(item, int):\n item = self._item_to_ndx[item]\n\n try:\n rating_mean = self._averages[user]\n except AttributeError:\n raise RuntimeError('Must fit before predicting')\n\n other_users = [other for other in self._users if other != user and\n np.isfinite(self._votes[other][item])]\n weights = np.array([self._weight(user, other)\n for other in other_users])\n deviations = np.array([self._votes[other][item] - self._averages[other]\n for other in other_users])\n\n weight_sum = np.sum(np.absolute(weights))\n if weight_sum < _EPSILON:\n return rating_mean # No similar users, so guess their avg rating\n\n norm_const = 1 / weight_sum\n\n weighted_avg = np.sum(weights * deviations)\n return rating_mean + norm_const * weighted_avg", "def find_predictions(actives, train_rdd_gbitem_dict, train_rdd_gbuser_dict, num_items):\n active_user = actives[0][0]\n active_item = actives[0][1]\n\n # -----------------------------------\n # train_rdd_gbitem_dict = (item, ([(user,r),(user,r)...],avg_of_item))\n # train_rdd_gbuser_dict = (user, [(item,r),(item,r)...]\n\n if active_user not in train_rdd_gbuser_dict and active_item not in train_rdd_gbitem_dict:\n return (active_user, active_item), 2.5\n\n # all user, ratings that have rated active_item\n if active_item in train_rdd_gbitem_dict:\n active_item_avg = train_rdd_gbitem_dict[active_item][1]\n active_item_dict = dict(train_rdd_gbitem_dict[active_item][0]) # {user: rating, user: rating, ...}\n else:\n # item not found in training set\n # new item problem.\n average_of_user_list = train_rdd_gbuser_dict[active_user]\n average_of_user = sum([x[1] for x in average_of_user_list]) / len(average_of_user_list)\n return (active_user, active_item), average_of_user\n\n # user rated items - all (item, ratings) that the user has rated\n if active_user in train_rdd_gbuser_dict:\n active_user_rated_items = train_rdd_gbuser_dict[active_user] # [(item, rating), (item, rating), ...]\n else:\n # user not found in training set\n # new user problem.\n return (active_user, active_item), train_rdd_gbitem_dict[active_item][1]\n\n similarity_list = []\n for item, rating in active_user_rated_items:\n item_dict = dict(train_rdd_gbitem_dict[item][0])\n item_avg = train_rdd_gbitem_dict[item][1]\n similarity = find_similarity(dict(active_item_dict), active_item_avg, dict(item_dict), item_avg)\n similarity_list.append((rating, similarity))\n\n # Have obtained similarity list for active item and item from the above code.\n # Filter according to a top 'N' items and then take avg rating.\n # similarity_list.sort(key=lambda x: x[1], reverse=True)\n # similarity_list = similarity_list[:len(similarity_list) // 4]\n # similarity_list = [(x[0], x[1]*abs(x[1])**1.5) for x in similarity_list]\n # print(similarity_list)\n pred_rating = find_weighted_average(similarity_list, num_items)\n\n # for i in similarity_list:\n # print(i)\n # print(\"Pred-rating: \", pred_rating)\n\n return (active_user, active_item), pred_rating", "def predict_rating(user_id,item_id):\n user_preference = latent_user_preferences[user_id]\n item_preference = latent_item_features[item_id]\n return user_preference.dot(item_preference)", "def compute_user_user_sim_base_on_common_items(self):\n self.sim_matrix = {}\n for item in self.items.values():\n # convert to list of tuples for indexing\n users = list(item.covered_users.items())\n item_popularity = len(users)\n # iter through all user pairs\n for i in 
range(len(users)-1):\n for j in range(i+1, len(users)):\n user_A_info, user_B_info = users[i], users[j]\n # remember to update pair wise!\n self.update_user_user_sim(user_A_info, user_B_info,\n item_popularity)\n self.update_user_user_sim(user_B_info, user_A_info,\n item_popularity)", "def predict_rating(self, user_id, item_id):\n user_preference = self.latent_user_preferences[user_id]\n item_feature = self.latent_item_features[item_id]\n return user_preference.dot(item_feature)", "def predict(self, user_id, item_id):\n # DONEreturn prediction for given pair\n return self._user_factors[user_id, : ].dot(self._item_factors[item_id, :])", "def item_based_recommend(user_id, user_item_time_dict, item2item_sim, sim_item_topk, recall_item_num, item_topk_click):\n \n # fetch the user's history clicks\n hist_items = user_item_time_dict[user_id]\n user_hist_items = []\n for (item_list, click_time) in hist_items:\n user_hist_items.extend(item_list)\n user_hist_items_ = {item_id for item_id in user_hist_items}\n \n item_rank = {}\n for item in user_hist_items:\n try:\n for another_item, wij in sorted(item2item_sim[item].items(), key=lambda x: x[1], reverse=True)[:sim_item_topk]:\n if another_item in user_hist_items_:\n continue\n\n item_rank.setdefault(another_item, 0)\n item_rank[another_item] += wij\n except:\n continue\n \n # fill the item_rank if the number of news in item_rank is less than recall_item_num\n if len(item_rank) < recall_item_num:\n for i, item in enumerate(item_topk_click):\n if item in item_rank.items():\n continue\n item_rank[item] = - i - 100 # set a random negative number\n if len(item_rank) == recall_item_num:\n break\n \n item_rank = sorted(item_rank.items(), key=lambda x: x[1], reverse=True)[:recall_item_num]\n \n return item_rank", "def calculate_pairwise_user_similarity(self, user1_preferences, user2_preferences):\r\n\r\n shared_items = set(user1_preferences.indices) & set(user2_preferences.indices)\r\n\r\n all_items = set(user1_preferences.indices) | set(user2_preferences.indices)\r\n\r\n num_agreements = sum(1 for x in shared_items if abs(user1_preferences[0, x] - user2_preferences[0, x]) <= 2)\r\n\r\n return (num_agreements / len(all_items) if len(all_items) > 0 else 0)", "def pair_items_mapper(self, user_id, values):\r\n\t pass #your code here\r", "def _get_recommend(self, user):\n return self.user_cf.calculate(target_user_id=user, user_n=self.user_n,\n item_n=self.item_n, type=2)", "def recommend(self, user_id, N=10):\n scores = self.user_factors[user_id] @ self.product_factors.T\n best = np.argpartition(scores, -N)[-N:]\n return sorted(zip(best, scores[best]), key=lambda x: -x[1])", "def prediction(pair, uid_allneighbor_info, user_bd, uid):\n iid, real_rating, time = pair\n average_uid_rating = user_bd.value[uid][0]\n sim_rating = []\n for info in uid_allneighbor_info:\n uid, sim, ratings = info\n sim_rating += [\n (rating[0], sim, rating[1] - average_uid_rating)\n for rating in ratings if iid in rating[0]]\n\n if len(sim_rating) != 0:\n sim_rating = [\n (line[0], line[1] * line[2], abs(line[1]))\n for line in sim_rating]\n predicted_rating = average_uid_rating + sum(\n map(lambda line: line[1], sim_rating)) / sum(\n map(lambda line: line[2], sim_rating))\n else:\n predicted_rating = average_uid_rating\n return iid, real_rating, self.bound_rating(predicted_rating)", "def ParejaRecommendation(user_preferences: dict, matcher: NodeMatcher):\r\n parejaE = user_preferences[\"tiene_pareja\"]\r\n equal_styles = list(matcher.match(\"User\", pareja = parejaE))\r\n return 
equal_styles", "def get_user_preferences(self, user_id):\n # User training ratings\n user_ratings = self.ratings[(self.ratings['user_id'] == user_id)]\n\n # Get rating-movie information\n movies_user = pd.merge(user_ratings, self.movies, on='movie_id')\n\n # Get count of genres\n genres_sum = movies_user[self.genres].sum()\n genres_sum_mat = genres_sum.as_matrix()\n\n # Weight by average of genre within user\n mean_ratings = np.zeros(len(self.genres))\n for i, g in enumerate(genres_sum.index):\n mean_ratings[i] = movies_user[movies_user[g] == True]['rating'].mean()\n\n # Multiply and replace nans to 0\n cleared = np.nan_to_num(genres_sum_mat * mean_ratings)\n return cleared / np.sum(cleared)", "def recommendation_item_based(user_id, item_id):\n try:\n user_id = int(user_id)\n item_id = int(item_id)\n return str( item_based( user_item, user_id, item_id) )\n except AssertionError as ae:\n return str(ae)\n except Exception as e:\n traceback.print_exc()\n return \"0\"", "def __insertandretrieve_recommendation(self, userid=None, itemid=None):\n def recommendation2rec(recommendationsip=None):\n recs = []\n for recommendation in recommendationsip:\n recs.append(self.__itemidx2id[recommendation[0]])\n return recs\n userid = str(userid)\n itemid = str(itemid)\n if userid in list(self.__userid2idx.keys()):\n useridx = self.__userid2idx[userid]\n userarray = numpy.asarray([useridx, ] * len(self.__itemidx2id.keys()))\n itemarray = numpy.asarray(list(self.__itemidx2id.keys()))\n predicted_ratings = self.__recommender1.predict([userarray, itemarray], batch_size=10, verbose=0)\n item_rating = {}\n for item, pr in zip(itemarray, predicted_ratings):\n item_rating[item] = pr[0]\n recommendations = sorted(item_rating.items(), key=lambda value: value[1], reverse=True)[:self._num_recommendations]\n recommendations = recommendation2rec(recommendationsip=recommendations)\n else:\n if itemid in list(self.__itemid2idx.keys()):\n itemidx = self.__itemid2idx[itemid]\n recommendations = self.__recommender2.similar_items(itemidx, N=self._num_recommendations)\n recommendations = recommendation2rec(recommendationsip=recommendations)\n else:\n recommendations = list(self.__itemid2idx.keys())\n random.shuffle(recommendations)\n recommendations = recommendations[:self._num_recommendations]\n return recommendations", "def rank_potential_items(self, target_user_id, top_k_users):\n items_rank = {}\n target_user = self.users[target_user_id]\n for user_id in top_k_users:\n sim_user = self.users[user_id]\n sim = self.sim_matrix[target_user_id][user_id]\n for item_id, item_time in sim_user.covered_items.items():\n if self.ensure_new and (item_id in target_user.covered_items):\n continue # skip item that already been bought\n if self.timestamp:\n # note that time context model cannot be evaluated\n # properly using offline data, this is just a demon\n # user's interest for this history item\n t_now = 1146454548\n time_elapse = Model.time_elapse(item_time, t_now)\n score = time_elapse*sim\n else:\n score = sim\n try:\n items_rank[item_id] += score\n except KeyError:\n items_rank[item_id] = score\n # assert len(items_rank) >= self.n\n return items_rank", "def userSuggestions(database):\n firstname=str(input(\"who do you want to have follow suggestions for :\"))\n usr,find=getByName(database,firstname)\n if not find:\n print(\"the User could not be found\")\n return\n else:\n following=[]\n followers=[]\n for folower in usr.folowed:\n followers.append(folower)\n for folowed in usr.folow:\n following.append(folowed)\n results=[]\n 
print(\"On what do you want your suggestions to be based on?\\n1. Mutual Interests\\n2. Mutual Connections\\n3. Both\")\n choice=int(input(\"Your choice :\"))\n for key ,usrs in database.items():\n if key not in following: \n correspondant=0\n if choice == 1 or choice == 3:\n for interest in usr.interest:\n if interest in usrs.interest:\n correspondant+=1\n if choice == 2 or choice == 3:\n for folower in followers:\n for folows in usrs.folowed:\n if key == folows:\n correspondant+=1\n results.append([key,correspondant])\n for i in range(len(results)):\n for j in range(0, len(results)-i-1):\n if results[j][1] > results[j+1][1] :\n results[j], results[j+1] = results[j+1], results[j]\n for k in range(5):\n print(results[k][0])", "def get_player_item_val(self, choice_of_item, user):\n\n for key in user.player_inventory:\n\n if choice_of_item == key:\n return user.player_inventory[key]\n\n return False", "def pred_for_user(self,u):\r\n ids=np.where(self.Y_data_n[:,0]==u)[0]\r\n items_rated_by_u=Y_data_n[ids,1].tolist()\r\n pred_ratings=[]\r\n for i in range(self.n_items):\r\n if i not in items_rated_by_u:\r\n pred_ratings.append(self.pred(u,i))\r\n return pred_ratings", "def profesionalRecommendation(user_preferences: dict, matcher: NodeMatcher):\r\n profesional = user_preferences[\"vida_profesional\"]\r\n equal_styles = list(matcher.match(\"User\", prof = profesional))\r\n return equal_styles", "def lookup(self,user_preferences):\n res = list()\n \n fit_area = set()\n fit_price = set()\n fit_food = set()\n \n if user_preferences[0] == \"any\" or user_preferences[0] == 0:\n fit_area = set(range(len(self.area)))\n else:\n for i,a in enumerate(self.area):\n if a == user_preferences[0]:\n fit_area.add(i)\n if user_preferences[1] == \"any\" or user_preferences[1] == 0:\n fit_price = set(range(len(self.price_range)))\n else:\n for j,p in enumerate(self.price_range):\n if p == user_preferences[1]:\n fit_price.add(j)\n if user_preferences[2] == \"any\" or user_preferences[2] == 0:\n fit_food = set(range(len(self.food_types)))\n else:\n for k,f in enumerate(self.food_types):\n if f == user_preferences[2]:\n fit_food.add(k)\n option_numbers = fit_area.intersection(fit_price, fit_food)\n if option_numbers:\n for i in option_numbers:\n res.append(self.restaurant_names[i])\n \n return res", "def popularity(self,train = None,test = None,k = 8,nitem = 10):\n train = train or self.traindata\n test = test or self.testdata\n item_popularity = dict()\n for user ,items in train.items():\n for item in items.keys():\n item_popularity.setdefault(item,0)\n item_popularity[item] += 1\n ret = 0\n n = 0\n for user in train.keys():\n rank = self.recommend(user, train, k = k, nitem = nitem)\n for item ,_ in rank.items():\n ret += math.log(1+item_popularity[item])\n n += 1\n return ret / (n * 1.0)", "def get_recommendations(prefs, person, similarity=sim_pearson):\n totals = {}\n similarity_sums = {}\n\n for other in prefs:\n if other == person:\n continue\n\n sim = similarity(prefs, person, other)\n\n if sim <= 0:\n continue\n\n for item in prefs[other]:\n if item not in prefs[person] or prefs[person][item] == 0:\n totals.setdefault(item, 0)\n totals[item] += prefs[other][item] * sim\n similarity_sums.setdefault(item, 0)\n similarity_sums[item] += sim\n\n # Normalized list\n rankings = [(total / similarity_sums[item], item)\n for item, total in totals.items()]\n\n # Returns normalized score, not an r that would be between -1 and 1\n rankings.sort()\n rankings.reverse()\n return rankings", "def 
recommend(self):\n\t\t\n\t\titems = self.unique_biz_id.items()\n\n\t\t# business id is essentially restaurant id, replace the naming \n\t\tfor rest1, i1 in items:\n\t\t\tfor rest2, i2 in items:\n\t\t\t\tif i1 < i2:\n\t\t\t\t\tsim, nsup = self.calculate_similarity( rest1 = rest1, rest2 = rest2 )\n\t\t\t\t\tself.database_sim[i1][i2] = sim\n\t\t\t\t\tself.database_sim[i2][i1] = sim\n\t\t\t\t\tself.database_sup[i1][i2] = nsup\n\t\t\t\t\tself.database_sup[i2][i1] = nsup\n\t\t\t\telif i1 == i2:\n\t\t\t\t\tnsup = self.df[ self.df['business_id'] == rest1 ]['user_id'].count()\n\t\t\t\t\tself.database_sim[i1][i1] = 1.0\n\t\t\t\t\tself.database_sup[i1][i1] = nsup", "def calculate_item_relevance_scores(self, user_similarity_profile):\r\n scores = user_similarity_profile.dot(self.ratings_matrix.matrix)\r\n return self.ratings_matrix.normalize_score_vector(scores)", "def prediction(self, user, item):\n return self._prob_to_class(self(user, item))", "def __insertandretrieve_recommendation(self, userid=None, itemid=None):\n def recommendation2rec(recommendationsip=None):\n recs = []\n for recommendation in recommendationsip:\n recs.append(self.__itemidx2id[recommendation[0]])\n return recs\n userid = str(userid)\n itemid = str(itemid)\n if userid in list(self.__userid2idx.keys()):\n useridx = self.__userid2idx[userid]\n recommendations = self.__recommender.recommend(useridx, self.__useritem, N=self._num_recommendations)\n recommendations = recommendation2rec(recommendationsip=recommendations)\n else:\n if itemid in list(self.__itemid2idx.keys()):\n itemidx = self.__itemid2idx[itemid]\n recommendations = self.__recommender.similar_items(itemidx, N=self._num_recommendations)\n recommendations = recommendation2rec(recommendationsip=recommendations)\n else:\n recommendations = list(self.__itemid2idx.keys())\n random.shuffle(recommendations)\n recommendations = recommendations[:self._num_recommendations]\n return recommendations", "def recommend(r ,username, users):\r\n # first find nearest neighbor\r\n nearest = computeNearestNeighbor(r, username, users)[0][1]\r\n recommendations = []\r\n # now find bands neighbor rated that user didn't\r\n neighborRatings = users[nearest]\r\n userRatings = users[username]\r\n for artist in neighborRatings:\r\n if not artist in userRatings:\r\n recommendations.append((artist, neighborRatings[artist]))\r\n # using the fn sorted for variety - sort is more efficient\r\n return sorted(recommendations, key=lambda artistTuple: artistTuple[1], reverse = True)", "def score(self, user_idx, item_idx=None):\n if item_idx is None:\n if self.train_set.is_unk_user(user_idx):\n raise ScoreException(\n \"Can't make score prediction for (user_id=%d)\" % user_idx\n )\n\n known_item_scores = self.V.dot(self.U[user_idx, :])\n return known_item_scores\n else:\n if self.train_set.is_unk_user(user_idx) or self.train_set.is_unk_item(\n item_idx\n ):\n raise ScoreException(\n \"Can't make score prediction for (user_id=%d, item_id=%d)\"\n % (user_idx, item_idx)\n )\n\n user_pred = self.V[item_idx, :].dot(self.U[user_idx, :])\n\n user_pred = sigmoid(user_pred)\n if self.train_set.min_rating == self.train_set.max_rating:\n user_pred = scale(user_pred, 0.0, self.train_set.max_rating, 0.0, 1.0)\n else:\n user_pred = scale(\n user_pred,\n self.train_set.min_rating,\n self.train_set.max_rating,\n 0.0,\n 1.0,\n )\n\n return user_pred", "def user_based_recommendation(\n self, test_dataRDD,\n user_based_dict_bd, userbased_sim_pair_dict_bd, user_info_bd):\n sim_pair_dict_keys = 
set(userbased_sim_pair_dict_bd.value.keys())\n return test_dataRDD.filter(\n lambda line: line[0] in sim_pair_dict_keys).map(\n lambda line: self.user_based_prediction(\n line, user_based_dict_bd,\n userbased_sim_pair_dict_bd, user_info_bd))", "def popularity(self, user_list):\n item_popular = Counter(self.train['movieId'].values)\n ret = 0\n n = 0\n print('\\nCalculate popularity: ')\n for user in tqdm(user_list):\n recom_data = self._get_recommend(user)\n for rec in set([data[0] for data in recom_data]):\n ret += math.log(1 + item_popular.get(rec))\n n += 1\n ret /= n * 1.0\n print('\\npopularity: ', ret)\n return ret", "def prediction(uid, pair, rating_bd, sim_bd, item_bd):\n iid, real_rating = pair[0], pair[1]\n if iid not in sim_bd.value.keys():\n return ()\n iid_neighbors = [\n (i[0], i[1], rating_bd.value[i[0]]) for i in sim_bd.value[iid]]\n average_iid_rating = item_bd.value[iid][0]\n sim_rating = []\n for info in iid_neighbors:\n niid, nsim, ratings = info\n sim_rating += [\n (iid, nsim, rating[1] - item_bd.value[niid][0], rating[2])\n for rating in ratings if uid in rating[0]]\n if len(sim_rating) != 0:\n sim_ratings = [\n (line[1] * line[2], abs(line[1]), line[3])\n for line in sim_rating]\n predicted_rating_no_decay = average_iid_rating + sum(\n map(lambda line: line[0], sim_ratings)) / sum(\n map(lambda line: line[1], sim_ratings))\n predicted_rating_decay = \\\n average_iid_rating + add_decay(sim_ratings)\n else:\n predicted_rating_no_decay = average_iid_rating\n predicted_rating_decay = average_iid_rating\n return iid, real_rating, \\\n self.bound_rating(predicted_rating_no_decay), \\\n self.bound_rating(predicted_rating_decay)", "def part_two(rucksacks: list) -> int:\n summ = 0\n for i in range(0, len(rucksacks), 3):\n first_group = set(rucksacks[i])\n second_group = set(rucksacks[i + 1])\n third_group = set(rucksacks[i + 2])\n badge = first_group.intersection(second_group).intersection(third_group)\n badge = list(badge)[0] # extract item id from set\n summ += PRIORITY.get(badge, 0)\n return summ", "def update_weight(user, user_favs, rec_user, rec_user_favs):\n counter = 0\n for photo in user_favs:\n if photo in rec_user_favs:\n counter += 1\n weight, created = Weight.objects.get_or_create(against=user, to=rec_user)\n weight.weight = float(counter)/len(rec_user_favs)\n weight.save()", "def recomenadation_user_based(user_id):\n try:\n user_id = int(user_id)\n return str( user_based( rec_matrix, user_item, user_id ) )\n except AssertionError as ae:\n # if index + 1 not in df.userid.value_counts().index:\n # return {\"sort_ids\": [int(item) for item in df.item_id.value_counts().index[:3]]}\n return str(ae)\n except Exception as e:\n traceback.print_exc()\n return \"0\"", "def precision(self, user_list):\n hit = 0\n all_recom = 0\n print('Calculate precision: ')\n for user in tqdm(user_list):\n recom_data = self._get_recommend(user)\n recom_item = set([data[0] for data in recom_data])\n user_item = set(\n self.test[self.test['userId'] == user]['movieId'].values)\n overlap = recom_item & user_item\n hit += len(overlap)\n all_recom += len(recom_item)\n print('\\nprecision is: ', hit / (all_recom * 1.0))\n return hit / (all_recom * 1.0)", "def get_best_matching(self):\r\n from django.db.models import Q, Avg\r\n import copy\r\n import operator\r\n if self.user is None:\r\n raise User.DoesNotExist\r\n users = User.objects.all()\r\n if self.type == FRIENDS_ONLY:\r\n friends = Friends.objects.filter(Q(user_one_id=self.user) | Q(user_two_id=self.user))\r\n if len(friends) > 2:\r\n 
users = users.filter(Q(id=friends.values('user_one_id')) | Q(id=friends.values('user_two_id')))\r\n\r\n user_sims = {}\r\n prefs = {}\r\n for user in users:\r\n tab = {}\r\n scores = Score.objects.values('game_id', 'score').filter(user_id=user)\r\n for score in scores:\r\n tab.update({score['game_id']: score['score']})\r\n prefs.update({copy.deepcopy(user.id): copy.deepcopy(tab)})\r\n\r\n for user in users:\r\n sim = self.pearson(prefs, self.user, user.id)\r\n user_sims.update({user.id: sim})\r\n\r\n print(user_sims)\r\n del user_sims[self.user] # deletion of user for whom the analysis is beeing performed\r\n user_sims = sorted(user_sims.items(), key=operator.itemgetter(1), reverse=True) # dictionary containing user_ids and users' similarities\r\n if len(user_sims) < 3:\r\n return self.get_most_popular()\r\n\r\n games_f = Score.objects.values('game_id', 'score').filter(user_id=user_sims[0][0]).order_by('-score')[:3]\r\n games_s = Score.objects.values('game_id', 'score').filter(user_id=user_sims[1][0]).order_by('-score')[:3]\r\n\r\n recommended_games = {}\r\n grd = {}\r\n games_f_dict = dict([(g['game_id'], g['score']) for g in games_f])\r\n recommended_games.update(dict(sorted(games_f_dict.items(), key=operator.itemgetter(1), reverse=True)))\r\n\r\n games_s_dict = dict([(g['game_id'], g['score']) for g in games_s])\r\n recommended_games.update(dict(sorted(games_s_dict.items(), key=operator.itemgetter(1), reverse=True)))\r\n\r\n for game in recommended_games:\r\n scores = list(Score.objects.values('game_id').filter(game_id=game).annotate(Avg('score')))\r\n idn = scores[0]['game_id']\r\n avg = scores[0]['score__avg']\r\n grd.update({idn: avg})\r\n\r\n return grd", "def _compute_unique_approval_scores(self, profile: list[set[int]]) -> list[int]:\n unique_approval_scores = np.zeros(self.m, dtype=int)\n for party in range(0, self.m):\n for ballot in profile:\n if ballot == {party}:\n unique_approval_scores[party] += 1\n return list(unique_approval_scores)", "def calc_score(self, user_id, item_id): \n p = np.dot(self.U[user_id], self.V[item_id])\n if self.trunc_score_rule==None:pass\n else: p=self.trunc_score_rule(p)\n \n return p", "def goalsRecommendation(user_preferences: dict, matcher: NodeMatcher):\r\n goalsE = user_preferences[\"Metas_similares\"]\r\n equal_styles = list(matcher.match(\"User\", goals = goalsE))\r\n return equal_styles", "def collect_items_user_dict(self, test_data):\n items_to_fill = {}\n for row in test_data:\n user = row[0]\n item = row[1]\n if item not in items_to_fill:\n items_to_fill[item] = []\n items_to_fill[item] += [user.item()]\n\n return items_to_fill", "def classify(user, itemName, itemVector):\n # first find nearest neighbor\n nearest = computeNearestNeighbor(itemName, itemVector, items)[0][1]\n rating = users[user][nearest]\n return rating", "def score(self, user_idx, item_idx=None):\n if self.variant == 'c2pf' or self.variant == 'tc2pf':\n if item_idx is None:\n user_pred = self.Beta * self.Theta[user_idx, :].T + self.Xi * self.Theta[user_idx, :].T\n else:\n user_pred = self.Beta[item_idx, :] * self.Theta[user_idx, :].T + self.Xi * self.Theta[user_idx, :].T\n elif self.variant == 'rc2pf':\n if item_idx is None:\n user_pred = self.Xi * self.Theta[user_idx, :].T\n else:\n user_pred = self.Xi[item_idx,] * self.Theta[user_idx, :].T\n else:\n if item_idx is None:\n user_pred = self.Beta * self.Theta[user_idx, :].T + self.Xi * self.Theta[user_idx, :].T\n else:\n user_pred = self.Beta[item_idx, :] * self.Theta[user_idx, :].T + self.Xi * self.Theta[user_idx, :].T\n 
# transform user_pred to a flatten array,\n user_pred = np.array(user_pred, dtype='float64').flatten()\n\n return user_pred", "def prediction_item(item_id):\r\n # Data preprosessing\r\n reader = Reader(rating_scale=(0, 5))\r\n load_df = Dataset.load_from_df(ratings_df, reader)\r\n a_train = load_df.build_full_trainset()\r\n\r\n predictions = []\r\n for ui in a_train.all_users():\r\n predictions.append(model.predict(iid=item_id, uid=ui, verbose=False))\r\n return predictions", "def _pval_pairs(self, idx0, idx1):\n pass", "def likingdatingapps(user_preferences: dict, matcher: NodeMatcher):\r\n appsPreferidas = user_preferences[\"app_citas\"]\r\n equal_styles = list(matcher.match(\"User\", apps = appsPreferidas))\r\n return equal_styles", "def seen(user, item):\n print(\"---\", item.seen_by(user))\n return item.seen_by(user)", "def recall(self, user_list):\n hit = 0\n like_item = 0\n print('\\nCalculate recall: ')\n for user in tqdm(user_list):\n recom_data = self._get_recommend(user)\n recom_item = set([data[0] for data in recom_data])\n user_item = set(\n self.test[self.test['userId'] == user]['movieId'].values)\n overlap = recom_item & user_item\n hit += len(overlap)\n like_item += len(user_item)\n print('\\nrecall is: ', hit / (like_item * 1.0))\n return hit / (like_item * 1.0)", "def mainRecommendation(user_preferences: dict, matcher: NodeMatcher):\r\n print(\"***************************************************\\n Recomendaciones principales\\n\"\r\n \"***************************************************\")\r\n pareja = ParejaRecommendation(dict, matcher)\r\n apps = likingdatingapps(dict, matcher)\r\n dificultad = difficultydates(dict, matcher)\r\n importancia = samehobbies(dict, matcher)\r\n imp = study(dict, matcher)\r\n gusto = musictaste(dict, matcher)\r\n region = sameregion(dict, matcher)\r\n gustoDif = different(dict, matcher)\r\n Habits = habits(dict, matcher)\r\n goals = goalsRecommendation(dict, matcher)\r\n prof = profesionalRecommendation(dict, matcher)\r\n similar = similarRecommendation(dict, matcher)\r\n\r\n listaopciones = [pareja, apps, dificultad, importancia, imp, gusto, region, gustoDif, Habits, goals, prof, similar]\r\n\r\n Prospectos = {}\r\n for option in listaopciones:\r\n for element in option:\r\n if Prospectos.has_key(element[\"nombre\"]):\r\n Prospectos[element[\"nombre\"]] = 1\r\n else:\r\n Prospectos[element[\"nombre\"]] = Prospectos[element[\"nombre\"]] + 1", "def item_rank(self, u, a, b, alphas, theta, transition_prob):\n no_of_states = len(alphas[u][0])\n\n # calculate distribution over the states for the user at time t+1\n p_t_plus_1 = []\n for k in range(no_of_states):\n total = 0\n for l in range(no_of_states):\n total += alphas[u][-1][k] * transition_prob[l][k]\n p_t_plus_1.append(total)\n# # print('\\np_t_plus_u1')\n # pprint(p_t_plus_1)\n\n item_rank = defaultdict(float)\n for i in range(len(theta)): # for each item\n item_rank[i] = -sum(p_t_plus_1[k] * (1 + b[k] * theta[i][k])**(-a[k]) for k in range(no_of_states))\n\n# # print(item_rank)\n items = sorted(item_rank, key=item_rank.__getitem__, reverse=True)\n\n return items", "def compute_user_local_sensitivity(sc, dataset, user_id, num_iters_ls):\n\n res = defaultdict(lambda: 0.0)\n\n original_recs, original_qii = compute_recommendations_and_qii(sc, dataset,\n user_id)\n original_recs = recommendations_to_dd(original_recs)\n\n res[\"recommendee_user_id\"] = user_id\n res[\"recommendee_recs_l1_norm\"] = l1_norm(original_recs)\n res[\"recommendee_qii_l1_norm\"] = l1_norm(original_qii)\n 
res[\"recommendee_recs_l0_norm\"] = len(original_recs)\n res[\"recommendee_qii_l0_norm\"] = len(original_qii)\n res[\"perturbations\"] = []\n\n all_users = get_user_list(dataset)\n for x in xrange(num_iters_ls):\n if perturb_specific_user:\n other_user_id = perturb_specific_user\n else:\n other_user_id = random.choice(list(set(all_users) - {user_id}))\n print \"Perturbing user\", other_user_id, \"(\", x+1, \"out of\",\\\n num_iters_ls, \")\"\n perturbed_dataset = perturb_user_ratings(sc, dataset, other_user_id)\n start = time.time()\n recs, qii = compute_recommendations_and_qii(sc, perturbed_dataset, user_id)\n stop = time.time()\n recs = recommendations_to_dd(recs)\n rec_ls = calculate_l1_distance(original_recs, recs)\n qii_ls = calculate_l1_distance(original_qii, qii)\n\n report = {}\n report[\"perturbed_user_id\"] = other_user_id\n report[\"perturbed_recs_l1_norm\"] = l1_norm(recs)\n report[\"perturbed_qii_l1_norm\"] = l1_norm(qii)\n report[\"perturbed_recs_l0_norm\"] = len(recs)\n report[\"perturbed_qii_l0_norm\"] = len(qii)\n report[\"recs_ls\"] = rec_ls\n report[\"qii_ls\"] = qii_ls\n report[\"recs_ls_norm\"] = rec_ls/float((len(recs)*4))\n report[\"qii_ls_norm\"] = qii_ls/float((len(qii)*4))\n print \"Local sensitivity of recs: \", rec_ls/float((len(recs)*4))\n print \"Local sensitivity of QII: \", qii_ls/float((len(qii)*4))\n report[\"computation_time\"] = stop - start\n\n\n res[\"perturbations\"].append(report)\n\n for per in res[\"perturbations\"]:\n res[\"avg_recs_ls\"] += float(per[\"recs_ls\"])/len(res[\"perturbations\"])\n res[\"max_recs_ls\"] = max(res[\"max_recs_ls\"], per[\"recs_ls\"])\n res[\"avg_recs_ls_norm\"] +=\\\n float(per[\"recs_ls_norm\"])/len(res[\"perturbations\"])\n res[\"max_recs_ls_norm\"] = max(res[\"max_recs_ls_norm\"],\n per[\"recs_ls_norm\"])\n res[\"avg_qii_ls\"] += float(per[\"qii_ls\"])/len(res[\"perturbations\"])\n res[\"max_qii_ls\"] = max(res[\"max_qii_ls\"], per[\"qii_ls\"])\n res[\"avg_qii_ls_norm\"] +=\\\n float(per[\"qii_ls_norm\"])/len(res[\"perturbations\"])\n res[\"max_qii_ls_norm\"] = max(res[\"max_recs_qii_norm\"],\n per[\"qii_ls_norm\"])\n return dict(res)", "def best_policy(self, U):\n pi = {}\n utility = {s:dict() for s in self.states}\n for s in self.states:\n for a in self.available(s):\n utility[s][a] = self.expected_utility(a,s,U)\n pi[s] = utility[s].keys()[utility[s].values().index(max(utility[s].values()))]\n return pi", "def compute_kappa_score(self, scorer1, scorer2, items, matrix_weights):\r\n #matrice nb items\r\n df_n_items = pd.DataFrame(data=0, index=items, columns=items)\r\n for score1, score2 in zip(scorer1, scorer2):\r\n df_n_items[score1][score2] = df_n_items[score1][score2] + 1\r\n #nb items * total number of scoring items\r\n df_proba = df_n_items / len(scorer1)\r\n #df_proba with weights\r\n df_proba_w = df_proba * matrix_weights\r\n #relative observed agreement among raters\r\n p_a = df_proba_w.sum().sum()\r\n #hypothetical probability of chance agreement\r\n p_e = 0\r\n for lig in df_proba.index:\r\n for col in df_proba.columns:\r\n p_e = p_e + df_proba.loc[lig, :].sum() * \\\r\n df_proba.loc[:, col].sum() * matrix_weights.loc[lig, col]\r\n #kappa score\r\n kappa = 1 - (1 - p_a) / (1 - p_e)\r\n return kappa", "def get_preferences(buyer_count=5):\n\n\n buyer_wants = np.random.randint(1, 4, buyer_count)\n seller_count = sum(buyer_wants)\n\n buyer_start = range(seller_count)\n seller_start = range(buyer_count)\n\n buyer_prefs = list()\n seller_prefs = list()\n\n for i in range(buyer_count):\n 
random.shuffle(buyer_start)\n buyer_prefs.append(buyer_start[:])\n\n for i in range(seller_count):\n random.shuffle(seller_start)\n seller_prefs.append(seller_start[:])\n\n return buyer_prefs, seller_prefs, buyer_wants", "def get_recommendations(name, data):\r\n #sorts preferences in alphabetical order\r\n #do this to make it easier to compare\r\n for key in data:\r\n data[key] = selection_sort(data[key])\r\n most_similar_key = \"\"\r\n max_matches = 0\r\n for key in data:\r\n if not(key[-1] == \"$\" or data[key] == data[name]):\r\n \"\"\"if the person is not private or does not have the same data\"\"\"\r\n matches = num_matches(data[key], data[name])\r\n if matches > max_matches:\r\n most_similar_key = key\r\n max_matches = matches\r\n if most_similar_key == \"\":\r\n print(\"No recommendations available at this time\")\r\n return 1\r\n else:\r\n final_recommendations = []\r\n for x in data[most_similar_key]:\r\n if x not in data[name]:\r\n final_recommendations += [x]\r\n return final_recommendations", "def compare_for_list_key_value(self, item_1: Tuple[keyType, valueType], item_2: Tuple[keyType, valueType]) -> int:\n # When one element is tuple and the other is another type, such as str,\n # using hash value can make the '<' operator function function normally.\n if hash(item_1[0]) != hash(item_2[0]):\n if hash(item_1[0]) < hash(item_2[0]):\n return -1\n return 1\n else:\n tmp_1 = item_1[1]\n tmp_2 = item_2[1]\n # 'values' could be list or set which don't have hash() function.\n if isinstance(item_1[1], (list, set)):\n tmp_1 = tuple(item_1[1])\n if isinstance(item_2[1], (list, set)):\n tmp_2 = tuple(item_2[1])\n if hash(tmp_1) < hash(tmp_2):\n return -1\n return 1", "def item_to_ids(items, user):\r\n sizes = [\"10\", \"12\", \"14\", \"16\", \"25\", \"30\", \"35\", \"40\"]\r\n if not items:\r\n return []\r\n ids = []\r\n names_to_id_product = get_names_preconfigured(user)\r\n for item in items:\r\n for name, product_id in names_to_id_product.items():\r\n # CLEAN TO REMOVE SMALL MEDIUM LARGE, AND STRIP\r\n item = item.strip()\r\n for size in sizes:\r\n if size in item:\r\n if size == \"10\" or size == \"25\":\r\n replace = \"Small\"\r\n elif size == \"12\" or size == \"30\":\r\n replace = \"Medium\"\r\n elif size == \"14\" or size == \"35\":\r\n replace = \"Large\"\r\n elif size == \"16\" or size == \"40\":\r\n replace = \"X-Large\"\r\n item = item.replace(size + '\"', replace).replace(size + \"'\", replace)\r\n # print(item, \" | \", name, editDistanceDP(item, name, len(item), len(name)) / (len(name)))\r\n if edit_distance_dp(item, name, len(item), len(name)) / (len(name)) < .3 or edit_distance_dp(\r\n item.replace(\"Pizza\", \"\"), name.replace(\"Dipping \", \"\"), len(item.replace(\"Pizza\", \"\")),\r\n len(name.replace(\"Dipping \", \"\"))) / (len(name)) < .1:\r\n ids.append(product_id)\r\n break\r\n final_ids = []\r\n for id in ids:\r\n if \"F_\" in id:\r\n variants = ids_to_variants(user)\r\n replace = variants[id][0]\r\n if replace == \"STJUDE\":\r\n replace = \"STJUDE10\"\r\n final_ids.append(replace)\r\n else:\r\n final_ids.append(id)\r\n return final_ids\r\n # order.add_item('P12IPAZA') # add a 12-inch pan pizza\r\n # order.add_item('MARINARA') # with an extra marinara cup\r\n # order.add_item('20BCOKE') # and a 20oz bottle of coke\r\n return ['P12IPAZA', 'MARINARA', '20BCOKE']", "def top_matches(self, prefs, p1):\n #print 'top_matches', prefs, p1\n #print '\\n'\n return [(p2, self.similarity(prefs[p1], prefs[p2])) for p2 in prefs if p2 != p1]", "def 
_update_suspicion_2(self):\n\n for bucket in self.used_buckets:\n multiplier = 1 if bucket.attacked else -1\n for user in bucket.users:\n user.suspicion += (1 / len(bucket)) * multiplier", "def _update_suspicion_2(self):\n\n for bucket in self.buckets:\n multiplier = 1 if bucket.attacked else -1\n for user in bucket.users:\n user.suspicion += (1 / len(bucket)) * multiplier", "def predict(self, users, items):\n users_t = torch.tensor(users, dtype=torch.int64, device=self.device)\n items_t = torch.tensor(items, dtype=torch.int64, device=self.device)\n with torch.no_grad():\n scores = torch.mul(\n torch.cat(\n (self.user_encode(users_t)[0], self.user_emb(users_t)), dim=1\n ),\n torch.cat(\n (self.item_encode(items_t)[0], self.item_emb(items_t)), dim=1\n ),\n ).sum(dim=1)\n return scores", "def calculate_recommendations(self, vote_list, itemMatch, itemIgnored):\n #print \"--------------------------------------------------\"\n #print \"calculate_recommendations\"\n #print \"--------------------------------------------------\"\n\n # http://www.quuxlabs.com/blog/2010/09/matrix-factorization-a-simple-tutorial-and-implementation-in-python/\n\n # U = np.array('users')\n # D = np.array('video_games')\n\n # R = |U| cross |D|\n\n # We want to discover K latent features\n\n # Find\n # P(a | |U| corss K matrix)\n # Q(a | |D| cross K matrix)\n # Such that their product approximates R\n # R approx= P cross transpose(Q) = hat(R)\n #\n\n # r[i][j] = transpose(p)[i] * q[j]\n # = sum( 1..k, p[i][k] * q[k][j] )\n\n # e[i][j]**2 = (r[i][j] - hat(r)[i][j])**2\n # = (r[i][j] - sum( 1..K, p[i][k] * q[k][j]))**2\n # squared error, estimated rating can be either higher or lower than the real thing\n\n # find the gradient\n # diff(e[i][j]**2, p[i][k]) = -2*(r[i][j] - hat(r)[i][j]) * (q[k][j]) = -2*e[i][j] * q[k][j]\n # diff(e[i][j]**2, q[k][j]) = -2*(r[i][j] - hat(r)[i][j]) * (p[i][k]) = -2*e[i][j] * p[i][k]\n\n # update rules\n # alpha = settings.alpha # learning_rate\n # alpha = 0.0002 # learning_rate\n # p[i][k]' = p[i][k] + alpha * diff(e[i][j]**2, p[i][k])\n # = p[i][k] + 2 * alpha * e[i][j] * q[k][j]\n # q[k][j]' = q[k][j] + alpha * diff(e[i][j]**2, q[k][j])\n # = q[k][j] + 2 * alpha * e[i][j] * p[i][k]\n\n # training data\n # T = (u[i], d[j], r[i][j])\n # np.array()\n\n # iterate until convergance\n # E = sum((u[i], d[j], r[i][j]) in T, e[i][j])\n # = sum((u[i], d[j], r[i][j]) in T, r[i][j]\n # - sum(1..k, p[i][k]*q[k][j]))**2\n\n # regularization\n # beta = 0.02\n # e[i][j]**2 = (r[i][j] - sum(1..K, p[i][j]*q[k][j]))**2\n # + ((beta/2) * sum(1..K, norm(P)**2 + norm(Q)**2))\n #\n # p[i][k]' = p[i][k] + alpha * (2 * e[i][j] * q[k][j] - beta * p[i][k])\n # q[k][j]' = q[k][j] + alpha * (2 * e[i][j] * p[i][k] - beta * q[k][j])\n\n data = np.array(vote_list)\n\n encoder = OneHotEncoder()\n\n users = data[:,0]\n unique_users = list(set(users))\n for i in range(len(users)):\n users[i] = unique_users.index(users[i])\n\n video_games = data[:,1]\n unique_games = list(set(video_games))\n for i in range(len(video_games)):\n video_games[i] = unique_games.index(video_games[i])\n\n ratings = data[:,2]\n M = len(set(video_games))\n N = len(set(users))\n R = np.zeros((N,M))\n for i in range(len(users)):\n user = users[i]\n game = video_games[i]\n rating = ratings[i]\n R[user][game] = rating\n\n K = 2\n\n P = np.random.rand(N,K)\n Q = np.random.rand(M,K)\n\n nP, nQ = self.matrix_factorization(R, P, Q, K)\n nR = np.dot(nP, nQ.T)\n\n itemMatch = {}\n for i in range(N):\n user = unique_users[i]\n itemMatch[user] = []\n for j in 
range(M):\n if R[i][j] == 0:\n video_game = unique_games[j]\n recommendation = (video_game, nR[i][j])\n itemMatch[user].append(recommendation)\n itemMatch[None] = []\n print 'pmf recommendations', itemMatch.items()\n print '\\n'\n recommendations = itemMatch.items()\n\n # returns\n # [\n # (<user1>, [\n # (\"<object_identifier1>\", <score>),\n # (\"<object_identifier2>\", <score>),\n # ]),\n # (<user2>, [\n # (\"<object_identifier1>\", <score>),\n # (\"<object_identifier2>\", <score>),\n # ]),\n # ]\n\n return recommendations", "def agg_preferences(prefs_list):\n aggregated_preferences=defaultdict(lambda: 0)\n for els in prefs_list:\n if els[0]<els[1]:\n aggregated_preferences[(els[0],els[1])]+=1\n else:\n aggregated_preferences[(els[1],els[0])]-=1\n return aggregated_preferences", "def update_user_user_sim(self, user_A_info, user_B_info, item_popularity):\n user_A_id, user_A_time = user_A_info\n user_B_id, user_B_time = user_B_info\n try:\n row = self.sim_matrix[user_A_id]\n except KeyError:\n row = self.sim_matrix[user_A_id] = {} # all reference\n IIF = log(1+item_popularity)\n if self.timestamp:\n time_elapse = Model.time_elapse(user_A_time, user_B_time)\n score = 1*time_elapse/IIF\n else:\n score = 1/IIF\n try:\n row[user_B_id] += score\n except KeyError:\n row[user_B_id] = score", "def itembase(user_id):\n frame1 = pd.concat([pd.DataFrame(REVIEWS[x]) for x in REVIEWS])\n filtered_data = recommender.filtering_not_city()\n businesses = pd.DataFrame(filtered_data).set_index('business_id')\n frame2 = frame1.drop_duplicates(subset=[\"user_id\", \"business_id\"], keep='last', inplace=False)\n\n utility_matrix = pivot_reviews(frame2)\n\n similarity = create_similarity_matrix_euclid(utility_matrix)\n\n for business in businesses.index:\n neighborhood = select_neighborhood(similarity, utility_matrix, user_id, business)\n prediction = weighted_mean(neighborhood, utility_matrix, user_id)\n businesses.ix[business, 'predicted rating'] = prediction\n\n sorted_prediction = businesses.sort_values(by=['predicted rating'], ascending=False)\n sorted_prediction2 = sorted_prediction.drop(columns=['predicted rating'])\n sorted_prediction2 = sorted_prediction2.reset_index()\n sorted_prediction3 = sorted_prediction.reset_index()\n return sorted_prediction2.to_dict(orient='records'), sorted_prediction3.to_dict(orient='records')", "def percentage(my_list, item):\n return 100.0 * frequency(my_list, item)", "def get_neigh_info(self, user, item):\r\n batch_u_neigh = self.u_neigh[user]\r\n batch_i_neigh = self.i_neigh[item]\r\n return batch_u_neigh, batch_i_neigh", "def common_likes(self, user):\n\n self_like_ids = set(self.likes.keys()) if self.likes else set()\n other_like_ids = set(user.fb_profile.likes.keys()) if user.fb_profile.likes else set()\n\n common_like_ids = self_like_ids.intersection(other_like_ids)\n\n return common_like_ids", "def similarRecommendation(user_preferences: dict, matcher: NodeMatcher):\r\n similarE = user_preferences[\"gustos_similares\"]\r\n equal_styles = list(matcher.match(\"User\",similar = similarE))\r\n return equal_styles", "def total_profit(knapsack, items, weight):\n return knapsack[items][weight]", "def count_score(data: GameData) -> Tuple[int, int]:\n score_user = score_ai = 0\n\n for item in data.items:\n if not hasattr(item, \"prediction_user\"):\n return score_user, score_ai\n\n if item.prediction_user == item.ground_truth:\n score_user += 1\n\n if item.prediction_ai[0] == item.ground_truth:\n score_ai += 1\n\n return score_user, score_ai", "def friend_overlap(users):\n 
###TODO-- Completed\n\n #Creating a list of tuples to store the values for number of shared accounts by each of the user\n overlap_tuples = []\n\n #Trying for all the combination if user's without repetition\n for outer_idx,_ in enumerate(users):\n for inner_idx,_ in enumerate(users):\n if (inner_idx != len(users)-1) and (outer_idx < inner_idx+1):\n #Creating a SET of friends for 2 users and finding the INTERSECTION i.e. Common friends between these users\n overlap_tuples.append(tuple((users[outer_idx]['screen_name'],users[inner_idx+1]['screen_name'],\n len(list(set(users[outer_idx]['friends']) & set(users[inner_idx+1]['friends']))))))\n\n #Sort based on first KEY as N i.e. number of shared account in descending order,\n # for ties break using screen_name of user one, further on screen_name of user two\n return sorted(overlap_tuples, key=lambda x:[-x[2], x[0], x[1]])\n\n #for perm in combinations(screen_names,2):\n # overlap_tuples.append(tuple(perm[0],perm[1],len(list(set(user[perm[0]]['friends']) & set(perm[1]['friends'])))))\n #print(len(list(set(users[0]['friends']) & set(users[1]['friends']))))", "def item_based_recommendation(\n self, test_dataRDD,\n item_based_dict_bd, itembased_sim_pair_dict_bd, item_info_bd):\n return test_dataRDD.map(\n lambda line: self.item_based_prediction(\n line, item_based_dict_bd,\n itembased_sim_pair_dict_bd, item_info_bd))", "def create_user_item_array(self):\n user_em = self.user_factors.weight.detach()\n item_em = self.product_factors.weight.detach()\n user_b = self.user_bias.weight.detach()\n item_b = self.product_bias.weight.detach()\n\n user_item_array = (item_em + item_b) @ (user_em + user_b).transpose(0, 1)\n preds = self._prob_to_class(user_item_array).numpy()\n\n return preds", "def apriori(TDB,supMin):\n \"\"\" subMin is ......... 
\"\"\"\n dic = dict()\n keys = set()\n for s in TDB:\n for element in s:\n keys.add(frozenset(element))\n keys = list(keys)\n\n while len(keys)>=1:\n dic.clear()\n for key in keys:\n f = getFreq(TDB,key)\n if f >= supMin:\n dic[key]=f\n printItemSet(dic)\n keys = list(dic.keys())\n newKeys = set()\n for i in range (0,len(keys)-1):\n for j in range(i+1,len(keys)):\n newKey = frozenset.union(keys[i],keys[j])\n newKeys.add(newKey)\n keys = newKeys\n return dic", "def personalization(prediction, n):\n # prediction\n # n top n recommendation\n\n top_n = get_top_n(prediction, n)\n\n rec_dict = {}\n for uid, user_ratings in top_n.items():\n rec_dict[uid] = [iid for (iid, _) in user_ratings]\n\n rec_user_ls = [pred[0] for pred in prediction]\n rec_item_ls = [pred[1] for pred in prediction]\n\n unique_rec_user_ls = np.unique(rec_user_ls)\n unique_rec_item_ls = np.unique(rec_item_ls)\n\n # assign each item with index number\n unique_rec_item_dict = {item: ind for ind,\n item in enumerate(unique_rec_item_ls)}\n\n n_unique_rec_user = len(unique_rec_user_ls)\n n_unique_rec_item = len(unique_rec_item_ls)\n\n # recommended user item matrix\n rec_matrix = np.zeros(shape=(n_unique_rec_user, n_unique_rec_item))\n\n # represent recommended item for each user as binary 0/1\n for user in range(n_unique_rec_user):\n # get userid\n user_id = unique_rec_user_ls[user]\n # get rec item list\n item_ls = rec_dict[user_id]\n\n for item_id in item_ls:\n # get item index\n item = unique_rec_item_dict[item_id]\n rec_matrix[user, item] = 1\n\n # calculate cosine similarity matrix across all user recommendations\n similarity = cosine_similarity(X=rec_matrix, dense_output=False)\n # calculate average of upper triangle of cosine matrix\n upper_right = np.triu_indices(similarity.shape[0], k=1)\n # personalization is 1-average cosine similarity\n score = 1 - np.mean(similarity[upper_right])\n return score", "def item_based_prediction(self, line, rating_bd, sim_bd, item_bd):\n def sort_by_time(pairs):\n \"\"\"For each user, sort its rating records based on its datetime.\n More specifically, if time_a > time_b,\n then: time_a <- x, time_b <- x + 1.\n \"\"\"\n pairs = sorted(pairs, key=lambda line: line[2], reverse=False)\n order = 0\n out = []\n for i in range(len(pairs)):\n if i != 0 and pairs[i][2] == pairs[i - 1][2]:\n out += [(pairs[i][0], pairs[i][1], order)]\n else:\n order += 1\n out += [(pairs[i][0], pairs[i][1], order)]\n return out\n\n def f_decay(cur, t_ui):\n return np.exp(- self.alpha * (cur - t_ui))\n\n def add_decay(pairs):\n \"\"\"add decay rate to the pairs.\n Args:\n pairs: sim * rating, sim, time\n \"\"\"\n new_pairs = sort_by_time(pairs)\n current_time = max(map(lambda line: line[2], new_pairs)) + 1\n final_pairs = [\n (pair[0] * f_decay(current_time, pair[2]),\n pair[1] * f_decay(current_time, pair[2]))\n for pair in new_pairs]\n return sum(map(lambda line: line[0], final_pairs)) / sum(\n map(lambda line: line[1], final_pairs))\n\n def prediction(uid, pair, rating_bd, sim_bd, item_bd):\n \"\"\"do the prediction. 
It can either add decay rate or not,\n which is decided by `method`.\n \"\"\"\n iid, real_rating = pair[0], pair[1]\n if iid not in sim_bd.value.keys():\n return ()\n iid_neighbors = [\n (i[0], i[1], rating_bd.value[i[0]]) for i in sim_bd.value[iid]]\n average_iid_rating = item_bd.value[iid][0]\n sim_rating = []\n for info in iid_neighbors:\n niid, nsim, ratings = info\n sim_rating += [\n (iid, nsim, rating[1] - item_bd.value[niid][0], rating[2])\n for rating in ratings if uid in rating[0]]\n if len(sim_rating) != 0:\n sim_ratings = [\n (line[1] * line[2], abs(line[1]), line[3])\n for line in sim_rating]\n predicted_rating_no_decay = average_iid_rating + sum(\n map(lambda line: line[0], sim_ratings)) / sum(\n map(lambda line: line[1], sim_ratings))\n predicted_rating_decay = \\\n average_iid_rating + add_decay(sim_ratings)\n else:\n predicted_rating_no_decay = average_iid_rating\n predicted_rating_decay = average_iid_rating\n return iid, real_rating, \\\n self.bound_rating(predicted_rating_no_decay), \\\n self.bound_rating(predicted_rating_decay)\n\n uid, pairs = line\n return uid, [\n prediction(uid, pair, rating_bd, sim_bd, item_bd)\n for pair in pairs]", "def ap(self, result, next_item):\n if next_item in result.index:\n rank = result.index.get_loc(next_item) + 1\n return 1.0 / rank\n else:\n return 0", "def test_reasonable_auction(self):\n # I have no preferences\n bids = [Cost(ITEM1, ACTOR1, 1000),\n Cost(ITEM2, ACTOR1, 1000),\n Cost(ITEM3, ACTOR1, 1000),\n Cost(ITEM4, ACTOR1, 1000),\n Cost(ITEM5, ACTOR1, 1000),\n\n # I have linear preferences\n Cost(ITEM1, ACTOR2, 700),\n Cost(ITEM2, ACTOR2, 800),\n Cost(ITEM3, ACTOR2, 1000),\n Cost(ITEM4, ACTOR2, 1200),\n Cost(ITEM5, ACTOR2, 1300),\n\n # I have non-linear preferences\n Cost(ITEM1, ACTOR3, 400),\n Cost(ITEM2, ACTOR3, 800),\n Cost(ITEM3, ACTOR3, 1000),\n Cost(ITEM4, ACTOR3, 1200),\n Cost(ITEM5, ACTOR3, 1600),\n\n # I have arbitrary preference\n Cost(ITEM1, ACTOR4, 2435),\n Cost(ITEM2, ACTOR4, 305),\n Cost(ITEM3, ACTOR4, 310),\n Cost(ITEM4, ACTOR4, 1725),\n Cost(ITEM5, ACTOR4, 225),\n\n # I have strong preferences\n Cost(ITEM1, ACTOR5, 0),\n Cost(ITEM2, ACTOR5, 0),\n Cost(ITEM3, ACTOR5, 0),\n Cost(ITEM4, ACTOR5, 0),\n Cost(ITEM5, ACTOR5, 5000)]\n result = self.splitter.split(ITEMS[:5], ACTORS[:5], bids)\n expected = [(ITEM1, ACTOR4, None),\n (ITEM2, ACTOR1, None),\n (ITEM3, ACTOR3, None),\n (ITEM4, ACTOR2, None),\n (ITEM5, ACTOR5, None)]\n item_assignments_present(self, result, expected)", "def efficientShopping(family, items, NoOfItems):\r\n cost = 0\r\n result = {}\r\n # items are sorted by price\r\n # items closer to list start are items that family wants\r\n for f in range(len(family)):\r\n member_data = chooseItems(family[f], items, NoOfItems)\r\n cost += member_data[0]\r\n result[family[f][1] + 1] = member_data[1]\r\n # get data in sorted order by family member id\r\n final_data = {}\r\n for i in range(len(family)):\r\n final_data[i+1] = result[i+1]\r\n final_data[\"total\"] = cost\r\n return final_data", "def recommend_me_team(self, user_id: int, user_ids_per_group: list, n_recommendations: int):\n res_arr = np.array([self.user_similarity.loc[user_id, group_ids].mean() for group_ids in user_ids_per_group])\n res_inds = np.argsort(res_arr)[::-1][:n_recommendations]\n return res_inds, res_arr[res_inds]", "def predict(self, user, item):\n return self.user_vectors[user, :].dot(self.item_vectors[item, :].T)", "def get_participation_in_pairing(self):\n entries = self.c.select(pairing=1)\n\n frequency = dict()\n pairs = []\n for e in 
entries:\n c1, c2 = e.data['parents']\n pairs.append(tuple(sorted([c1, c2])))\n if c1 not in frequency.keys():\n frequency[c1] = 0\n frequency[c1] += 1\n if c2 not in frequency.keys():\n frequency[c2] = 0\n frequency[c2] += 1\n return (frequency, pairs)", "def compare_users(userA, userB):\n reader = open(\"similarities.txt\", \"r\")\n lines = reader.readlines()\n similarity = None\n for line in lines:\n a, b, sim = line.split(\"\\t\")\n if userA == a and userB == b:\n print(sim)\n similarity = float(sim)\n\n reader2 = open(\"sorted_similarities.txt\", \"r\")\n lines = reader2.readlines()\n index = 0.0\n for line in lines:\n sim = float(line)\n if similarity == sim:\n return 1.0 - (float(index) / len(lines))\n index += 1\n\n return None\n \n \n \"\"\"\n if len(sim_dict.keys()) == 0:\n return None\n\n ranked = sorted(sim_dict.keys(), key=lambda x: sim_dict[x],\n reverse=True)\n try:\n index = ranked.index(userB)\n percentile = 1.0 - (float(index) / len(sim_dict.keys()))\n return int(100 * percentile)\n except:\n return None\n \"\"\"", "def single_user_recommendation_vector(self, user_ratings):\r\n\r\n ratings_vector = self.ratings_matrix.get_ratings_vector(user_ratings)\r\n\r\n user_similarity_profile = self.calculate_user_similarity_profile(ratings_vector)\r\n\r\n return self.calculate_item_relevance_scores(user_similarity_profile)", "def recommend_new(self, user_id, N=10):\n scores = self.user_factors[user_id] @ self.product_factors.T\n bought_indices = self.product_user_matrix.T[user_id].nonzero()[1]\n count = N + len(bought_indices)\n ids = np.argpartition(scores, -count)[-count:]\n best = sorted(zip(ids, scores[ids]), key=lambda x: -x[1])\n return list(itertools.islice((rec for rec in best if rec[0] not in bought_indices), N))", "def step(self, user: torch.tensor, item: torch.tensor,\n rating: torch.tensor, preference: torch.tensor):\n pass", "def map_user_to_ratings(reviews):\n\tuser_ratings = dict()\n\tfor review in reviews:\n\t\tuser = review[0]\n\t\tif user not in user_ratings:\n\t\t\tuser_ratings[user] = 0\n\t\telse:\n\t\t\tuser_ratings[user] = user_ratings[user] + 1\n\n\treturn user_ratings", "def test_get_user_rankings(self):\n user = User(name=u'no win', email=u'generic@thingy.com')\n user.put()\n \n userone = User(name=u'one win', email=u'generic@thingy.com', total_played=1, wins=1)\n userone.put()\n \n usertwo = User(name=u'two wins', email=u'generic@thingy.com', total_played=2, wins=1)\n usertwo.put()\n \n user_rankings = users.get_user_rankings()\n self.assertEquals(len(user_rankings), 2)\n self.assertEquals(user_rankings.pop().key, usertwo.key)\n self.assertEquals(user_rankings.pop().key, userone.key)", "def chosen_items(sack, items, weight):\n total = total_profit(sack, items, weight)\n chosen = []\n \n while total != 0:\n for i in range(items + 1):\n if total in sack[i]:\n chosen.append(i) \n total = total - profit[i - 1] \n break \n \n return sorted(chosen)", "def _update_suspicion_1(self):\n\n for bucket in self.used_buckets:\n multiplier = 1 if bucket.attacked else 0\n for user in bucket.users:\n user.suspicion += multiplier", "def _pval_pairs(self, idx0, idx1):\n return self._test_pairs(idx0=idx0, idx1=idx1)", "def estimate(self, u, j):\n if not (self.trainset.knows_user(u) and self.trainset.knows_item(j)):\n raise PredictionImpossible('User and/or item is unknown.')\n\n u_ratings = self.trainset.ur[u]\n\n if self.weighting == 'linear':\n weight = sum(self.freqs[i, j, self.to_index(r)] for i, r in u_ratings)\n score = sum(self.sums[i, j, self.to_index(r)] for i, r in 
u_ratings)\n return score / weight\n\n # self.weighting == 'log' or None\n weights = [self.freqs[i, j, self.to_index(r)] for i, r in u_ratings]\n reciprocals = [1 / w if w else 0 for w in weights]\n scores = [self.sums[i, j, self.to_index(r)] for i, r in u_ratings]\n scores = [s * w for s, w in zip(scores, reciprocals)]\n\n if self.weighting is None:\n return np.mean(scores)\n # self.weighting == 'log'\n logs = [np.log(w + 1) if w >= 1 else 0 for w in weights]\n return np.dot(scores, logs) / np.sum(logs)", "def compute_multiuser_local_sensitivity(sc, dataset, num_iters_ls,\n num_users_ls):\n res = []\n users_already_processed = set()\n all_users = list(get_user_list(dataset))\n for x in xrange(num_users_ls):\n while True:\n cur_user = random.choice(all_users)\n print \"Trying user\", cur_user\n if cur_user in users_already_processed:\n print \"Oops, we've already processed this one\"\n continue\n if max_movies_per_user == 0:\n break\n print \"Looking at their ratings\"\n u_ratings = get_ratings_from_uid(dataset, cur_user)\n u_ratings_list = u_ratings.collect()\n l = len(u_ratings_list)\n if l > max_movies_per_user:\n print \"This user has too many movies: \",\\\n l, \">\", max_movies_per_user\n users_already_processed.add(cur_user)\n continue\n else:\n print \"This user with\", l, \"movies \" +\\\n \"rated is fine!\"\n break\n print \"Probing user\", cur_user\n report = compute_user_local_sensitivity(sc, dataset, cur_user,\n num_iters_ls)\n users_already_processed.add(cur_user)\n res.append(report)\n return res", "def recommend_items(self, dataset, u, max_items=10, return_scores=True, item_features=None):\n try:\n r = (self.similarity_matrix * dataset[u].T).toarray().flatten()\n except AttributeError:\n raise AttributeError('you must call fit() before trying to recommend items')\n known_items = set(dataset[u].indices)\n recs = []\n for i in r.argsort()[::-1]:\n if i not in known_items:\n if return_scores:\n recs.append((i, r[i]))\n else:\n recs.append(i)\n if len(recs) >= max_items:\n break\n return recs", "def recommend(self, user):\n K = self.n_sim_user\n N = self.n_rec_movie\n rank = dict()\n watched_movies = self.trainset[user]\n\n # v=similar user, wuv=similarity factor\n for v, wuv in sorted(self.user_sim_mat[user].items(),\n key=itemgetter(1), reverse=True)[0:K]:\n for movie in self.trainset[v]:\n if movie in watched_movies:\n continue\n # predict the user's \"interest\" for each movie\n rank.setdefault(movie, 0)\n rank[movie] += wuv\n # return the N best movies\n return sorted(rank.items(), key=itemgetter(1), reverse=True)[0:N]", "def _update_suspicion_1(self):\n\n for bucket in self.buckets:\n multiplier = 1 if bucket.attacked else 0\n for user in bucket.users:\n user.suspicion += multiplier", "def recommendations(self):\n return [user for user in self.tags.similar_objects() if user.status == UserStatus.APPROVED]", "def predict(self, user, item):\n pred = self.global_bias + self.user_bias[user] + self.item_bias[item]\n pred += self.U[user, :].dot(self.I[:, item])\n return pred", "def getSupport(item):\n return float(freqSet[item]) / len(transactionList)" ]
[ "0.6196895", "0.6023507", "0.6007207", "0.59735894", "0.5927089", "0.59151995", "0.5914826", "0.590741", "0.5795329", "0.56757665", "0.56184", "0.5602994", "0.5584905", "0.55800134", "0.5564539", "0.5547924", "0.55471706", "0.54899234", "0.5459986", "0.5451552", "0.54258966", "0.54209244", "0.5388015", "0.5376603", "0.53502774", "0.53469896", "0.5344142", "0.53437376", "0.5332137", "0.5330267", "0.53158444", "0.53101474", "0.5292084", "0.5282267", "0.5282261", "0.5275663", "0.5265569", "0.52614164", "0.5260824", "0.5260074", "0.5250618", "0.5219643", "0.52104336", "0.5196843", "0.51860064", "0.5154824", "0.51463777", "0.51354796", "0.5130657", "0.51295996", "0.5129141", "0.51222974", "0.51119035", "0.5108723", "0.5102208", "0.51010215", "0.5094492", "0.5079583", "0.50752753", "0.5061997", "0.5056326", "0.50468946", "0.5042581", "0.5042548", "0.5036664", "0.5030155", "0.50270367", "0.50080675", "0.50052345", "0.49937662", "0.49891025", "0.49871227", "0.49816456", "0.49788553", "0.49734733", "0.49709338", "0.49675348", "0.49594465", "0.4954356", "0.49522278", "0.4948908", "0.49416783", "0.49347872", "0.49346521", "0.4930449", "0.49277353", "0.49215943", "0.4921339", "0.49192226", "0.49185002", "0.49126554", "0.49081507", "0.49062407", "0.4905769", "0.49048883", "0.48993126", "0.4893923", "0.48883414", "0.48859063", "0.48853117", "0.48825967" ]
0.0
-1
Calculate latent factors using the alternating least squares method, applicable to computing both user factors and item factors.
def alternate_ls(u_num, Y, P, C, reg):
    # get # of items/users and # of latent factors
    [i_num, f_num] = Y.shape

    # output buffer
    X = np.zeros((u_num, f_num))

    # precalculate YtY to improve the performance
    YtY = Y.T * Y

    # iterate over each user/item
    for u in range(u_num):
        # store the diagonal elements of the matrix Cu discussed in the paper in a vector
        Cu = C[u, :]

        # store the corresponding row/column of the preference matrix
        Pu = P[u, :]

        # compute Cu-I
        Cu_I = Cu - 1

        # calculate Yt(Cu-I)Y
        YtCu_IY = np.zeros((f_num, f_num))
        CuIY = np.multiply(Y, Cu_I.T)  # weight each row of Y with Cu-I
        for row in range(f_num):
            for col in range(f_num):
                YtCu_IY[row, col] = Y[:, row].T * CuIY[:, col]

        # left term : ((YtCuY + regI)^(-1)) = (YtY + Yt(Cu-I)Y + regI)^(-1)
        left_inv = YtY + YtCu_IY + reg * np.eye(f_num)
        left = np.linalg.inv(left_inv)

        # right term : YtCuPu
        right = Y.T * np.multiply(Cu.T, Pu.T)

        # compute the latent factor of the user/item
        x = left * right

        # store it in a matrix
        X[u, :] = x.T

    # return an MxF or NxF matrix
    return np.matrix(X)
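For context, a routine like alternate_ls above is normally driven by an outer loop that alternates between the two factor matrices: user factors are re-solved while the item factors stay fixed, then the roles are swapped by transposing the preference and confidence matrices. Each call solves the regularized normal equation x_u = (YtY + Yt(Cu - I)Y + reg*I)^(-1) * Yt * Cu * p_u one row at a time, which is exactly the "left" and "right" terms computed above. The driver below is a minimal sketch rather than part of this dataset; the names preference, confidence, n_factors, reg, and n_iters are illustrative assumptions.

import numpy as np

def als_fit(preference, confidence, n_factors=20, reg=0.1, n_iters=15):
    # preference and confidence are user-by-item np.matrix objects, matching
    # the row indexing P[u, :] and C[u, :] used inside alternate_ls
    u_num, i_num = preference.shape

    # random initial factor matrices: users x F and items x F
    user_factors = np.matrix(np.random.rand(u_num, n_factors))
    item_factors = np.matrix(np.random.rand(i_num, n_factors))

    for _ in range(n_iters):
        # solve for user factors with item factors held fixed
        user_factors = alternate_ls(u_num, item_factors, preference, confidence, reg)
        # solve for item factors with user factors held fixed (transpose the data)
        item_factors = alternate_ls(i_num, user_factors, preference.T, confidence.T, reg)

    return user_factors, item_factors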
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_factors():", "def factors(self):\n X = [Var(i,2) for i in range(self.nvar)]\n factors = [Factor([],np.exp(self.c))] \n # TODO: exclude if zero? or exclude if inf/-inf, or if in \"assigned\", or?\n factors = factors + [Factor([X[i]],[-th,th]).exp() for i,th in enumerate(self.h) if self.dims[i]>1]\n L = coo(self.L)\n factors = factors + [Factor([X[i],X[j]],[[th,-th],[-th,th]]).exp() for i,j,th in zip(L.row,L.col,L.data) if i<j]\n return factors\n # TODO: should we exponentiate if isLog not True? ", "def factor_mat(all_dat, f_num, iterations, regularization):\n\n\t# get # of users and # of items\n\t[u_num, i_num] = all_dat.shape\n\n\t# init user factors and item factors with random values\n\tu_fac = np.matrix(np.random.rand(u_num, f_num))\t# MxF\n\ti_fac = np.matrix(np.random.rand(i_num, f_num))\t# NxF\n\n\t# calculate the preference matrix\n\tpreference = cal_preference(all_dat)\n\n\t# calculate the confidence matrix\n\tconfidence = cal_confidence(all_dat)\n\t\n\t# recalculate the user factors and item factors using the alternating least square method\n\tfor itr in range(iterations):\n\t\tu_fac = alternate_ls(u_num, i_fac, preference, confidence, regularization)\n\t\t#print itr, \"u_fac\"\n\t\ti_fac = alternate_ls(i_num, u_fac, preference.T, confidence.T, regularization)\n\t\t#print itr, \"i_fac\"\n\t\n\t# save the output\n\tdf = pd.DataFrame(u_fac)\n\tdf.to_csv(\"tmp/u_fac.tmp\", index=False, header=False, sep='\\t', encoding='utf-8')\n\tdf = pd.DataFrame(i_fac.T)\n\tdf.to_csv(\"tmp/i_fac.tmp\", index=False, header=False, sep='\\t', encoding='utf-8')\n\n\t# an MxF user factor matrix and an FxN item factor matrix\n\treturn [u_fac, i_fac.T]", "def test_lu_factor():\n\t#[A, b] = lu_read('test1.txt')\n\t# it is poor form to read an external file into a test function, as above\n\tA = np.array([\n\t\t[ 2., 3., -4., 2.],\n\t\t[-4., -5., 6., -3.],\n\t\t[ 2., 2., 1., 0.],\n\t\t[-6., -7., 14., -4.]])\t\n\tLU,p = lu_factor(A, pivot=False)\n\tLU_soln = np.array([\n\t\t[ 2, 3,-4, 2],\n\t\t[-2, 1,-2, 1],\n\t\t[ 1,-1, 3,-1],\n\t\t[-3, 2, 2, 2]])\t\n\tassert norm(LU - LU_soln) < 1.e-10\t\n\n\n\t# test 2\n\t[A2, b2] = lu_read('test2.txt')\t\t\t\t\t\t# read a matrix and RHS vector\n\tLU2,p2 = lu_factor(A2) \t\t\t\t\t\t\t\t# change display to False when LU_FACTOR working\n\tLU_soln2 = np.array([\n\t\t [0.01, 0., 0., 0., 0., 0., 0., 0., 0., 0., 1],\n\t\t [-100., 0.01, 0., 0., 0., 0., 0., 0., 0., 0., 100],\n\t\t [0., -100., 0.01, 0., 0., 0., 0., 0., 0., 0., 10000],\n\t\t [0., 0., -100., 0.01, 0., 0., 0., 0., 0., 0., 1000000],\n\t\t [0., 0., 0., -100., 0.01, 0., 0., 0., 0., 0., 100000000],\n\t\t [0., 0., 0., 0., -100., 0.01, 0., 0., 0., 0., 10000000000],\n\t\t [0., 0., 0., 0., 0., -100., 0.01, 0., 0., 0., 1000000000000],\n\t\t [0., 0., 0., 0., 0., 0., -100., 0.01, 0., 0., 100000000000000],\n\t\t [0., 0., 0., 0., 0., 0., 0., -100., 0.01, 0., 10000000000000000],\n\t\t [0., 0., 0., 0., 0., 0., 0., 0., -100, 0.01, 1000000000000000000],\n\t\t [0., 0., 0., 0., 0., 0., 0., 0., 0., -100., 100000000000000000000]])\n\tassert norm(LU2 - LU_soln2) < 1.e-10", "def factorsWith(self,v,copy=True):\n Lv = self.L.getrow(v).tocoo();\n factors = [Factor([Var(int(v),2)],[-th,th]).exp() for th in [self.h[v]] if self.dims[i]>1]\n factors = factors + [Factor([Var(int(v),2),Var(int(j),2)],[[th,-th],[-th,th]]).exp() for j,th in zip(Lv.col,Lv.data)]\n return factors", "def _als_partial_step_explicit(self, user_mode):\n # DONE minmize loss function and update factors\n # Loss = (R - X * Y^T)^2 + λ_x * ||X||^2 + λ_y * ||Y||^2\n # 
X = ((Y^T * Y) + λ_x * I)^-1 * (R * Y) for dLoss / dX == 0\n # Useful functions: np.linalg.inv or np.linalg.solve\n if user_mode:\n fixed_factors = self._item_factors\n updated_factors = self._user_factors\n ratings = self._ratings\n reg_loss = self._user_reg_loss\n else:\n fixed_factors = self._user_factors\n updated_factors = self._item_factors\n ratings = self._ratings.T\n reg_loss = self._item_reg_loss\n \n A = (fixed_factors.T).dot(fixed_factors)\n A += np.eye(A.shape[0]) * reg_loss\n \n B = ratings.dot(fixed_factors)\n \n \n for row in range(updated_factors.shape[0]):\n updated_factors[row, : ] = np.linalg.solve(A, B[row])", "def factor(self, A):\n \n self.m, self.n = A.shape\n \n if self.use_sub_factor:\n self.sub_factor(A)\n else:\n self.A_factorized = spla.factorized(A)\n \n #if self.A_factorized is not None:\n # self.A_factorized.free() \n #self.A_factorized = pardiso.Factor(A.tocsr())", "def get_factors(self, triples):\n pass", "def factorsWithAny(self,vs):\n factors = []\n for v in vs:\n factors += [Factor([Var(int(v),2)],[-th,th]).exp() for th in [self.h[v]] if self.dims[i]>1]\n for u in self.markovBlanket(v):\n if u not in vs or v < u:\n factors += [Factor([Var(int(v),2),Var(int(u),2)],[[th,-th],[-th,th]]).exp() for th in [L[v,u]] if th!=0] \n return factors", "def _factorsX(self, inputs):\n return tensor.dot(inputs[0], self.wxf)", "def factor(self):\r\n\t\t\r\n\t\t# get gcf\r\n\t\tg = self.extract()\r\n\t\t\r\n\t\t# invert and multiply\r\n\t\tv = g.invert()\r\n\t\tf = self.multiply(v)\r\n\t\t\r\n\t\treturn f,g", "def lu_factorization (M) -> list:\n dim = len(M)\n L = np.eye(dim)\n\n #Itero sulle Incognite da Trovare\n for i in range(dim-1):\n\n #Itero sulle righe su cui devo cancellare un elemento\n for j in range(i+1,dim):\n m__j_i = M[j][i] / M[i][i]\n L[j][i] = m__j_i\n \n M[j][i] = 0.0\n\n for k in range (i+1,dim):\n M[j][k] = M[j][k] - m__j_i * M[i][k]\n \n\n return M,L", "def forward(self, user, item):\n item_emb = self.product_factors(item.view(-1)) + self.product_bias(\n item.view(-1)\n )\n user_emb = self.user_factors(user.view(-1)) + self.user_bias(user.view(-1))\n mat_mult = (item_emb * user_emb).sum(1)\n\n return mat_mult", "def nfactors(self):\n return self.L.nnz", "def _compute_mu_factor2(*input_mols):\n mu_factor = 1\n for mol in input_mols:\n mu_factor *= np.prod(fact(mol))\n return mu_factor", "def doolittle_factorization(self, matrix=None):\n try:\n local_matrix = self.matrix.matrix if not matrix else matrix.matrix\n except AttributeError:\n local_matrix = matrix\n \n (result, upper, lower, temp_sum) = (0.0, [[]], [[]], 0.0)\n for i in range(len(local_matrix)):\n lower.append([])\n for j in range(len(local_matrix[i])):\n lower[i].append(0)\n \n lower.remove([])\n\n for i in range(len(local_matrix)):\n for j in range(len(local_matrix[i])):\n temp_sum = 0.0\n for k in range(i):\n temp_sum += lower[i][k] * upper[k][j]\n result = local_matrix[i][j] - temp_sum\n upper[i].append(round(result, 9))\n upper.append([])\n \n for j in range(len(local_matrix[i])):\n temp_sum = 0.0\n for k in range(i):\n temp_sum += lower[j][k] * upper[k][i]\n result = local_matrix[j][i] - temp_sum\n lower[j][i] = round(result / upper[i][i], 9)\n\n upper.remove([])\n upper = Matrix(upper, name=\"U\")\n lower = Matrix(lower, name=\"L\")\n\n print(\"Las matrices son: \")\n upper.print_matrix()\n lower.print_matrix()\n\n print(\"Al multiplicarlas queda\")\n comprobation = lower.multiply_matrix(upper.matrix)\n comprobation.set_name(\"comprobación de la factorización LU\")\n 
comprobation.print_matrix()\n\n return (lower, upper)", "def _compute_kl(self, lvl):\n kl = [] # kernal length\n for n in range(lvl):\n fct = self.scaling**n # up-sampling factor\n kl.append(fct*(self.nfreq-1)+1)\n kl.append(kl[-1]) # repeat the value of the coarsest scale for the approximation coefficient\n return kl[::-1]", "def compute_factor(X, v, c1, c2):\n\n assert np.shape(v)[1] == 1,\"v is not a column vector\"\n\n v = normalize_l2(v)\n\n sz_u = np.shape(X)[0]\n sz_v = np.shape(X)[1]\n\n assert sz_v == np.size(v)\n\n u = update_with_delta(X @ v, c1)\n v = update_with_delta(X.T @ u, c2)\n\n delta_u = 1000\n delta_v = 1000\n\n while delta_u > 1e-5 or delta_v > 1e-5:\n oldU = u\n oldV = v\n\n u = update_with_delta(X @ v, c1)\n v = update_with_delta(X.T @ u, c2)\n\n delta_u = npla.norm(u - oldU) / sz_u\n delta_v = npla.norm(v - oldV) / sz_v\n\n d = u.T @ X @ v\n\n return (d,u,v)", "def totient(self,x):\r\n dist = self.distr(self.factors(x)).items()\r\n if self.verbose: print(dist)\r\n \r\n # return Euler's result based on product of each prime factor p and its power n\r\n # return prod([ p**n*(1-(1/p)) for p,n in dist])\r\n # return prod([ p**n*1 - p**n/p for p,n in dist])\r\n # return prod([ p**n - p**(n-1) for p,n in dist])\r\n \r\n return prod([ p**n - p**(n-1) for p,n in dist])", "def DualDecomposition(model, maxIter=100, verbose=False):\n lnF = sum( np.log(f.max()) for f in model.factors )\n lnX, xhat = -np.inf, np.zeros( (len(model.X),), dtype=int) \n lnR, rhat = -np.inf, np.zeros( (len(model.X),), dtype=int) \n if verbose: print(\"Iter 0: \"+str(lnF))\n \n for t in xrange(1,maxIter+1): # for each iteration:\n # Update each variable in turn:\n for Xi in model.X: # for each variable, \n flist = model.factorsWith(Xi, copy=False)\n gamma = [f.maxmarginal([Xi]) for f in flist]\n avg = np.prod(gamma)**(1.0/len(gamma))\n for f,g in zip(flist,gamma): f *= avg/(g+1e-300) # !!! numerical issues... \n xhat[Xi] = avg.argmax()[0] # guess a state for Xi\n #\n # Compute the upper bound on the maximum and the value of our current guess\n lnF = sum( np.log(f.max()) for f in model.factors )\n lnX = model.logValue( xhat )\n if lnR < lnX: lnR = lnX; rhat[:]=xhat;\n if verbose: print(\"Iter \"+str(t)+\": \"+str(lnF)+\" > \"+str(lnX))\n if (lnF == lnX): break\n return lnF,lnR,rhat", "def calEachCrossflowAllAxialNode():\n AxialNodeno = 14 # axial node number in CFD data\n Nodes = []\n base = 'Node'\n for i in range(0, AxialNodeno):\n Nodes.append(base+str(i))\n \n crossFlow = pd.read_csv('Data_crossflow.csv', index_col = 'Unnamed: 0')\n lateralFactors = []\n for node in Nodes:\n lateralFactors.append(crossFlow[node]/0.8)\n #need to judge the sign of lateral flow according to CTF rule!!\n gapsToFlip = [2,4,6,7,9,11,13,14,16,18,20,21] #gaps in y direction\n gapsToFlipIndex = [x - 1 for x in gapsToFlip]\n for factors in lateralFactors:\n for index in gapsToFlipIndex:\n factors[index] = -factors[index] \n #note: lateralFactors is a list of list\n \n #below calculate factors averaged over all subchannels\n crossFlowAveFactor = crossFlow.apply(abs).mean(axis = 0)/0.8\n lateralFactorsAvelist = []\n for i in range(0,14):\n base = []\n for j in range(0,24):\n base.append(crossFlowAveFactor[i])\n lateralFactorsAvelist.append(base)\n \n \n for i in range(0, 14):\n for j in range(0, 24):\n #note, in the original model there is only one sign for all source\n #terms in one sub-channel. 
therefore -- sign(crossFlow.iloc[j,2])\n lateralFactorsAvelist[i][j] = lateralFactorsAvelist[i][j] *sign(crossFlow.iloc[j,2]) \n for each in lateralFactorsAvelist:\n for index in gapsToFlipIndex:\n each[index] = -each[index] \n \n \n return lateralFactors, lateralFactorsAvelist", "def do_factor(m=4, q=11):\n from sage.rings.finite_rings.integer_mod_ring import IntegerModRing\n from sage.matrix.constructor import identity_matrix, block_matrix\n from sage.matrix.matrix_space import MatrixSpace\n from sage.rings.integer_ring import IntegerRing\n from sage.modules.free_module_integer import IntegerLattice\n \n \n m=n+1\n ZZ = IntegerRing()\n ZZ_q = IntegerModRing(q)\n \n \n \n from sage.arith.all import euler_phi\n from sage.misc.functional import cyclotomic_polynomial\n \n for a in range(\n R = ZZ_q['x']#.quotient(cyclotomic_polynomial(k, 'x'), 'x')\n f=cyclotomic_polynomial(m,'x')\n return f.factor()", "def _compute_factors(roots, multiplicity, include_powers=False):\n current = cupy.array([1])\n suffixes = [current]\n for pole, mult in zip(roots[-1:0:-1], multiplicity[-1:0:-1]):\n monomial = cupy.r_[1, -pole]\n for _ in range(int(mult)):\n current = cupy.polymul(current, monomial)\n suffixes.append(current)\n suffixes = suffixes[::-1]\n\n factors = []\n current = cupy.array([1])\n for pole, mult, suffix in zip(roots, multiplicity, suffixes):\n monomial = cupy.r_[1, -pole]\n block = []\n for i in range(int(mult)):\n if i == 0 or include_powers:\n block.append(cupy.polymul(current, suffix))\n current = cupy.polymul(current, monomial)\n factors.extend(reversed(block))\n\n return factors, current", "def _fv(self):\n return self.beta * (self.x ** self.c)", "def NMF(model, maxIter=100, beliefs=None, verbose=False):\n if beliefs is None: beliefs = [Factor([Xi],1.0/Xi.states) for Xi in model.X]\n \n lnZ = sum([beliefs[Xi].entropy() for Xi in model.X])\n for f in model.factors:\n m = f.log()\n for v in f.vars: m *= beliefs[v]\n lnZ += m.sum()\n if verbose: print(\"Iter 0: \"+str(lnZ))\n\n for t in xrange(1,maxIter+1): # for each iteration:\n # Update all the beliefs via coordinate ascent:\n for Xi in model.X: # for each variable, \n bNew = 0.0 # compute E[ log f ] as a function of Xi:\n for f in model.factorsWith(Xi,copy=False): # for each factor f_a, compute:\n m = f.log() # E[log f_a] = \\sum \\log f_a \\prod b_v\n for v in f.vars - [Xi]: m *= beliefs[v]\n bNew += m.marginal([Xi]) # sum them up to get E[log f]\n bNew -= bNew.max() # (numerical issues)\n bNew = bNew.exp()\n bNew /= bNew.sum() # set b(Xi) = exp( E[log f] ) / Z\n beliefs[Xi] = bNew\n #\n # Compute the lower bound on the partition function:\n # E_b [ log f ] + H(b) = \\sum_a E[log f_a] + \\sum_i H(b_i) for independent beliefs\n lnZ = sum([beliefs[Xi].entropy() for Xi in model.X])\n for f in model.factors:\n m = f.log()\n for v in f.vars: m *= beliefs[v]\n lnZ += m.sum()\n if verbose: print(\"Iter \"+str(t)+\": \"+str(lnZ))\n return lnZ,beliefs", "def factors(self):\n self.assert_sampled()\n return self._factors", "def normalize(self, factor):", "def alternating_least_squares(Cui, factors, regularization=0.01,\n iterations=15, use_native=True, num_threads=0,\n dtype=np.float64):\n _check_open_blas()\n\n users, items = Cui.shape\n\n X = np.random.rand(users, factors).astype(dtype) * 0.01\n Y = np.random.rand(items, factors).astype(dtype) * 0.01\n\n Cui, Ciu = Cui.tocsr(), Cui.T.tocsr()\n\n solver = implicit.als.least_squares if use_native else least_squares\n\n for iteration in range(iterations):\n s = time.time()\n solver(Cui, X, Y, 
regularization, num_threads)\n solver(Ciu, Y, X, regularization, num_threads)\n log.debug(\"finished iteration %i in %s\", iteration, time.time() - s)\n\n return X, Y", "def fit(self, user_x_product, latent_features_guess=2, learning_rate=0.0002, steps=5000, regularization_penalty=0.02, convergeance_threshold=0.001):\n print 'training model...'\n return self.__factor_matrix(user_x_product, latent_features_guess, learning_rate, steps, regularization_penalty, convergeance_threshold)", "def _factorsH(self, inputs):\n return tensor.dot(self._hidden_activation(inputs), self.whf)", "def rebalance(self):\n\n # Compute norms along columns for each factor matrix\n norms = [np.linalg.norm(f, axis=0) for f in self.factors]\n\n # Multiply norms across all modes\n lam = np.prod(norms, axis=0) ** (1/self.ndim)\n\n # Update factors\n self.factors = [f * (lam / fn) for f, fn in zip(self.factors, norms)]\n return self", "def fn(i, n):\n if len(stack) > 0 and stack[-1] <= n: ans.append(stack + [n])\n for ii in range(i, len(factors)):\n if n % factors[ii] == 0: \n stack.append(factors[ii])\n fn(ii, n//factors[ii])\n stack.pop()", "def create_factors(self):\n\n super(SVDPlusPlus, self).create_factors()\n self.y = np.random.normal(self.init_mean, self.init_stdev, (len(self.items), self.factors))", "def factor_polynomial(a,b,c):\r\n afacts = factors(abs(a))\r\n cfacts = factors(abs(c))\r\n #choose a factor of a (d), then f is a/d and so on\r\n for d in afacts:\r\n for g in cfacts:\r\n f = a/d\r\n e = c/g\r\n if e*f + d*g == b:\r\n print(\"{}*x**2 + {}*x + {} = ({}x + {})({}x + {})\".format(a,b,c,d,int(e),int(f),g))\r\n return\r\n #return False\r", "def compute_quotients(X = np.zeros((1,1,2))):\r\n \r\n start=time.time()\r\n rad = X[:,:,1]\r\n\r\n X = np.reshape(X,(X.shape[0],1,X.shape[1]*X.shape[2]))\r\n\r\n drad = np.asarray([[item[0]/item[1] if item[1] != 0 else 0 for item in list(itertools.combinations(rad[sample],2))] \\\r\n for sample in range(X.shape[0])])\r\n\r\n dradsum = np.asarray([[item[0]/item[1] if item[1] != 0 else 0 for item in itertools.combinations([ \\\r\n item[0]+item[1] for item in list(itertools.combinations(rad[sample],2))], 2)] \\\r\n for sample in range(drad.shape[0])])\r\n \r\n drad = np.reshape(drad,(drad.shape[0],1,drad.shape[-1]))\r\n drads = np.reshape(dradsum,(dradsum.shape[0],1,dradsum.shape[-1]))\r\n\r\n X = np.concatenate((drad,drads), axis=2)\r\n print('Geometric and packing factors computed in', round(time.time()-start,2),' s')\r\n np.save('X', X)\r\n \r\n return X", "def _cache_factors(self, Pibra, Piket, Kbra, Kket, eps):\n q1, p1, Q1, P1 = Pibra\n q2, p2, Q2, P2 = Piket\n\n # If both parameter sets are identical, we are back in the homogeneous case.\n if q1 == q2 and p1 == p2 and Q1 == Q2 and P1 == P2:\n self._Hl = None\n\n # We have k in [0, 1, ..., K-1] where |K| is the basis size\n # hence K-1 is the maximal index.\n L = Kket.get_basis_size()\n\n makl = L\n\n # Factorials\n f = factorial(arange(makl))\n self._f = 1.0 / sqrt(f[:L].reshape(1, -1))\n\n # Note: formula currently fails for non-inhomogeneous case\n # because of divisions by zero in the two args below.\n argl = ((1.0j * conjugate(P1) * (q1 - q2) - 1.0j * conjugate(Q1) * (p1 - p2)) /\n (sqrt(1.0j * conjugate(Q2 * P1) - 1.0j * conjugate(Q1 * P2)) *\n sqrt(1.0j * conjugate(P1) * Q2 - 1.0j * conjugate(Q1) * P2)))\n\n # TODO: Better test for failure?\n if self._doraise and isnan(squeeze(argl)):\n raise InnerProductException(\"Symbolic formula failed due to Q_k = Q_l and P_k = P_l.\")\n\n # The parameter j varies in 
the range [0, 1, ..., min(K-1,L-1)]\n # hence we have that k-j can be in [K-1, K-2, ..., K-1-min(K-1,L-1)]\n # and similar for l-j we have [L-1, L-2, ..., L-1-min(K-1,L-1)]\n # where both K-1-min(K-1,L-1) and L-1-min(K-1,L-1) are non-negative.\n self._Hl = self._evaluate_hermite(L - 1, 1.0 / eps * argl)\n\n il = arange(L).reshape(-1, 1)\n self._pfl = ((1.0j * conjugate(Q2 * P1) - 1.0j * conjugate(Q1 * P2)) ** (il / 2.0)).reshape(L)\n\n # And the groundstate value\n self._I0 = self.exact_result_ground(Pibra, Piket, eps)", "def removeFactors(self,flist, isLog=False):\n # Currently: just divide out factors (add inverse factors) -- can't check if factor present? (minimal)\n # TODO: set entries to zero, then call self.L.eliminate_zeros()\n row = np.zeros(2*len(flist),dtype=int)-1; col=row.copy(); data=np.zeros(2*len(flist));\n for k,f in enumerate(flist):\n if not isLog: \n if np.any(f.t==0): f = f+1e-30; # TODO: log nonzero tol\n f = f.log()\n if f.nvar == 1:\n Xi = f.vars[0]\n self.h[Xi] -= .5*(f[1]-f[0])\n self.c -= .5*(f[1]+f[0])\n else:\n Xi,Xj = f.vars[0],f.vars[1]\n row[2*k],col[2*k],data[2*k] = int(Xi),int(Xj), .25*(f[1,1]+f[0,0]-f[0,1]-f[1,0])\n row[2*k+1],col[2*k+1],data[2*k+1] = col[2*k],row[2*k],data[2*k] \n #L[Xi,Xj] += .25*(f[1,1]+f[0,0]-f[0,1]-f[1,0])\n self.h[Xi] -= .5*(f[1,0]-f[0,0])+data[2*k] #L[Xi,Xj]\n self.h[Xj] -= .5*(f[0,1]-f[0,0])+data[2*k] #L[Xi,Xj]\n self.c -= .25*(f[1,1]+f[1,0]+f[0,1]+f[0,0])\n self.L -= csr((data[row>=0],(row[row>=0],col[row>=0])),shape=(self.nvar,self.nvar)); \n #raise NotImplementedError();", "def sub_factor(self, A):\n \n #\n # In general, to slice a sparse matrix use:\n #\n # indices = np.where(bool_vect)[0]\n # out1 = M.tocsc()[:,indices] # for column slices\n # out2 = M.tocsr()[indices,:] # for row slices\n #\n \n print \"Running subfactor routine.\"\n \n self.m, self.n = A.shape\n \n # Build fixed/known solution vector mask\n x_sol_mask = numpy.zeros((self.n,));\n x_sol_mask[self.xinds] = 1;\n \n # Get the indices for the unknowns\n unknown_inds_mask = numpy.logical_not(x_sol_mask)\n self.unknown_inds = numpy.arange(self.n)[unknown_inds_mask]\n \n # Store internal r.h.s. 
for known part\n Apart = A.tocsc()[:,self.xinds]\n self.r = Apart.dot(self.xsol)\n # note: in the backsolve step this is subtracted from b, and it\n # works even if xsol is a matrix, with each column corresponding\n # to the known part of the solution for each column of b.\n \n # Get the part of A that corresponds to the unknown displacements.\n Asub = A.tocsr()[self.unknown_inds,:]\n Asub = Asub.tocsc()[:,self.unknown_inds]\n\n self.Asub_factorized = spla.factorized(Asub)\n #if self.Asub_factorized is not None:\n # self.Asub_factorized.free() \n #self.Asub_factorized = pardiso.Factor(Asub.tocsr())\n \n print \"Done with subfactor routine.\"", "def __init__(self, user_factors, item_factors):\n self._user_factors = np.copy(user_factors)\n self._item_factors = np.copy(item_factors)\n \n self._users_num = user_factors.shape[0]\n self._items_num = item_factors.shape[0]\n\n assert user_factors.shape[1] == item_factors.shape[1]", "def AlternatingLeastSquares(\n factors=100,\n regularization=0.01,\n alpha=1.0,\n dtype=np.float32,\n use_native=True,\n use_cg=True,\n use_gpu=implicit.gpu.HAS_CUDA,\n iterations=15,\n calculate_training_loss=False,\n num_threads=0,\n random_state=None,\n):\n if use_gpu:\n return implicit.gpu.als.AlternatingLeastSquares(\n factors,\n regularization,\n alpha,\n dtype=dtype,\n iterations=iterations,\n calculate_training_loss=calculate_training_loss,\n random_state=random_state,\n )\n return implicit.cpu.als.AlternatingLeastSquares(\n factors,\n regularization,\n alpha,\n dtype,\n use_native,\n use_cg,\n iterations,\n calculate_training_loss,\n num_threads,\n random_state,\n )", "def exo1():\n randu = randn(N/ 2, N/ 2, 2); % a random vector field\n b = 2\n for i in 1: 4:\n LLs_u = Li{i}(LiS{i}(randu))\n % relative error should be very small\n norm(abs(LLs_u(: ) - b*randu(: )))/ norm(randu(: ))", "def _factorsY(self, inputs):\n return tensor.dot(inputs[1], self.wyf)", "def F(cst, x):\n [u0, v0, u1, v1, u2, v2, coeffs] = cst\n [u, v, g1, g2, g3] = x\n a = g1*u1 - u0\n b = g2*u2 - u0\n c = g3*u - u0\n l = g1*v1 - v0 \n m = g2*v2 - v0\n n = g3*v - v0\n r = g1 - 1\n s = g2 - 1\n t = g3 - 1\n return np.array([\n coeffs[0]*(a**2-l**2) + 2*coeffs[1]*(a*b-l*m) + coeffs[2]*(b**2-m**2) + 2*coeffs[3]*(a*c-l*n) + 2*coeffs[4]*(b*c-m*n) + c**2 - n**2,\n coeffs[0]*(l**2-r**2) + 2*coeffs[1]*(l*m-r*s) + coeffs[2]*(m**2-s**2) + 2*coeffs[3]*(l*n-r*t) + 2*coeffs[4]*(m*n-s*t) + n**2 - t**2,\n coeffs[0]*a*l + coeffs[1]*(l*b+m*a) + coeffs[2]*m*b + coeffs[3]*(l*c+n*a) + coeffs[4]*(m*c+b*n) + c*n,\n coeffs[0]*a*r + coeffs[1]*(r*b+s*a) + coeffs[2]*s*b + coeffs[3]*(r*c+t*a) + coeffs[4]*(s*c+b*t) + c*t,\n coeffs[0]*r*l + coeffs[1]*(l*s+m*r) + coeffs[2]*m*s + coeffs[3]*(l*t+n*r) + coeffs[4]*(m*t+s*n) + t*n \n ])", "def get_factors(self, x: np.ndarray, y: np.ndarray) -> np.ndarray:\n if self.fit_result:\n return 1. / (funcs.gaussian_2d(x, y, **self.fit_result) /\n self.fit_amplitude)\n else:\n return np.transpose(1. / self.interp(np.array([y, x]).T))", "def compute_norm_fac_per_weight(fan_in):\n ksi = 1. 
/ math.sqrt(fan_in)\n return (1 / 3) * ksi ** 2", "def landau1(n):\n\n i = 2\n sum_factors = 1\n factors = set()\n\n while i <= n: \n common = {j for j in factors if gcd(j, i) != 1}\n if len(common) == 0:\n factors = add_factor(i, n, factors)\n sum_factors = sum(factors)\n elif product(common) <= i:\n difference = factors.difference(common)\n new_factors = add_factor(i, n, difference)\n if product(new_factors) > product(factors):\n factors = new_factors\n sum_factors = sum(factors)\n i += 1\n\n print(n, product(factors), factors)\n return product(factors)", "def fs_form_factor(x_c_m,t_c,mach,max_t_sweep):\n return (1.0+(0.6/x_c_m)*t_c+100.0*(t_c**4))*(1.34*(mach**0.18)*((np.cos(np.deg2rad(max_t_sweep)))**0.28))", "def test_two_qubit_weyl_decomposition_aama(self, smallest=1e-18, factor=9.8, steps=11):\n for aaa in (\n [smallest * factor**i for i in range(steps)]\n + [np.pi / 4 - smallest * factor**i for i in range(steps)]\n + [np.pi / 8, 0.113 * np.pi, 0.1972 * np.pi]\n ):\n for k1l, k1r, k2l, k2r in K1K2S:\n k1 = np.kron(k1l.data, k1r.data)\n k2 = np.kron(k2l.data, k2r.data)\n a = Ud(aaa, aaa, -aaa)\n self.check_two_qubit_weyl_decomposition(k1 @ a @ k2)", "def solve():\n result = 1\n map = dict()\n for x in range(2, 20):\n temp = prime_factors(x)\n for n in range(2, 20):\n if n in temp:\n if n in map:\n map[n] = max(temp.count(n), map[n])\n else:\n map[n] = temp.count(n)\n\n for x in map:\n result *= (x ** map[x])\n\n return result", "def calculate_217f_part_stress(**attributes): # pylint: disable=R0912, R0914\n _dic_ref_temp = {\n 1: 343.0,\n 2: {\n 1: 343.0,\n 2: 343.0,\n 3: 398.0,\n 4: 398.0\n },\n 3: 298.0,\n 5: 398.0,\n 6: 298.0,\n 7: 298.0,\n 9: 358.0,\n 10: 358.0,\n 11: 313.0,\n 12: 298.0,\n 13: 358.0,\n 14: 343.0,\n 15: 343.0\n }\n _dic_factors = {\n 1: [4.5E-9, 12.0, 1.0, 0.6, 1.0, 1.0],\n 2: {\n 1: [3.25E-4, 1.0, 3.0, 1.0, 1.0, 1.0],\n 2: [3.25E-4, 1.0, 3.0, 1.0, 1.0, 1.0],\n 3: [5.0E-5, 3.5, 1.0, 1.0, 1.0, 1.0],\n 4: [5.0E-5, 3.5, 1.0, 1.0, 1.0, 1.0]\n },\n 3: [7.33E-3, 0.202, 2.6, 1.45, 0.89, 1.3],\n 5: [0.0031, 1.0, 10.0, 1.0, 1.0, 1.5],\n 6: [0.00148, 1.0, 2.0, 0.5, 1.0, 1.0],\n 7: [0.00015, 2.64, 1.0, 0.466, 1.0, 1.0],\n 8: [0.021, 0.065, 0.105, 0.0, 0.0, 0.0],\n 9: [0.0062, 1.0, 5.0, 1.0, 1.0, 1.0],\n 10: [0.0735, 1.03, 4.45, 2.74, 3.51, 1.0],\n 11: [0.0398, 0.514, 5.28, 1.44, 4.46, 1.0],\n 12: [0.0481, 0.334, 4.66, 1.47, 2.83, 1.0],\n 13: [0.019, 0.445, 7.3, 2.69, 2.46, 1.0],\n 14: [0.0246, 0.459, 9.3, 2.32, 5.3, 1.0],\n 15: [0.018, 1.0, 7.4, 2.55, 3.6, 1.0]\n }\n _dic_piQ = {\n 1: [0.03, 0.1, 0.3, 1.0, 5.0, 15.0],\n 2: [0.03, 0.1, 0.3, 1.0, 5.0, 5.0, 15.0],\n 3: [1.0, 3.0],\n 4: [1.0, 3.0],\n 5: [0.03, 0.1, 0.3, 1.0, 5.0, 15.0],\n 6: [0.03, 0.1, 0.3, 1.0, 5.0, 15.0],\n 7: [0.03, 0.1, 0.3, 1.0, 5.0, 15.0],\n 8: [1.0, 15.0],\n 9: [0.02, 0.06, 0.2, 0.6, 3.0, 10.0],\n 10: [2.5, 5.0],\n 11: [2.0, 4.0],\n 12: [2.0, 4.0],\n 13: [0.02, 0.06, 0.2, 0.6, 3.0, 10.0],\n 14: [2.5, 5.0],\n 15: [2.0, 4.0]\n }\n _dic_piE = {\n 1: [\n 1.0, 3.0, 8.0, 5.0, 13.0, 4.0, 5.0, 7.0, 11.0, 19.0, 0.5, 11.0,\n 27.0, 490.0\n ],\n 2: [\n 1.0, 2.0, 8.0, 4.0, 14.0, 4.0, 8.0, 10.0, 18.0, 19.0, 0.2, 10.0,\n 28.0, 510.0\n ],\n 3: [\n 1.0, 2.0, 10.0, 5.0, 17.0, 6.0, 8.0, 14.0, 18.0, 25.0, 0.5, 14.0,\n 36.0, 660.0\n ],\n 4: [\n 1.0, 2.0, 10.0, 5.0, 17.0, 6.0, 8.0, 14.0, 18.0, 25.0, 0.5, 14.0,\n 36.0, 660.0\n ],\n 5: [\n 1.0, 2.0, 11.0, 5.0, 18.0, 15.0, 18.0, 28.0, 35.0, 27.0, 0.8, 14.0,\n 38.0, 610.0\n ],\n 6: [\n 1.0, 2.0, 10.0, 5.0, 16.0, 4.0, 8.0, 9.0, 18.0, 23.0, 0.3, 13.0,\n 34.0, 610.0\n ],\n 7: [\n 
1.0, 2.0, 10.0, 5.0, 16.0, 4.0, 8.0, 9.0, 18.0, 23.0, 0.5, 13.0,\n 34.0, 610.0\n ],\n 8: [\n 1.0, 5.0, 21.0, 11.0, 24.0, 11.0, 30.0, 16.0, 42.0, 37.0, 0.5,\n 20.0, 53.0, 950.0\n ],\n 9: [\n 1.0, 2.0, 12.0, 6.0, 20.0, 5.0, 8.0, 9.0, 15.0, 33.0, 0.5, 18.0,\n 48.0, 870.0\n ],\n 10: [\n 1.0, 2.0, 18.0, 8.0, 30.0, 8.0, 12.0, 13.0, 18.0, 53.0, 0.5, 29.0,\n 76.0, 1400.0\n ],\n 11: [\n 1.0, 2.0, 16.0, 7.0, 28.0, 8.0, 12.0, 0.0, 0.0, 38.0, 0.5, 0.0,\n 0.0, 0.0\n ],\n 12: [\n 1.0, 3.0, 16.0, 7.0, 28.0, 8.0, 12.0, 0.0, 0.0, 38.0, 0.5, 0.0,\n 0.0, 0.0\n ],\n 13: [\n 1.0, 3.0, 14.0, 6.0, 24.0, 5.0, 7.0, 12.0, 18.0, 39.0, 0.5, 22.0,\n 57.0, 1000.0\n ],\n 14: [\n 1.0, 2.0, 19.0, 8.0, 29.0, 40.0, 65.0, 48.0, 78.0, 46.0, 0.5, 25.0,\n 66.0, 1200.0\n ],\n 15: [\n 1.0, 3.0, 14.0, 7.0, 24.0, 6.0, 12.0, 20.0, 30.0, 39.0, 0.5, 22.0,\n 57.0, 1000.0\n ]\n }\n # Resistance factor (piR) dictionary of values. The key is the\n # subcategory ID. The index in the returned list is the resistance range\n # breakpoint (breakpoint values are in _lst_breakpoints below). For\n # subcategory ID 6 and 7, the specification ID selects the correct set of\n # lists, then the style ID selects the proper list of piR values and then\n # the resistance range breakpoint is used to select\n _dic_piR = {\n 1: [1.0, 1.1, 1.6, 2.5],\n 2: [1.0, 1.1, 1.6, 2.5],\n 3: [1.0, 1.2, 1.3, 3.5],\n 5: [1.0, 1.7, 3.0, 5.0],\n 6: [[[1.0, 1.0, 1.2, 1.2, 1.6, 1.6, 1.6,\n 0.0], [1.0, 1.0, 1.0, 1.2, 1.6, 1.6, 0.0,\n 0.0], [1.0, 1.0, 1.0, 1.0, 1.2, 1.2, 1.2,\n 1.6], [1.0, 1.2, 1.6, 1.6, 0.0, 0.0, 0.0, 0.0],\n [1.0, 1.6, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0], [1.0, 1.6, 1.6, 0.0, 0.0, 0.0, 0.0,\n 0.0], [1.0, 1.0, 1.1, 1.2, 1.2, 1.6, 0.0, 0.0],\n [1.0, 1.0, 1.4, 0.0, 0.0, 0.0, 0.0, 0.0]],\n [[1.0, 1.0, 1.0, 1.0, 1.2, 1.6], [1.0, 1.0, 1.0, 1.2, 1.6, 0.0],\n [1.0, 1.0, 1.2, 1.6, 0.0, 0.0], [1.0, 1.0, 1.0, 2.0, 0.0, 0.0], [\n 1.0, 1.0, 1.0, 2.0, 0.0, 0.0\n ], [1.0, 1.0, 1.2, 2.0, 0.0, 0.0], [1.0, 1.2, 1.4, 0.0, 0.0, 0.0],\n [1.0, 1.0, 1.6, 0.0, 0.0, 0.0], [1.0, 1.0, 1.2, 2.0, 0.0, 0.0], [\n 1.0, 1.0, 1.2, 1.6, 0.0, 0.0\n ], [1.0, 1.0, 1.0, 1.4, 0.0, 0.0], [1.0, 1.0, 1.0, 1.2, 0.0, 0.0],\n [1.0, 1.0, 1.4, 0.0, 0.0, 0.0], [1.0, 1.2, 1.6, 0.0, 0.0, 0.0], [\n 1.0, 1.0, 1.4, 0.0, 0.0, 0.0\n ], [1.0, 1.0, 1.2, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.4, 0.0, 0.0],\n [1.0, 1.0, 1.0, 1.4, 0.0, 0.0], [1.0, 1.0, 1.0, 1.4, 0.0, 0.0], [\n 1.0, 1.0, 1.2, 1.5, 0.0, 0.0\n ], [1.0, 1.0, 1.2, 1.6, 0.0, 0.0], [1.0, 1.0, 1.0, 1.4, 1.6, 0.0],\n [1.0, 1.0, 1.0, 1.4, 1.6, 2.0], [1.0, 1.0, 1.0, 1.4, 1.6, 2.0], [\n 1.0, 1.0, 1.4, 2.4, 0.0, 0.0\n ], [1.0, 1.0, 1.2, 2.6, 0.0,\n 0.0], [1.0, 1.0, 1.0, 0.0, 0.0,\n 0.0], [1.0, 1.0, 1.0, 0.0, 0.0,\n 0.0], [1.0, 1.0, 0.0, 0.0, 0.0, 0.0], [\n 1.0, 1.2, 1.4, 0.0, 0.0, 0.0\n ], [1.0, 1.0, 1.2, 1.6, 0.0,\n 0.0], [1.0, 1.0, 1.0, 1.6, 0.0, 0.0], [\n 1.0, 1.0, 1.4, 0.0, 0.0, 0.0\n ], [1.0, 1.2, 1.5, 0.0, 0.0,\n 0.0], [1.0, 1.2, 0.0, 0.0, 0.0, 0.0]]],\n 7: [[[1.0, 1.2, 1.2, 1.6, 0.0, 0.0], [1.0, 1.0, 1.2, 1.6, 0.0, 0.0],\n [1.0, 1.0, 1.2, 1.2, 1.6, 0.0], [1.0, 1.0, 1.0, 1.1, 1.2, 1.6],\n [1.0, 1.0, 1.0, 1.0, 1.2, 1.6], [1.0, 1.0, 1.0, 1.0, 1.2, 1.6]],\n [[1.0, 1.2, 1.6, 0.0, 0.0, 0.0], [1.0, 1.2, 1.6, 0.0, 0.0, 0.0],\n [1.0, 1.0, 1.2, 1.6, 0.0, 0.0], [1.0, 1.0, 1.1, 1.2, 1.4, 0.0],\n [1.0, 1.0, 1.0, 1.2, 1.6, 0.0], [1.0, 1.0, 1.0, 1.1, 1.4, 0.0]]],\n 9: [1.0, 1.4, 2.0],\n 10: [1.0, 1.1, 1.4, 2.0, 2.5, 3.5],\n 11: [1.0, 1.4, 2.0],\n 12: [1.0, 1.4, 2.0],\n 13: [1.0, 1.1, 1.2, 1.4, 1.8],\n 14: [1.0, 1.1, 1.2, 1.4, 1.8],\n 15: [1.0, 1.1, 1.2, 1.4, 1.8]\n }\n # 
Dictionary containing the number of element breakpoints for determining\n # the resistance factor list to use.\n _dic_breakpoints = {\n 1: [1.0E5, 1.0E6, 1.0E7],\n 2: [1.0E5, 1.0E6, 1.0E7],\n 3: [100.0, 1.0E5, 1.0E6],\n 5: [1.0E4, 1.0E5, 1.0E6],\n 6: [[500.0, 1.0E3, 5.0E3, 7.5E3, 1.0E4, 1.5E4, 2.0E4],\n [100.0, 1.0E3, 1.0E4, 1.0E5, 1.5E5, 2.0E5]],\n 7: [500.0, 1.0E3, 5.0E3, 1.0E4, 2.0E4],\n 9: [2.0E3, 5.0E3],\n 10: [1.0E4, 2.0E4, 5.0E4, 1.0E5, 2.0E5],\n 11: [2.0E3, 5.0E3],\n 12: [2.0E3, 5.0E3],\n 13: [5.0E4, 1.0E5, 2.0E5, 5.0E5],\n 14: [5.0E4, 1.0E5, 2.0E5, 5.0E5],\n 15: [1.0E4, 5.0E4, 2.0E5, 1.0E6]\n }\n _dic_piV = {\n 9: [1.1, 1.05, 1.0, 1.1, 1.22, 1.4, 2.0],\n 10: [1.1, 1.05, 1.0, 1.1, 1.22, 1.4, 2.0],\n 11: [1.1, 1.05, 1.0, 1.1, 1.22, 1.4, 2.0],\n 12: [1.1, 1.05, 1.0, 1.1, 1.22, 1.4, 2.0],\n 13: [1.0, 1.05, 1.2],\n 14: [1.0, 1.05, 1.2],\n 15: [1.0, 1.05, 1.2]\n }\n _dic_piC = {10: [2.0, 1.0, 3.0, 1.5], 12: [2.0, 1.0]}\n _msg = ''\n\n # Calculate the base hazard rate.\n if attributes['subcategory_id'] == 2:\n _ref_temp = _dic_ref_temp[attributes['subcategory_id']][attributes[\n 'specification_id']]\n _f0 = _dic_factors[attributes['subcategory_id']][attributes[\n 'specification_id']][0]\n _f1 = _dic_factors[attributes['subcategory_id']][attributes[\n 'specification_id']][1]\n _f2 = _dic_factors[attributes['subcategory_id']][attributes[\n 'specification_id']][2]\n _f3 = _dic_factors[attributes['subcategory_id']][attributes[\n 'specification_id']][3]\n _f4 = _dic_factors[attributes['subcategory_id']][attributes[\n 'specification_id']][4]\n _f5 = _dic_factors[attributes['subcategory_id']][attributes[\n 'specification_id']][5]\n elif attributes['subcategory_id'] not in [4, 8]:\n _ref_temp = _dic_ref_temp[attributes['subcategory_id']]\n _f0 = _dic_factors[attributes['subcategory_id']][0]\n _f1 = _dic_factors[attributes['subcategory_id']][1]\n _f2 = _dic_factors[attributes['subcategory_id']][2]\n _f3 = _dic_factors[attributes['subcategory_id']][3]\n _f4 = _dic_factors[attributes['subcategory_id']][4]\n _f5 = _dic_factors[attributes['subcategory_id']][5]\n\n if attributes['subcategory_id'] == 4:\n attributes['lambda_b'] = 0.00006\n elif attributes['subcategory_id'] == 8:\n attributes['lambda_b'] = _dic_factors[attributes['subcategory_id']][\n attributes['type_id'] - 1]\n else:\n attributes['lambda_b'] = _f0 * exp(_f1 * (\n (attributes['temperature_active'] + 273.0) /\n _ref_temp))**_f2 * exp(((attributes['power_ratio'] / _f3) * (\n (attributes['temperature_active'] + 273.0) / 273.0)**_f4)**_f5)\n\n if attributes['lambda_b'] <= 0.0:\n _msg = _msg + 'RAMSTK WARNING: Base hazard rate is 0.0 when ' \\\n 'calculating resistor, hardware ID: ' \\\n '{0:d}'.format(attributes['hardware_id'])\n\n # Calculate the resistance factor (piR).\n if attributes['subcategory_id'] not in [4, 8]:\n _index = -1\n if attributes['subcategory_id'] == 6:\n _breaks = _dic_breakpoints[attributes['subcategory_id']][\n attributes['specification_id'] - 1]\n else:\n _breaks = _dic_breakpoints[attributes['subcategory_id']]\n\n for _index, _value in enumerate(_breaks):\n _diff = _value - attributes['n_elements']\n if len(_breaks) == 1 and _diff < 0:\n break\n elif _diff >= 0:\n break\n\n if attributes['subcategory_id'] in [6, 7]:\n attributes['piR'] = _dic_piR[attributes['subcategory_id']][\n attributes['specification_id'] - 1][attributes['family_id'] -\n 1][_index + 1]\n elif attributes['subcategory_id'] not in [4, 8]:\n attributes['piR'] = _dic_piR[attributes['subcategory_id']][_index +\n 1]\n\n # Determine the quality factor 
(piQ).\n attributes['piQ'] = _dic_piQ[attributes['subcategory_id']][\n attributes['quality_id'] - 1]\n\n if attributes['piQ'] <= 0.0:\n _msg = _msg + 'RAMSTK WARNING: piQ is 0.0 when calculating ' \\\n 'resistor, hardware ID: {0:d}'.format(attributes['hardware_id'])\n\n # Determine the environmental factor (piE).\n attributes['piE'] = _dic_piE[attributes['subcategory_id']][\n attributes['environment_active_id'] - 1]\n\n if attributes['piE'] <= 0.0:\n _msg = _msg + 'RAMSTK WARNING: piE is 0.0 when calculating ' \\\n 'resistor, hardware ID: {0:d}'.format(attributes['hardware_id'])\n\n # Calculate the temperature factor (piT).\n if attributes['subcategory_id'] == 4:\n attributes['temperature_case'] = (attributes['temperature_active'] +\n 55.0 * attributes['power_ratio'])\n attributes['piT'] = exp(-4056.0 * (\n (1.0 / (attributes['temperature_case'] + 273.0)) - 1.0 / 298.0))\n\n # Calculate the taps factor (piTAPS).\n if attributes['subcategory_id'] in [9, 10, 11, 12, 13, 14, 15]:\n attributes['piTAPS'] = (attributes['n_elements']**1.5 / 25.0) + 0.792\n\n # Calculate the voltage factor (piV).\n if attributes['subcategory_id'] > 8:\n _index = -1\n if attributes['subcategory_id'] in [9, 10, 11, 12]:\n _breaks = [0.1, 0.2, 0.6, 0.7, 0.8, 0.9]\n elif attributes['subcategory_id'] in [13, 14, 15]:\n _breaks = [0.8, 0.9]\n for _index, _value in enumerate(_breaks):\n _diff = _value - attributes['voltage_ratio']\n if len(_breaks) == 1 and _diff < 0.0:\n break\n elif _index == 0 and _diff >= 0.0:\n break\n elif _diff >= 0:\n break\n attributes['piV'] = _dic_piV[attributes['subcategory_id']][_index]\n\n # Determine the consruction class factor (piC).\n if attributes['subcategory_id'] in [10, 12]:\n attributes['piC'] = _dic_piC[attributes['subcategory_id']][\n attributes['construction_id'] - 1]\n\n # Calculate the active hazard rate.\n attributes['hazard_rate_active'] = (\n attributes['lambda_b'] * attributes['piQ'] * attributes['piE'])\n if attributes['subcategory_id'] == 4:\n attributes['hazard_rate_active'] = (\n attributes['hazard_rate_active'] * attributes['piT'] *\n attributes['n_elements'])\n elif attributes['subcategory_id'] in [9, 11, 13, 14, 15]:\n attributes['hazard_rate_active'] = (\n attributes['hazard_rate_active'] * attributes['piTAPS'] *\n attributes['piR'] * attributes['piV'])\n elif attributes['subcategory_id'] in [10, 12]:\n attributes['hazard_rate_active'] = (\n attributes['hazard_rate_active'] * attributes['piTAPS'] *\n attributes['piC'] * attributes['piR'] * attributes['piV'])\n elif attributes['subcategory_id'] != 8:\n attributes['hazard_rate_active'] = (\n attributes['hazard_rate_active'] * attributes['piR'])\n\n return attributes, _msg", "def improve(factors):\n factors = defaultdict(int, factors)\n improvement = 0\n for a, b in find_pairs(factors):\n c = a*b\n # If local improvement is possible, create remove the two\n # old factors once and introduce a new one\n if DR[c] >= DR[a] + DR[b]:\n new_factors = factors.copy()\n new_factors[a] -= 1\n new_factors[b] -= 1\n new_factors[c] += 1\n improvement += 1\n improve(new_factors)\n\n # If no more local improvements are possible then add\n # to the pool of local optima as candidates for MDRS\n if not improvement:\n candidates.append(factors)", "def empirical( cliques, data, normalize=False):\n factors = [None]*len(cliques)\n for i,vs in enumerate(cliques):\n vs_data = np.array([data[v] for v in vs])\n if len(vs_data.shape)>1: vs_data = vs_data.T # data axis first, now\n factors[i] = Factor(vs, 0.)\n for xs in vs_data:\n if 
np.any(np.isnan(xs)): continue # skip data with missing entries \n factors[i].t[ tuple(xs.astype(int)) ] += 1.\n if normalize: factors[i].t /= factors[i].sum()\n return factors", "def test_two_qubit_weyl_decomposition_ab0(self, smallest=1e-18, factor=9.8, steps=11):\n for aaa in (\n [smallest * factor**i for i in range(steps)]\n + [np.pi / 4 - smallest * factor**i for i in range(steps)]\n + [np.pi / 8, 0.113 * np.pi, 0.1972 * np.pi]\n ):\n for bbb in np.linspace(0, aaa, 10):\n for k1l, k1r, k2l, k2r in K1K2S:\n k1 = np.kron(k1l.data, k1r.data)\n k2 = np.kron(k2l.data, k2r.data)\n a = Ud(aaa, bbb, 0)\n self.check_two_qubit_weyl_decomposition(k1 @ a @ k2)", "def test_two_qubit_weyl_decomposition_aaa(self, smallest=1e-18, factor=9.8, steps=11):\n for aaa in (\n [smallest * factor**i for i in range(steps)]\n + [np.pi / 4 - smallest * factor**i for i in range(steps)]\n + [np.pi / 8, 0.113 * np.pi, 0.1972 * np.pi]\n ):\n for k1l, k1r, k2l, k2r in K1K2S:\n k1 = np.kron(k1l.data, k1r.data)\n k2 = np.kron(k2l.data, k2r.data)\n a = Ud(aaa, aaa, aaa)\n self.check_two_qubit_weyl_decomposition(k1 @ a @ k2)", "def _compute_mu_factor(*input_mols):\n return np.sqrt(_compute_mu_factor2(*input_mols))", "def projective_factorization(x, max_iterations=1):\n\n n_views = len(x)\n n_points = x[0].shape[1]\n\n iterations = 0\n\n #lambda matrix, approximate depths\n l = np.ones((n_views, n_points))\n\n #normalization matrices\n norm_matrices = []\n\n # normalize coordinates\n xn = np.zeros((3*n_views, n_points))\n for i in range(n_views):\n\n #find normalization matrix for projections i\n x_norm, T = normalize_points(x[i], is_homogeneous=True)\n xn[3*i:3*(i+1), :] = x_norm\n norm_matrices.append(T)\n\n while iterations < max_iterations:\n # normalize the lambda matrix\n lr_norm = norm(l, axis=1)\n ln = l / lr_norm[:, np.newaxis]\n lc_norm = norm(ln, axis=0)\n ln /= lc_norm\n\n # repeat the lambdas\n ln = np.repeat(ln, 3, axis=0)\n\n #build the factorization matrix\n fact_matrix = ln*xn\n\n u, d, vh = svd(fact_matrix)\n\n print(d[3] / d[4])\n d = d[:4]/d[0]\n\n # from the svd decomposition we can find the projections and 3d points\n p_matrices = u[:, :4]\n x_3d = np.dot(np.diag(d), vh[:4, :])\n\n iterations += 1\n if iterations != max_iterations:\n\n w_matrix = np.dot(p_matrices, x_3d)\n\n for i in range(n_views):\n l[i, :] = w_matrix[3*i+2, :]\n\n cameras = []\n\n for i in range(n_views):\n # denormalize camera matrices\n c_matrix = np.dot(inv(norm_matrices[i]), p_matrices[3*i:3*(i+1), :])\n\n cameras.append(c_matrix)\n\n return cameras, x_3d", "def get_norm_factor(self, arr):\r\n\r\n\t\tif self.func == \"sigmoid\":\r\n\t\t\tnorm_factor = sigmoid(arr, *self.popt) \r\n\t\telif self.func == \"constant\":\r\n\t\t\tnorm_factor = arr*0 + self.popt\t#ensures that output is the same size as arr\r\n\t\treturn(norm_factor)", "def test_two_qubit_weyl_decomposition_aa0(self, smallest=1e-18, factor=9.8, steps=11):\n for aaa in (\n [smallest * factor**i for i in range(steps)]\n + [np.pi / 4 - smallest * factor**i for i in range(steps)]\n + [np.pi / 8, 0.113 * np.pi, 0.1972 * np.pi]\n ):\n for k1l, k1r, k2l, k2r in K1K2S:\n k1 = np.kron(k1l.data, k1r.data)\n k2 = np.kron(k2l.data, k2r.data)\n a = Ud(aaa, aaa, 0)\n self.check_two_qubit_weyl_decomposition(k1 @ a @ k2)", "def get_scale_factor(self, jets, passing_cut):\n # First of all flatten everything to make it easier to handle\n pt = ak.to_numpy(ak.flatten(jets.pt))\n eta = ak.to_numpy(ak.flatten(jets.eta))\n flav = ak.to_numpy(ak.flatten(jets.hadronFlavour))\n pass_wp = 
ak.to_numpy(ak.flatten(jets[passing_cut]))\n\n # Get the MC efficiency\n eff = self.efficiency_(pt, eta, flav)\n # for each systematic/central value compute the proper SF\n # cache the SF values as sometimes they are repeated, there \n # might also be systematic combinations that are never accessed\n # but pruning them at the beginning can be hard\n # use schema to define combinations, lcb is a tuple with the sf keys\n # for light, charm, bottom for each systematic\n flavour_sf_cache = {}\n scale_factors = {} # our final product\n for key, lcb in self.schema_.items(): \n # populate cache if needed\n for i in range(3):\n # protect against using prelim csv files that don't have UDSG wps\n flavour_sf_cache[lcb[i]] = np.ones(eta.size) if lcb[i] not in self.sf_.keys() else flavour_sf_cache.get(lcb[i], self.sf_[lcb[i]](eta, pt, pass_wp))\n \n scale_factors[key] = eff * self.match_flav_(\n flavour_sf_cache[lcb[0]],\n flavour_sf_cache[lcb[1]],\n flavour_sf_cache[lcb[2]],\n flav\n )\n\n # use SF and eff to compute p(data) and p(MC)\n p_data = {key : np.where(pass_wp, val, 1 - val) \n for key, val in scale_factors.items()}\n p_mc = np.where(pass_wp, eff, 1 - eff)\n\n # return the unflattened version of the ratio\n return {key : ak.unflatten(i/p_mc, ak.num(jets.pt))\n for key, i in p_data.items()}", "def test_two_qubit_weyl_decomposition_a00(self, smallest=1e-18, factor=9.8, steps=11):\n for aaa in (\n [smallest * factor**i for i in range(steps)]\n + [np.pi / 4 - smallest * factor**i for i in range(steps)]\n + [np.pi / 8, 0.113 * np.pi, 0.1972 * np.pi]\n ):\n for k1l, k1r, k2l, k2r in K1K2S:\n k1 = np.kron(k1l.data, k1r.data)\n k2 = np.kron(k2l.data, k2r.data)\n a = Ud(aaa, 0, 0)\n self.check_two_qubit_weyl_decomposition(k1 @ a @ k2)", "def update_factors(self,u,i,j,update_u=True,update_i=True):\n #print(\"session run\")\n loss_v = self.sess.run(self.train_step , feed_dict={\n self.u: u,\n self.i: i,\n self.j: j})\n\n returnText = \"\"\n\n if self.alg_type == \"TFL\" or self.alg_type == \"TFLWM\":\n sum_lambda = 0\n for k in self.sim_matrix_names:\n sum_lambda += abs(self.sim_lambda[k].eval())\n #print(sum_lambda,self.sim_lambda)\n for k in self.sim_matrix_names:\n if math.isnan(sum_lambda):\n print(\"sim_lambda overflow\")\n tf.assign(self.sim_lambda[k], [self.sim_lambda_zero], validate_shape=False).eval()\n returnText = \"sim_lambda overflow\"\n else:\n tf.assign(self.sim_lambda[k], self.sim_lambda[k].eval()/sum_lambda).eval()\n else:\n for k in self.sim_matrix_names:\n val = self.sim_lambda[k].eval()\n if math.isnan(val[0]):\n print(\"sim_lambda overflow\")\n tf.assign(self.sim_lambda[k], [self.sim_lambda_zero], validate_shape=False).eval()\n returnText = \"sim_lambda overflow\"\n if val[0] <= 0.0:\n tf.assign(self.sim_lambda[k], [self.delta], validate_shape=False).eval()\n elif val[0] >= 1.0:\n tf.assign(self.sim_lambda[k], [self.one - self.delta], validate_shape=False).eval()\n\n return returnText", "def factors(self, X):\r\n return (lambda fd: [X] if not fd else fd + self.factors(X // fd[0])) (self.firstdiv(X))", "def discount_factors(r,n,m):\n return [1/((1+r/m)**i) for i in cashflow_times(n,m)]", "def original_factors(self):\n self.assert_sampled()\n # the original factors is the same for all samples set\n return self._original_factors", "def test_two_qubit_weyl_decomposition_abc(self, smallest=1e-18, factor=9.8, steps=11):\n for aaa in (\n [smallest * factor**i for i in range(steps)]\n + [np.pi / 4 - smallest * factor**i for i in range(steps)]\n + [np.pi / 8, 0.113 * np.pi, 0.1972 * np.pi]\n 
):\n for bbb in np.linspace(0, aaa, 4):\n for ccc in np.linspace(-bbb, bbb, 4):\n for k1l, k1r, k2l, k2r in K1K2S:\n k1 = np.kron(k1l.data, k1r.data)\n k2 = np.kron(k2l.data, k2r.data)\n a = Ud(aaa, bbb, ccc)\n self.check_two_qubit_weyl_decomposition(k1 @ a @ k2)", "def initiate_factors(Y, latent_dim):\n\tt = Y.shape[0]\n\tpca = PCA(n_components=latent_dim)\n\tcomponents = pca.fit_transform(Y)\n\tmu_0 = np.repeat(0, latent_dim) # prior mean 0 for regression coefficients\n\tSigma_0 = np.diag(np.repeat(1, latent_dim)) # prior variance 1\n\ta_0 = 1 # Inverse-Gamma(1, 1) is fairly diffused\n\tb_0 = 1\n\tloading_matrix, Y_variance = blr_mv(Y, components, mu_0, Sigma_0, a_0, b_0)\n\tX = np.linspace(0.1, t * 0.1, t).reshape((t, 1)) # create initial GP covariance matrices\n\tcov1 = sample_covariance_matern(X, 1.0, 1.0) # length scale 1.0 corresponds to 10 time points\n\tcov2 = sample_covariance_matern(X, 1.0, 1.0) # variance scale set to 1.0 as well\n\tS1, S2, S3, S4 = build_covariance_blocks([cov1, cov2], loading_matrix, Y_variance)\n\tF = sample_conditional_F(Y, S1, S2, S3, S4)\n\treturn F", "def compute_error(data, user_features, item_features, nz):\n sum_err = 0\n for d, n in nz:\n err = data[d,n] - np.dot(item_features[d,:],user_features[:,n])\n sum_err += err**2\n rmse = 0.5*sum_err/len(nz)\n return rmse", "def factor(n: int) -> List[Tuple[int, int]]:\n if n <= 1:\n raise ValueError\n\n factors = list()\n\n ml = 0\n p = 2\n while n % p == 0:\n n //= p\n ml += 1\n if ml > 0:\n factors.append((p, ml,))\n\n p = 3\n while p ** 2 <= n:\n ml = 0\n while n % p == 0:\n n //= p\n ml += 1\n if ml > 0:\n factors.append((p, ml,))\n p += 2\n\n if n > 2:\n factors.append((n, 1,))\n\n return factors", "def _m_to_F_on_basis(self, la):\n Sym = self._kBoundedRing.ambient()\n kB = Sym.kBoundedSubspace(self.k, t=1)\n h = kB.khomogeneous()\n ks = kB.kschur()\n return sum( h(ks(x)).coefficient(la) * self(x) for x in PartitionsGreatestLE(sum(la), self.k))", "def nonnegative_tensor_factorization(X, r, method='anls_bpp',\n tol=1e-4, stop_criterion=1,\n min_iter=20, max_iter=200, max_time=1e6,\n init=None, orderWays=None):\n\n nWay = len(X.shape)\n\n if orderWays is None:\n orderWays = np.arange(nWay)\n\n # set initial values\n if init is not None:\n F_cell = init\n else:\n Finit = [np.random.rand(X.shape[i], r) for i in range(nWay)]\n F_cell = Finit\n\n grad = getGradient(X, F_cell, nWay, r)\n\n nr_X = X.norm()\n nr_grad_all = np.sqrt(np.sum(np.linalg.norm(grad[i], 'fro') ** 2\n for i in range(nWay)))\n\n if method == \"anls_bpp\":\n method = anls_bpp()\n elif method == \"anls_asgroup\":\n method = anls_asgroup()\n else:\n raise Exception(\"Unknown method\")\n\n # Execute initializer\n F_cell, FF_init = method.initializer(X, F_cell, nWay, orderWays)\n\n tStart = time.time()\n\n if stop_criterion == 2:\n F_kten = ktensor(F_cell)\n rel_Error = getRelError(X, ktensor(F_cell), nWay, nr_X)\n\n if stop_criterion == 1:\n pGrad = getProjGradient(X, F_cell, nWay, r)\n SC_PGRAD = getStopCriterion(pGrad, nWay, nr_grad_all)\n\n # main iterations\n for iteration in range(max_iter):\n cntu = True\n\n F_cell, FF_init = method.iterSolver(X, F_cell,\n FF_init, nWay, r, orderWays)\n F_kten = ktensor(F_cell)\n\n if iteration >= min_iter:\n\n if time.time() - tStart > max_time:\n cntu = False\n\n else:\n\n if stop_criterion == 1:\n pGrad = getProjGradient(X, F_cell, nWay, r)\n SC_PGRAD = getStopCriterion(pGrad, nWay, nr_grad_all)\n if SC_PGRAD < tol:\n cntu = False\n\n elif stop_criterion == 2:\n prev_rel_Error = rel_Error\n rel_Error 
= getRelError(X, F_kten, nWay, nr_X)\n SC_DIFF = np.abs(prev_rel_Error - rel_Error)\n if SC_DIFF < tol:\n cntu = False\n else:\n rel_Error = getRelError(X, F_kten, nWay, nr_X)\n if rel_Error < 1:\n cntu = False\n\n if not cntu:\n break\n\n return F_kten", "def return_factors(self):\n if self.global_rank==0:print([i.shape for i in self.factors])\n return self.factors", "def stretch_factor(self):\n p = self._pants_decomposition\n\n # pick a curve to iterate\n c = PantsLamination.random(p)\n # print(c)\n\n cc = (self**100) * c\n # print(self**100)\n # print(cc)\n return float(sum(abs(x) for x in (self*cc).to_vector())) / \\\n sum(abs(x) for x in cc.to_vector())", "def trifactor(a, d, c):\n u, l = zeros_like(d), zeros_like(a)\n u[0] = d[0]\n for k in range(len(a)):\n l[k] = a[k]/u[k]\n u[k+1] = d[k+1] - l[k]*c[k]\n return l, u", "def compute_pmf(X_inp, rank, c1, c2):\n\n X_arr = []\n u_arr = []\n v_arr = []\n d_arr = []\n\n X = deepcopy(X_inp)\n\n v_init = np.ones((np.shape(X)[1],1))\n\n for i in range(rank):\n X_arr.append(X)\n (d,u,v) = compute_factor(X, v_init, c1, c2)\n\n\n assert abs(npla.norm(u) - 1 ) < 1e-3\n assert abs(npla.norm(v) - 1 ) < 1e-3\n\n d_arr.append(d)\n u_arr.append(u)\n v_arr.append(v)\n\n toSub = np.outer(u,v)\n assert np.shape(toSub) == np.shape(X)\n X -= d * toSub\n\n return (X_arr, u_arr, v_arr, d_arr)", "def problem3():\n def _prime_factorization(n):\n \"\"\"Returns the list of prime factors of a number n\"\"\"\n factors = []\n f = 2\n # Use trial division to add factors\n while f**2 <= n:\n while (n % f) == 0:\n factors.append(f)\n n //= f\n f += 1\n\n if n > 1:\n factors.append(n)\n\n return factors\n\n return max(_prime_factorization(600851475143))", "def __det3x3__(a):\r\n # val = +a[0,0] * ( a[1,1] * a[2,2] - a[2,1] * a[1,2] )\r\n # val += -a[0,1] * ( a[1,0] * a[2,2] - a[2,0] * a[1,2] )\r\n # val += +a[0,2] * ( a[1,0] * a[2,1] - a[2,0] * a[1,1] )\r\n val = +a[0] * (a[4] * a[8] - a[7] * a[5])\r\n val += -a[1] * (a[3] * a[8] - a[6] * a[5])\r\n val += +a[2] * (a[3] * a[7] - a[6] * a[4])\r\n return val", "def addFactors(self, flist, copy=True, isLog=False):\n row = np.zeros(2*len(flist),dtype=int)-1; col=row.copy(); data=np.zeros(2*len(flist));\n for k,f in enumerate(flist):\n if not isLog: \n if np.any(f.t<=0): f = f+1e-10; # TODO: log nonzero tol\n f = f.log()\n if f.nvar == 1:\n Xi = f.vars[0]\n self.h[Xi] += .5*(f[1]-f[0])\n self.c += .5*(f[1]+f[0])\n else:\n Xi,Xj = f.vars[0],f.vars[1]\n row[2*k],col[2*k],data[2*k] = int(Xi),int(Xj), .25*(f[1,1]+f[0,0]-f[0,1]-f[1,0])\n row[2*k+1],col[2*k+1],data[2*k+1] = col[2*k],row[2*k],data[2*k] \n #L[Xi,Xj] += .25*(f[1,1]+f[0,0]-f[0,1]-f[1,0])\n self.h[Xi] += .5*(f[1,0]-f[0,0])+data[2*k] #L[Xi,Xj]\n self.h[Xj] += .5*(f[0,1]-f[0,0])+data[2*k] #L[Xi,Xj]\n self.c += .25*(f[1,1]+f[1,0]+f[0,1]+f[0,0])\n self.L += csr((data[row>=0],(row[row>=0],col[row>=0])),shape=(self.nvar,self.nvar));", "def _fit(self):\n I_mat = []\n for i in range(self._num_bases):\n temp = []\n for j in range(self._num_bases):\n v = 1.0 if i == j else 0.0\n temp.append(v)\n I_mat.append(temp)\n I_mat = Mat(I_mat)\n return (self._input.t() * self._input + self._lambda * I_mat).inv() * self._input.t() * self._label", "def solveForModeB1(X, M, n, maxInner, epsilon, tol,sita,Y1, lambta2):\n # Pi(n) = [A(N) kr A(N-1) kr ... A(n+1) kr A(n-1) kr .. 
A(1)]^T\n Pi = tensorTools.calculatePi(X, M, n)\n #print 'Pi size', Pi.shape\n #print 'pi='+str(Pi)\n #print(M.U[n])\n for iter in range(maxInner):\n # Phi = (X(n) elem-div (B Pi)) Pi^T\n #print X.vals.shape,X.shape\n #print X.vals.flatten().shape\n Phi = tensorTools.calculatePhi(X, M.U[n], Pi, n, epsilon=epsilon)\n #print('phi'+str(Phi))\n #print(Phi)\n # check for convergence that min(B(n), E - Phi(n)) = 0 [or close]\n kktModeViolation = np.max(np.abs(np.minimum(M.U[n], 1-Phi).flatten()))\n if (kktModeViolation < tol):\n break\n\n B=M.U[n]\n #print B.shape\n colNorm = np.apply_along_axis(np.linalg.norm, 0, B, 1)\n zeroNorm = np.where(colNorm == 0)[0]\n colNorm[zeroNorm] = 1\n B = B / colNorm[np.newaxis, :]\n tm=np.hstack((np.ones((B.shape[0],1)),B))\n Y1=Y1.reshape((Y1.shape[0],1))\n\n derive=-1.0*lambta2/B.shape[0]*np.dot((Y1-np.dot(tm,sita)),sita.T)\n #print derive.shape\n #print np.multiply(M.U[n],derive[:,1:]).shape\n #print np.multiply(M.U[n],Phi).shape\n M.U[n] = np.array(np.multiply(M.U[n],Phi))-np.array((np.multiply(M.U[n],derive[:,1:])))\n\n #print 'after'\n #print M.U[n][0]\n #print(\" Mode={0}, Inner Iter={1}, KKT violation={2}\".format(n, iter, kktModeViolation))\n return M, Phi, iter, kktModeViolation", "def rfactor(h,rmask):\n\n Lp, Mp = h.shape\n L=Lp-1\n M=Mp-1\n\n # Land/Sea mask on U-points.\n umask = np.zeros((L,Mp))\n for j in range(Mp):\n for i in range(1,Lp):\n umask[i-1,j] = rmask[i,j] * rmask[i-1,j]\n\n # Land/Sea mask on V-points.\n vmask = np.zeros((Lp,M))\n for j in range(1,Mp):\n for i in range(Lp):\n vmask[i,j-1] = rmask[i,j] * rmask[i,j-1]\n\n #-------------------------------------------------------------------\n # Compute R-factor.\n #-------------------------------------------------------------------\n\n hx = np.zeros((L,Mp))\n hy = np.zeros((Lp,M))\n\n hx = abs(h[1:,:] - h[:-1,:]) / (h[1:,:] + h[:-1,:])\n hy = abs(h[:,1:] - h[:,:-1]) / (h[:,1:] + h[:,:-1])\n\n hx = hx * umask\n hy = hy * vmask\n\n r = np.zeros((L,M))\n\n r = np.maximum(np.maximum(hx[:,:-1],hx[:,1:]), np.maximum(hy[:-1,:],hy[1:,:]))\n\n rmin = r.min()\n rmax = r.max()\n ravg = r.mean()\n rmed = np.median(r)\n\n print ' '\n print 'Minimum r-value = ', rmin\n print 'Maximum r-value = ', rmax\n print 'Mean r-value = ', ravg\n print 'Median r-value = ', rmed\n\n return r", "def factor_naive(n):\n factors = []\n\n for factor in range(2, n // 2):\n q, r = divmod(n, factor)\n power = 0\n while r == 0:\n power += 1\n n = q\n q, r = divmod(q, factor)\n if power != 0:\n factors.append((factor, power))\n\n if factors == []:\n factors = [(n, 1)]\n\n return factors", "def get_factors(val):\n N = np.sqrt(val)\n N = np.floor(N)\n M = val/N\n\n while (val % N != 0):\n N = N-1\n M = val/N\n\n return int(M), int(N)", "def blueschist_felsic():\n\n rho = 2970.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 149.85; C[0,1] = 38.7; C[0,2] = 32.59; C[0,3] = -0.15; C[0,4] = -1.; C[0,5] = -0.19\n C[1,0] = C[0,1]; C[1,1] = 163.55; C[1,2] = 30.03; C[1,3] = 1.05; C[1,4] = -1.81; C[1,5] = -1.78\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 121.62; C[2,3] = 0.22; C[2,4] = -0.95; C[2,5] = -0.13\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 48.03; C[3,4] = -0.63; C[3,5] = -1.14\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 48.62; C[4,5] = -0.01\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 58.42\n\n return C, rho", "def als(matrix, n_factors=8,n_iterations=15, lambda_=10):\r\n\tm, n = matrix.shape\r\n\tQ = matrix\r\n\tW = Q 
> 0.5\r\n\tW = W.astype(int)\r\n\tprint('X and Y randomly initialzied.')\r\n\tX = 5 * np.random.rand(m, n_factors) \r\n\tY = 5 * np.random.rand(n_factors, n)\r\n\tfor ii in range(n_iterations):\r\n\t\tfor u, Wu in enumerate(W):\r\n\t\t\tX[u] = np.linalg.solve(np.dot(Y, np.dot(np.diag(Wu), Y.T)) + lambda_ * np.eye(n_factors),\r\n\t np.dot(Y, np.dot(np.diag(Wu), Q[u].T))).T\r\n\t\tfor i, Wi in enumerate(W.T):\r\n\t\t\tY[:,i] = np.linalg.solve(np.dot(X.T, np.dot(np.diag(Wi), X)) + lambda_ * np.eye(n_factors),\r\n\t np.dot(X.T, np.dot(np.diag(Wi), Q[:, i])))\r\n\t\tprint('{}th iteration is completed of {}'.format(ii + 1,n_iterations))\r\n\tprediction = np.dot(X,Y)\r\n\tprint('Done.')\r\n\treturn prediction, X, Y", "def rf_prod(prime_factors: [int, ]):\n return 1 if not prime_factors else reduce(mul, prime_factors, 1)", "def reciprocal_lattice_vectors(a):\n b = np.zeros(shape=(3,3))\n b[:,0] = 2 * np.pi * np.cross(a[:,1], a[:,2]) / triple_product(a[:,0], a[:,1], a[:,2])\n b[:,1] = 2 * np.pi * np.cross(a[:,2], a[:,0]) / triple_product(a[:,1], a[:,2], a[:,0])\n b[:,2] = 2 * np.pi * np.cross(a[:,0], a[:,1]) / triple_product(a[:,2], a[:,0], a[:,1])\n return b", "def __truediv__(self, factor):\n if type(factor) == Vector:\n raise NotImplementedError\n else:\n return Vector([c / factor for c in self.components])", "def _factors(n):\n gen = ([i, n // i] for i in range(1, int(n ** 0.5) + 1) if n % i == 0)\n return set(sum(gen, []))", "def csrbf(r):\n return num.power((num.maximum(0, 1-r)), 3)*(3*r+1)", "def auxminf1(x):\n \n# Sum over data points\n f = 0.0\n for m_ind in range(cfg.ntrain):\n f += auxmin_f1_part_i(x,m_ind) \n \n return f", "def expand_factors(factor_components, factor_data):\n logger.debug('Transforming input from %d x %d to full %d-dimensional feature space',\n factor_data.shape[0], factor_data.shape[1], factor_components.shape[1])\n return factor_data.dot(factor_components)", "def state_norm_opt(state):\n fact_arr = np.array([factorial(x) for x in range(len(state))])\n tf2 = np.tensordot(fact_arr, fact_arr, axes=0)\n tf4 = np.tensordot(tf2, tf2, axes=0)\n st_abs_quad = np.power(np.abs(state), 2)\n mult = np.multiply(st_abs_quad, tf4)\n return sqrt(np.sum(mult))", "def get_factors(number):\n\n factors = [1, number]\n\n for i in range(2, int(math.sqrt(number))):\n if number % i == 0:\n factors.extend([i, number / i])\n\n return(factors)", "def fit(self, y):\n if isinstance(y, np.ndarray) and y.ndim == 2:\n y = [y]\n y_all = np.concatenate(y)\n self.mean_ = y_all.mean(axis=0, keepdims=True)\n y = [yi - self.mean_ for yi in y]\n n = y[0].shape[1]\n T = [yi.shape[0] for yi in y]\n model = FA(self.n_factors, svd_method='lapack')\n model.fit(y_all)\n\n self.R_ = np.diag(model.noise_variance_)\n self.C_ = model.components_.T\n self.d_ = np.zeros(n)\n self.tau_ = self.tau_init + self.rng.rand(self.n_factors)\n # Allocated and reuse these\n C = self.C_\n R = self.R_\n big_K = {Ti: calc_big_K(Ti, self.n_factors, self.tau_, self.var_n) for Ti in set(T)}\n y_cov = {Ti: block_dot_B(block_dot_A(C, big_K[Ti], Ti), C.T, Ti) + make_block_diag(R, Ti)\n for Ti in set(T)}\n big_d = {Ti: np.tile(self.d_, Ti) for Ti in set(T)}\n big_y = [yi.ravel() for yi in y]\n ll_pre = log_likelihood(big_d, y_cov, big_y, T)\n if self.verbose:\n print(\"FA log likelihood:\", ll_pre)\n\n converged = False\n for ii in range(self.max_iter):\n ll = self._em_iter(y, big_K)\n if abs(ll - ll_pre) / np.amax([abs(ll), abs(ll_pre), 1.]) <= self.tol:\n converged = True\n break\n ll_pre = ll\n if not converged:\n warnings.warn(\"EM 
max_iter reached.\", ConvergenceWarning)\n return self", "def factor_list(f):\n coeff, factors = dmp_factor_list(f.rep, f.lev, f.dom)\n return coeff, [ (f.per(g), k) for g, k in factors ]", "def test_l1norm () :\n n = 10\n rfs = RewardFnSpace(list(range(n)))\n for i in range(10): \n b = rfs.bs[i]\n rfs.lp += b == 0\n rfs.lp.solve()\n rfs._setCoeffs()\n coeffs = np.array(rfs.coeffs)\n assert(np.linalg.norm(coeffs - np.ones(n)) < 1e-4)", "def __solve_alternative_linear_problem(self, user):\n result = [0] * self.layout_slots\n de_rand_approach = \"greedy\"\n bins_per_category = []\n bins_cardinality = []\n for _ in range(len(self.categories)):\n bins_per_category.append([])\n bins_cardinality.append([])\n\n for cat in range(len(self.categories)):\n for _ in range(len(self.news_row_pivots) + 1):\n bins_per_category[cat].append([])\n bins_cardinality[cat].append([])\n for _ in range(len(self.news_column_pivots) + 1):\n bins_per_category[cat][-1].append([])\n bins_cardinality[cat][-1].append(0)\n\n for news in self.news_pool:\n category_index = self.categories.index(news.news_category)\n x, y = self.__compute_position_in_learning_matrix(user=user, news=news)\n bins_per_category[category_index][x][y].append(news)\n bins_cardinality[category_index][x][y] += 1\n\n index = 0\n bin_samples = []\n for cat in range(len(self.categories)):\n for x in range(len(self.news_row_pivots) + 1):\n for y in range(len(self.news_column_pivots) + 1):\n if (y == 0) and (x != 0):\n continue\n self.alt_B[index] = min(bins_cardinality[cat][x][y], self.layout_slots)\n index += 1\n try:\n selected_news = np.random.choice(bins_per_category[cat][x][y])\n self.sample_quality(selected_news, user, interest_decay=True)\n bin_samples += [selected_news.sampled_quality] * self.layout_slots\n except ValueError:\n bin_samples += [0] * self.layout_slots\n\n self.alt_C = np.array(list(np.array(self.alt_lambdas) * bin_samples)) * -1\n linear_problem = opt.linprog(A_ub=self.alt_A, b_ub=self.alt_B, c=self.alt_C)\n\n # FOR EACH SLOT, ISOLATES THE CORRESPONDING VARIABLES\n slots_assegnation_probabilities = []\n slot_counter = 0\n tmp_slot_probabilities = []\n while slot_counter < self.layout_slots:\n i = slot_counter\n while i < len(linear_problem.x):\n tmp_slot_probabilities.append(np.abs(linear_problem.x[i]))\n i += self.layout_slots\n slots_assegnation_probabilities.append(tmp_slot_probabilities.copy())\n tmp_slot_probabilities.clear()\n slot_counter += 1\n\n slot_promenances = self.real_slot_promenances.copy()\n slot_promenances_norm = np.array(slot_promenances) / sum(slot_promenances)\n slots_nr = [s for s in range(0, self.layout_slots)]\n for i in range(self.layout_slots):\n if de_rand_approach == \"ordered\":\n k = i\n elif (de_rand_approach == \"greedy\") or (de_rand_approach == \"greedy_max\"):\n k = np.argmax(slot_promenances)\n slot_promenances[k] = 0\n elif de_rand_approach == \"randomized\":\n k = np.random.choice(slots_nr, p=slot_promenances_norm)\n slot_promenances[k] = 0\n else:\n raise RuntimeError(\"De_randomization approach not recognized. 
Try either 'ordered', 'greedy', \"\n \"'randomized' or 'greedy_max'.\")\n\n target_slot_probabilities = [x for x in slots_assegnation_probabilities[k]]\n target_slot_probabilities_norm = np.array(target_slot_probabilities) / sum(target_slot_probabilities)\n if de_rand_approach == \"greedy_max\":\n assigning_bin_index = np.argmax(target_slot_probabilities)\n cat_index = int(assigning_bin_index / self.num_of_bins)\n x = self.bins_for_position[int(assigning_bin_index)][0]\n y = self.bins_for_position[int(assigning_bin_index)][1]\n\n else:\n assigning_bin = np.random.choice([x for x in range(len(slots_assegnation_probabilities[k]))], p=target_slot_probabilities_norm)\n cat_index = int(assigning_bin / self.num_of_bins)\n x = self.bins_for_position[int(assigning_bin)][0]\n y = self.bins_for_position[int(assigning_bin)][1]\n\n result[k] = np.random.choice(bins_per_category[cat_index][x][y])\n\n return result", "def kullback_leibler_dirichlet(m_true, alpha):\n\n alpha = alpha * (1 - m_true) + m_true\n M = int(m_true.shape[1])\n beta = tf.constant(np.ones((1, M)), dtype=tf.float32)\n alpha0 = tf.reduce_sum(input_tensor=alpha, axis=1, keepdims=True)\n \n kl = tf.reduce_sum(input_tensor=(alpha - beta) * (tf.math.digamma(alpha) - tf.math.digamma(alpha0)), axis=1, keepdims=True) + \\\n tf.math.lgamma(alpha0) - tf.reduce_sum(input_tensor=tf.math.lgamma(alpha), axis=1, keepdims=True) + \\\n tf.reduce_sum(input_tensor=tf.math.lgamma(beta), axis=1, keepdims=True) - tf.math.lgamma(tf.reduce_sum(input_tensor=beta, axis=1, keepdims=True))\n kl = tf.reduce_mean(input_tensor=kl)\n return kl", "def exclusive_lasso5(X, n_clusters, gamma=0.5):\n MAX_ITERS = 100\n n_dims, n_samples = X.shape\n\n # Initialize indicaotr matrix F\n F = np.zeros((n_samples, n_clusters), dtype=np.int8)\n for i in xrange(n_samples):\n F[i, randint(0, n_clusters-1)] = 1\n\n conv = False\n iteration = 0\n results = np.zeros(n_clusters, dtype=np.float64)\n\n while iteration < MAX_ITERS and not conv:\n conv = True\n # Calculate H = XF(F^TF)^-1\n H = X.dot(sp.linalg.pinv(F.T))\n\n base_sum = ((X-H.dot(F.T))**2).sum(axis=0)\n F_counts = F.sum(axis=0)\n\n # Caculate F\n # For each row (sample)\n for i in xrange(n_samples):\n # Get current indicator\n curr_ind = F[i].nonzero()[0][0]\n F_counts[curr_ind] = F_counts[curr_ind] - 1\n\n # We want to find the indicator column that minimizes\n # X-HF^T + gamma * Tr(F^T11^TF)\n for j in xrange(n_clusters):\n F_counts[j] = F_counts[j] + 1\n tr = (F_counts**2).sum()\n res = ((X[:, i]-H[:, j])**2).sum() - base_sum[i] + gamma * tr\n results[j] = res\n F_counts[j] = F_counts[j] - 1\n new_ind = results.argmin()\n\n F_counts[new_ind] = F_counts[new_ind] + 1\n\n if (curr_ind != new_ind):\n conv = False\n F[i, curr_ind] = 0\n F[i, new_ind] = 1\n iteration = iteration + 1\n print iteration\n return map(lambda x: x.index(1), F.tolist())", "def V_fit(x, a, b, c, d, e, f):\n x1 = x[0] # I\n x2 = x[1] # dT\n m = (a * x1 ** 2 + b * x1 + c)\n b = (d * x1 ** 2 + e * x1 + f)\n return m * x2 + b" ]
[ "0.6412927", "0.6364732", "0.6273934", "0.59649765", "0.58943266", "0.5739722", "0.56869096", "0.5642237", "0.5588103", "0.55404395", "0.55167574", "0.54780066", "0.54257447", "0.5419568", "0.5391013", "0.53886884", "0.53792053", "0.53650635", "0.53555024", "0.53386456", "0.53257406", "0.5323429", "0.5294586", "0.5292349", "0.5272046", "0.527183", "0.52611107", "0.5260819", "0.52391315", "0.52267504", "0.5217867", "0.5203884", "0.51968634", "0.5194007", "0.5184397", "0.5175547", "0.5174155", "0.5167332", "0.51647", "0.5160241", "0.51588875", "0.51511145", "0.51492983", "0.5143407", "0.5143237", "0.51316553", "0.51206505", "0.51188797", "0.51183593", "0.5116824", "0.51140136", "0.5097241", "0.5094218", "0.5091829", "0.5089147", "0.50843155", "0.50814706", "0.5080961", "0.5074891", "0.5069342", "0.5068288", "0.5068123", "0.506794", "0.50628144", "0.5047817", "0.5045847", "0.503681", "0.5036594", "0.5033891", "0.5032531", "0.5024618", "0.502404", "0.5022142", "0.5020027", "0.50142956", "0.50089747", "0.49918312", "0.49900314", "0.49880186", "0.49842826", "0.49814063", "0.4977382", "0.49751285", "0.4968745", "0.49657407", "0.49519876", "0.494884", "0.4948572", "0.49375013", "0.49347597", "0.49285334", "0.49267295", "0.49255168", "0.49186885", "0.4915023", "0.49099287", "0.48995343", "0.4898187", "0.48966107", "0.489111" ]
0.5874729
5
Get authorization header for GoDaddy Developer API.
def _get_headers() -> dict:
    api_key = API_KEY_CRED_LOADER.load_credentials()
    api_secret = API_SECRET_CRED_LOADER.load_credentials()
    return {"Authorization": "sso-key {}:{}".format(api_key, api_secret)}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def api_client_authz_header():\n return assemble_authorization_header(API_TOKEN)", "def api_client_authz_header():\n return assemble_authorization_header(API_TOKEN)", "def get_authorization_header(self):\n return {\"Authorization\": \"Bearer {}\".format(self.get_jwt())}", "def get_authorization_header(client, user):\n # obtain authorization token\n response = client.post(\n reverse('token-obtain'),\n data={'username': user.username, 'password': user.raw_password},\n content_type='application/json'\n )\n token = response.json()['access']\n return {'HTTP_AUTHORIZATION': f'Bearer {token}'}", "def get_api_header(token):\n return {\n 'Authorization': 'Token ' + str(token)}", "def get_auth_header(self):\n if not self.verify():\n return None\n\n auth_val = self.encode_auth_header_val()\n if not auth_val:\n return None\n\n return {'Authorization': auth_val.replace('\\n', '')}", "def build_header(self):\n authstring = \"Bearer \" + self.auth_token\n header = {\n \"Authorization\": authstring,\n \"Content-Type\": \"application/json\",\n \"User-Agent\": self.user_agent,\n \"Accept-Encoding\": \"gzip\"\n }\n return header", "def _get_authorization_header(self):\n return f\"token {self._context.get_github_token()}\"", "def get_token_auth_header():\n auth = request.headers.get(\"Authorization\", None)\n if not auth:\n return \"authorization_header_missing\"\n\n parts = auth.split()\n\n if parts[0].lower() != \"bearer\":\n return \"invalid_header\"\n elif len(parts) == 1:\n return \"invalid_header\"\n elif len(parts) > 2:\n return \"invalid_header\"\n\n token = parts[1]\n return token", "def create_auth_header(api_token):\n return {'Authorization': f'token {api_token}'}", "def getBasicAuthorization(self):\n header = self.request.getHeader(\"Authorization\")\n if header:\n try:\n if header.lower().startswith(\"basic \"):\n authstr = base64.b64decode(header[6:]).split(\":\")\n if len(authstr) == 1:\n return None\n return authstr[0], \":\".join(authstr[1:])\n except:\n return None\n return None", "def get_auth(info) -> Tuple[Optional[str], Optional[str]]:\n auth = info.context[\"request\"].headers.get(\"Authorization\")\n if not auth:\n return None, None\n return auth.replace(\"Bearer \", \"\"), None", "def authentication_request():\n # Get the access token from the header\n auth_header = request.headers.get('Authorization')\n if auth_header:\n try:\n access_token = auth_header.split(' ')[1]\n except IndexError:\n return {\"message\": \"Token is malformed\"}, status.HTTP_401_UNAUTHORIZED\n else:\n access_token = ''\n\n return access_token", "def auth_header(self):\n return self._auth_header", "def extract_bearer_token(request):\n return request.headers['Authorization'].split(\" \")[-1].strip()", "def get_token_auth_header():\n # Get authorization form request header\n auth = request.headers.get('Authorization', None)\n # Check if authorization header exists\n if not auth:\n raise AuthError({\n 'code': 'authorization_header_missing',\n 'description': 'Authorization header is MISSING!'\n }, abort(401))\n # If bearer token, then first part of string = 'bearer'\n parts = auth.split()\n if parts[0].lower() != 'bearer':\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must start with \"Bearer\"'\n }, abort(401))\n # Authorization header string length must be 2\n elif len(parts) != 2:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must be a BEARER token'\n }, abort(401))\n\n token = parts[1]\n return token", "def 
get_authorization(self):\n auth = get_authorization_header()\n\n if not auth:\n return None\n\n auth_type, auth_info = auth\n\n if auth_type != b'basic':\n return None\n\n try:\n username, password = base64.b64decode(auth_info).split(b':', 1)\n except Exception:\n return None\n\n return Authorization(\"basic\", username=bytes_to_wsgi(username), password=bytes_to_wsgi(password))", "def request_http_header( self ) -> dict:\n return {'content-type': 'application/json','Authorization':f'NLAuth nlauth_account={self._acct_number},nlauth_email={self._auth_email},nlauth_signature={self._acct_signature},nlauth_role=1090'}", "def get_auth_header(self) -> Mapping[str, Any]:\n return {}", "def get_token_auth_header():\n auth = request.headers.get(\"Authorization\", None)\n print(auth)\n\n if not auth:\n raise AuthError({\"code\": \"authorization_header_missing\",\n \"description\":\n \"Authorization header is expected\"}, 401)\n \n parts = auth.split()\n \n if parts[0].lower() != \"bearer\":\n raise AuthError({\"code\": \"invalid_header\",\n \"description\":\n \"Authorization header must start with\"\n \" Bearer\"}, 401)\n elif len(parts) == 1:\n raise AuthError({\"code\": \"invalid_header\",\n \"description\": \"Token not found\"}, 401)\n elif len(parts) > 2:\n raise AuthError({\"code\": \"invalid_header\",\n \"description\":\n \"Authorization header must be\"\n \" Bearer token\"}, 401)\n\n token = parts[1]\n return token", "def auth_key(event):\n headers = event.get('header')\n if not headers:\n raise RestException(\"Headers are missing\", 400)\n auth = headers.get('Authorization')\n if not auth:\n raise RestException('Header Authorization is missing', 400)\n if not auth.lower().startswith('bearer '):\n raise RestException(\"Authorization missing Bearer keyword\", 400)\n auth = auth.replace('Bearer ', '')\n auth = auth.replace('bearer ', '')\n return auth.strip()", "def get_token_auth_header():\n auth = request.headers.get('Authorization', None)\n if not auth:\n raise AuthError({\n 'code': 'authorization_header_missing',\n 'description': 'Authorization header is expected.'\n }, 401)\n elif auth.split()[0].lower() != 'bearer':\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must start with \"Bearer\".'\n }, 401)\n elif len(auth.split()) == 1:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must be include type and token.'\n }, 401)\n elif len(auth.split()) > 2:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must be Bearer token.'\n }, 401)\n else:\n token = auth.split()[1]\n return token", "def __header_base64(self):\n header_base64 = base64.b64encode(f'{self.client_id}:{self.client_secret}'.encode('ascii'))\n header_base64 = str(header_base64).split(\"'\")[1]\n return {'Authorization': f'Basic {header_base64}'}", "def _request_token(self):\n params = {\n 'grant_type': 'client_credentials',\n 'client_id': self.client_id,\n 'client_secret': self.client_secret\n }\n\n response = self._http_request(\n method='POST',\n headers={'Content-Type': 'application/x-www-form-urlencoded'},\n full_url=self.auth_url,\n data=params\n )\n access_token = response.get('access_token')\n auth_header = {'Authorization': f'Bearer {access_token}'}\n return auth_header", "def get_auth_headers(key):\n return {\n 'Content-Type': 'Application/JSON',\n 'Authorization': key\n }", "def get_authenticate_header(self):\n pass", "def get_token_auth_header():\n auth = request.headers.get('Authorization', None)\n if not 
auth:\n raise AuthError({\n 'code': 'authorization_header_missing',\n 'description': 'Authorization header is expected.'\n }, 401)\n\n parts = auth.split()\n if parts[0].lower() != 'bearer':\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must start with \"Bearer\".'\n }, 401)\n\n elif len(parts) == 1:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Token not found.'\n }, 401)\n\n elif len(parts) > 2:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must be bearer token.'\n }, 401)\n\n token = parts[1]\n return token", "def _auth_headers(self):\n if self.token_str:\n return {'Authorization': 'Bearer {}'.format(self.token_str)}\n else:\n return {}", "def get_token_auth_header():\n auth = request.headers.get('Authorization', None)\n if not auth:\n raise AuthError({'code': 'authorization_header_missing',\n 'description': 'Authorization header is expected'}, 401)\n\n parts = auth.split()\n\n if parts[0].lower() != 'bearer':\n raise AuthError({'code': 'invalid_header',\n 'description': 'Authorization header must start with Bearer'}, 401)\n\n if len(parts) < 2:\n raise AuthError({'code': 'invalid_header',\n 'description': 'Token not found after Bearer'}, 401)\n\n if len(parts) > 2:\n raise AuthError({'code': 'invalid_header',\n 'description': 'Authorization header is an invalid token structure'}, 401)\n\n return parts[1]", "def get_token_auth_header():\n auth = request.headers.get(\"Authorization\", None)\n if not auth:\n raise AuthError({\"code\": \"authorization_header_missing\",\n \"description\":\n \"Authorization header is expected\"}, 401)\n\n parts = auth.split()\n\n if parts[0].lower() != \"bearer\":\n raise AuthError({\"code\": \"invalid_header\",\n \"description\":\n \"Authorization header must start with\"\n \" Bearer\"}, 401)\n elif len(parts) == 1:\n raise AuthError({\"code\": \"invalid_header\",\n \"description\": \"Token not found\"}, 401)\n elif len(parts) > 2:\n raise AuthError({\"code\": \"invalid_header\",\n \"description\":\n \"Authorization header must be\"\n \" Bearer token\"}, 401)\n\n token = parts[1]\n return token", "def get_authenticate_header(self):\n return f'Basic realm=\"{self.www_authenticate_realm}\"'", "async def authorization(request):\n # Decode tokens, load/check users and etc\n # ...\n # in the example we just ensure that the authorization header exists\n return request.headers.get(\"authorization\", \"\")", "def asterisk_in_header():\n auth_token = get_auth_token()\n\n headers = '{\"Host\":\"$host\",\"User-Agent\":\"$user_agent\",\"Date\":\"DATE\",'\n headers += '\"Accept\": \"*/*\",\"Accept-Encoding\": \"gzip\",'\n headers += '\"X-Project-ID\": \"$project_id\",'\n headers += '\"X-Auth-Token\": \"$token\"}'\n headers = string.Template(headers)\n\n return headers.substitute(host=CFG.host, user_agent=CFG.user_agent,\n project_id=CFG.project_id, token=auth_token)", "def auth_header_value(self):\n return f\"token {self.API_TOKEN}\"", "def getHeaders():\n userid = rhev_settings.USERNAME\n passwd = rhev_settings.PASSWORD\n # base64.encodestring adds trailing \\n. 
\n auth = base64.encodestring(\"%s:%s\" % (userid, passwd)).rstrip(\"\\n\")\n headers = {\"Content-Type\": \"application/xml\",\n \"Accept\": \"application/xml\",\n \"Accept-Charset\": \"utf-8\",\n \"Authorization\" : (\"Basic %s\" % auth)}\n return headers", "def build_header(token: str = None):\n return {\n \"Content-Type\": \"application/json\",\n \"X-Auth-Token\": token or get_project_token(),\n }", "def _retrieve_token(request):\n auth_string = request.headers.get('Authorization')\n try:\n match = re.match(\"Bearer (.+)\", auth_string)\n except TypeError:\n match = None\n if match:\n return match.groups()[0]", "def get_headers():\n return {'Authorization': f'token {settings.GITHUB_AUTH_TOKEN}'}", "def authentication_header():\n with open(KEY_FILE, \"r\") as file:\n header = json.load(file)\n return header", "def auth_headers(current_user_token: str) -> Dict[str, str]:\n return {\"Authorization\": f\"Bearer {current_user_token}\"}", "def get_headers():\n if not headers:\n headers[\"Content-Type\"] = \"application/json\"\n headers[\"Accept\"] = \"application/json\"\n headers[\"User-Agent\"] = constants.USER_AGENT\n headers[\"Authorization\"] = get_token(constants.AUTH_URL, cfg[\"key\"])\n\n return headers\n\n return headers", "def authorization(self):\n return {'auth-token': '{token}'.format(token=self.token)}", "def authorization(self):\n return {'auth-token': '{token}'.format(token=self.token)}", "def get_headers(self):\n headers = {\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Basic %s\" % self.rest_auth_key\n }\n return headers", "def get_token() -> str:\n try:\n bearer, authorization = request.headers['Authorization'].split()\n if 'bearer' not in bearer.lower():\n raise Forbidden('Invalid token. Please login!')\n return authorization\n\n except Exception:\n raise Forbidden('Token is required. 
Please login!')", "def bearer_auth():\n authorization = request.headers.get(\"Authorization\")\n if not (authorization and authorization.startswith(\"Bearer \")):\n response = app.make_response(\"\")\n response.headers[\"WWW-Authenticate\"] = \"Bearer\"\n response.status_code = 401\n return response\n slice_start = len(\"Bearer \")\n token = authorization[slice_start:]\n\n return jsonify(authenticated=True, token=token)", "def authenticate_header(self, request):\n return '{0} realm=\"{1}\"'.format(settings.JWT_AUTH_HEADER_PREFIX,\n self.www_authenticate_realm)", "def _headers(self):\n auth = AuthenticationProvider.currentAuth()\n\n return {\n 'Authorization': '%s %s' % (auth.tokenType, auth.accessToken),\n 'Content-Type': 'application/json'}", "def oauth_headers(oauth):\n import base64\n encoded_credentials = base64.b64encode(('{0}:{1}'.format(oauth.client_id, oauth.client_secret)).encode('utf-8'))\n headers = {\n 'Authorization': 'Basic {0}'.format(encoded_credentials.decode('utf-8')),\n 'Content-Type': 'application/x-www-form-urlencoded'\n }\n return headers", "def get_headers(self):\n return {\n 'Authorization': 'JWT {}'.format(self.token)\n }", "def header_token(token):\n return {'Authorization': '{0} {1}'.format('JWT', token)}", "def get_headers() -> dict:\n headers = {}\n token = GoogleAuthService.get_report_api_token()\n if token:\n headers['Authorization'] = 'Bearer {}'.format(token)\n return headers", "def get_headers(self):\r\n return {\r\n 'authenticate': {\r\n 'username': self.username,\r\n 'apiKey': self.api_key,\r\n }\r\n }", "def get_headers():\n headers = {\n \"Authorization\": \"Token {}\".format(get_token()),\n }\n\n return headers", "def get_auth(context):\n\n headers = context['headers']\n auth_info = {\n \"type\": \"basic\",\n \"basic\": {\n \"user\": headers['api_key'],\n \"password\": \"X\"\n }\n }\n auth = Auth().get_auth(auth_info)\n\n return auth", "def create_authorization_header(self, **kwargs):\n return {\"Authorization\": \"Bearer {}\".format(self.create_jwt(**kwargs))}", "def basic_header(self):\n self.auth = base64.encodestring('%s:%s' % (self.username, self.password)).replace('\\n', '')\n return { \n #\"Authorization\" : \"Basic %s\" % self.auth, \n \"Content-type\": \"text/plain\" }", "def generate_oauth_headers(access_token: str) -> dict:\n return {'Authorization': 'Bearer ' + access_token}", "def getToken(request):\n try:\n token = request.META['HTTP_AUTHORIZATION'].split()[1]\n except:\n token = \"\"\n return token", "def add_header(response):\n response.headers['Authorization'] = response\n return response", "def get_token():\n token = getpass.getpass('Paste in your RDR API token and press Enter:')\n return {'Authorization': 'token ' + token}", "def get_auth_headers(self,email,passwd):\n #获取认证后的http头\n\n postdata = urllib.urlencode({'Email':email,'Passwd':passwd,'service':'reader','source':self.ClIENT})\n req = urllib2.Request(self.AUTH_URL,postdata)\n if self.host:\n req.set_proxy(self.host, self.type)\n f = urllib2.urlopen(req)\n auth_value = f.read().split()[2][5:]\n f.close()\n self.Author_Headers = {'Authorization':'GoogleLogin auth=%s'%auth_value}", "def _headers(self):\n\n api_key_bytes = '{0}:'.format(self.api_key).encode()\n authorization = b64encode(api_key_bytes).decode()\n\n headers = {\n 'Authorization': 'Basic {0}'.format(authorization),\n 'User-Agent': USER_AGENT,\n }\n\n if self.config['api_version']:\n headers['X-Button-API-Version'] = self.config['api_version']\n\n return headers", "def test_headers(self):\n token = 'abc123'\n 
requests.get(self.url, auth=BearerAuth(token))\n self.assertEqual(httpretty.last_request().headers['Authorization'], 'Bearer {}'.format(token))", "def _create_auth_headers(self):\n auth_headers = {**self.get_headers()}\n auth_headers['Authorization'] = 'Bearer ' + self.get_access_token()\n return auth_headers", "def get_request_headers(self):\n return {\n 'Authorization': 'JWT ' + self.get_authorization_token()\n }", "def get_authorization_headers(access_keeper):\n access_token = access_keeper.get_access_token()\n headers = {\n 'Authorization': f'Bearer {access_token}'\n }\n return headers", "def _get_oauth_headers(self, user):\n access_token = AccessTokenFactory.create(user=user, application=ApplicationFactory()).token\n headers = {\n 'HTTP_AUTHORIZATION': 'Bearer ' + access_token\n }\n return headers", "def _get_oauth_headers(self, user):\n access_token = AccessTokenFactory.create(user=user, application=ApplicationFactory()).token\n headers = {\n 'HTTP_AUTHORIZATION': 'Bearer ' + access_token\n }\n return headers", "def _headers(self):\n\n auth_token = SendbeeAuth(self.client.api_secret).get_auth_token()\n headers = {\n 'X-Auth-Token': auth_token,\n 'X-Api-Key': self.client.api_key,\n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n 'User-Agent': 'Sendbee Python API Client'\n }\n self.debug.ok('headers', headers)\n\n return headers", "def get_auth_headers():\n\n auth_type = \"Basic\"\n if request.headers.get('UseXBasic'):\n auth_type = \"XBasic\"\n\n return {\n 'WWW-Authenticate': '%s realm=\"Login Required\"' % auth_type\n }", "def __MakeHeaders(self, auth):\n\n headers = dict()\n headers[\"X-Blip-api\"] = BLIP_API_VERSION\n headers[\"Accept\"] = JSON\n if (auth and self.userName != None and self.password != None):\n credentials = self.userName + \":\" + self.password;\n headers[\"Authorization\"] = \"Basic \"+base64.b64encode(credentials)\n if (self.userAgent != None):\n headers[\"User-Agent\"] = self.userAgent\n\n return headers", "def buildHeader(self):\n if self.key:\n userString = self.user+b\":\"+self.key\n else:\n userString = self.user+b\":\"\n \n encodedUserString = b64encode(userString)\n decodedUserString = encodedUserString.decode(\"ascii\")\n self.basicAuthHeader = {\"Authorization\": \"Basic \" + decodedUserString}", "def get_jwt():\n\n try:\n scheme, token = request.headers['Authorization'].split()\n assert scheme.lower() == 'basic'\n return base64.b64decode(token).decode(\"UTF-8\")\n except (KeyError, ValueError, AssertionError):\n raise Forbidden('Invalid Bearer Token.')", "def get_auth_headers(self) -> Dict:\n if self.__access_token:\n return {\n 'Authorization': self.__access_token,\n 'Api-Key': self.__api_key,\n 'X-Client-Name': __client_name__,\n 'X-Client-Version': __version__,\n 'X-Min-Version': __min_engine_version__\n }\n elif self.__license_key and self.__email and self.__password:\n return {\n 'Authorization': self.__calculate_basic_auth_value(),\n 'License-Key': self.__license_key,\n 'Api-Key': self.__api_key,\n 'X-Client-Name': __client_name__,\n 'X-Client-Version': __version__,\n 'X-Min-Version': __min_engine_version__\n }\n else:\n raise ValueError('Credentials are not configured')", "def get_client_login_token_string(http_body):\n for response_line in http_body.splitlines():\n if response_line.startswith('Auth='):\n # Strip off the leading Auth= and return the Authorization value.\n return response_line[5:]\n return None", "def get_jwt(self, request):\n auth_header_prefix = self.auth_header_prefix\n try:\n authorization = 
request.authorization\n except ValueError:\n return None\n if authorization is None:\n return None\n authtype, token = authorization\n if authtype.lower() != auth_header_prefix.lower():\n return None\n return token", "def authenticate_header(self, request):\n return \"Api key authentication failed.\"", "def get_token(self):\n self.register_user(self.user_data)\n result = self.login_user(self.login_data)\n header_access_token = json.loads(result.data.decode())['header_access_token']\n return header_access_token", "def authorization(self):\n token = self.create_auth_token(\n self.api_key.user, self.api_key.key, self.api_key.secret\n )\n return f'JWT {token}'", "def inv_headers(access_token):\n headers = {\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'Authorization': 'Bearer ' + access_token,\n 'Accept': 'application/json',\n 'Accept-Encoding': 'application/json',\n }\n return headers", "def decode_auth_headers(request: Request) -> Tuple[str, str]:\n authorization = request.headers.get(\"Authorization\", \"\")\n\n headers = CaseInsensitiveDict({\"WWW-Authenticate\": \"Basic\"})\n\n scheme, param = get_authorization_scheme_param(authorization)\n if not authorization or scheme.lower() != \"basic\":\n raise InvalidClientError(request=request, headers=headers)\n\n try:\n data = b64decode(param).decode(\"ascii\")\n except (ValueError, UnicodeDecodeError, binascii.Error):\n raise InvalidClientError(request=request, headers=headers)\n\n client_id, separator, client_secret = data.partition(\":\")\n\n if not separator:\n raise InvalidClientError(request=request, headers=headers)\n\n return client_id, client_secret", "def polling_header(self):\n self.auth = base64.encodestring('%s:%s' % (self.username, self.password)).replace('\\n', '')\n return { \n #\"Authorization\" : \"Basic %s\" % self.cmd.auth,\n #\"X-Atmosphere-Transport\" : \"long-polling\",\n #\"X-Atmosphere-tracking-id\" : self.atmos_id,\n \"X-Atmosphere-Framework\" : \"1.0\",\n \"Accept\" : \"application/json\" }", "def get_token_auth_header(params):\n auth = get_token(params)\n parts = auth.split()\n\n if parts[0].lower() != \"bearer\":\n raise AuthError({\"code\": \"invalid_header\", \"description\": \"Authorization header must start with Bearer\"}, 401)\n\n if len(parts) == 1:\n raise AuthError({\"code\": \"invalid_header\", \"description\": \"Token not found\"}, 401)\n\n if len(parts) > 2:\n raise AuthError({\"code\": \"invalid_header\", \"description\": \"Authorization header must be Bearer token\"}, 401)\n\n token = parts[1]\n return token", "def http_headers(self) -> dict:\n return {\n \"x-api-key\": f\"{self.config.get('api_key')}_{self.config.get('secret_key')}\",\n \"Content-type\": \"application/json\",\n }", "def test_get_auth_header_fresh(self, mocker):\n oauth = Oauth2Authenticator(\n TestOauth2Authenticator.refresh_endpoint,\n TestOauth2Authenticator.client_id,\n TestOauth2Authenticator.client_secret,\n TestOauth2Authenticator.refresh_token,\n )\n\n mocker.patch.object(Oauth2Authenticator, \"refresh_access_token\", return_value=(\"access_token\", 1000))\n header = oauth.get_auth_header()\n assert {\"Authorization\": \"Bearer access_token\"} == header", "def _headers(helper):\n return {\n 'Authorization': 'Splunk {0}'.format(\n helper.context_meta['session_key'])}", "def get_headers(self) -> Dict[str, str]:\n header_dict = self.generate_auth_dict()\n\n return {\n \"Authorization\": \"Basic \" + header_dict[\"signature\"],\n \"Content-Type\": 'application/json',\n }", "def auth(self):\r\n basic = 
parse_auth(self.environ.get('HTTP_AUTHORIZATION',''))\r\n if basic: return basic\r\n ruser = self.environ.get('REMOTE_USER')\r\n if ruser: return (ruser, None)\r\n return None", "def generate_headers_with_auth(self, token_type: str = 'access'):\n if re.search('access', token_type, re.I):\n bearer_token = self._access_token\n elif re.search('refresh', token_type, re.I):\n bearer_token = self._refresh_token\n else:\n raise (Exception('Please check docstrings and change token_type value'))\n\n return {\n 'accept': 'application/json',\n 'Content-Type': 'application/json',\n 'Authorization': 'Bearer ' + bearer_token\n }", "def get_base_ecom_headers(access_token):\r\n headers = config['ecom'].copy()\r\n headers['authorization'] = \"Bearer {}\".format(access_token)\r\n return headers", "def _getApiAuthToken(self):\n return settings.EBAY_API_AUTH_TOKEN", "def get_token_header(cls, token):\n if token is EMPTY_KNOX_TOKEN:\n return {}\n else:\n return {'HTTP_AUTHORIZATION': 'token {}'.format(token)}", "def _build_common_headers(apikey: str):\n return {\n \"Authorization\": f\"token {apikey}\",\n \"User-Agent\": \"sharing-api-fetcher\",\n \"Accept-Encoding\": \"gzip\",\n \"Accept\": \"application/json\",\n }", "def get_auth_token():\n headers = {\n 'Content-Type': 'text/plain;charset=UTF-8', }\n data = '{ \\\n \"auth\": { \\\n \"identity\": { \\\n \"methods\": [ \\\n \"password\" \\\n ], \\\n \"password\": { \\\n \"user\": { \\\n \"name\": \"zheng_zhao\", \\\n \"password\": \"ZhaoZheng0426\", \\\n \"domain\": { \\\n \"name\": \"hwstaff_y00465251\" \\\n } \\\n } \\\n } \\\n }, \\\n \"scope\": { \\\n \"project\": { \\\n \"id\": \"454add6b26d04f53ae5c593551acf1ff\" \\\n } \\\n } \\\n } \\\n }'\n\n r = requests.post('https://iam.cn-north-1.myhuaweicloud.com/v3/auth/tokens',\n headers=headers, data=data)\n\n # print(r.status_code)\n # print(r.headers)\n token = r.headers.get('X-Subject-Token')\n\n return token", "def api_key(request):\r\n user_acct = request.user\r\n return _api_response(request, {\r\n 'api_key': user_acct.api_key,\r\n 'username': user_acct.username\r\n })", "def _make_header(self, token):\n header = HEADER.copy()\n header['Authorization'] = \"Bearer {}\".format(token)\n\n return header", "def headers(group_id, token):\n return { \n \"active-group\": group_id,\n \"Authorization\" : \"Bearer: {}\".format(token) \n }", "def _token_header(token=None):\n if not token:\n return None\n\n message = '{token}:Ignored'.format(token=token)\n headers = {'Authorization': 'Basic {code}'.format(\n code=base64.b64encode(message))}\n return headers", "def get_auth_header(self, login, password):\n json = self.request('post',\n '/auth/login',\n json={'uid': login, 'password': password},\n msg='authenticating at {} with user {}'.format(self.admin_url, login),\n errorfatal=False,\n retfmt='json',\n autoauth=False\n )\n if json:\n return {'Authorization': 'token=%s' % json['token']}\n else:\n return None" ]
[ "0.77325", "0.77325", "0.7386359", "0.73046136", "0.71326786", "0.70995873", "0.7064967", "0.69181406", "0.67679745", "0.6723393", "0.67186785", "0.66517", "0.66500354", "0.66368353", "0.6635809", "0.6585172", "0.6572229", "0.6569699", "0.65522623", "0.65466243", "0.65304196", "0.6509508", "0.6477137", "0.64675003", "0.64669615", "0.6422462", "0.6418191", "0.63934374", "0.6390747", "0.6382785", "0.63766855", "0.63636583", "0.63521415", "0.63419616", "0.63332254", "0.63210744", "0.6294615", "0.62922484", "0.62789834", "0.6275267", "0.6274332", "0.6267784", "0.6267784", "0.6255235", "0.62233526", "0.6211367", "0.61836326", "0.6173208", "0.61695576", "0.61642355", "0.6164117", "0.6154562", "0.6150091", "0.61259437", "0.6101547", "0.61009836", "0.61008537", "0.6093743", "0.60872734", "0.60850525", "0.6074983", "0.60630244", "0.6059984", "0.60490835", "0.60478127", "0.60476834", "0.6042372", "0.6040074", "0.6040074", "0.60370815", "0.6020954", "0.60111666", "0.6009826", "0.5982999", "0.59761596", "0.596899", "0.5955032", "0.5952252", "0.5941291", "0.59349686", "0.59327376", "0.59323555", "0.59267515", "0.58809465", "0.58788306", "0.58632916", "0.5862378", "0.58435166", "0.58385134", "0.5836756", "0.58284754", "0.5827809", "0.5819473", "0.58189636", "0.57989544", "0.5790581", "0.57896334", "0.57849663", "0.57589597", "0.57469755" ]
0.6833908
8
Call GoDaddy developer API endpoint. Only supports GET endpoints to keep access readonly.
def _call_endpoint(url_suffix: str, base_url: str = BASE_URL) -> dict:
    headers = _get_headers()
    url = os.path.join(base_url, url_suffix)
    resp = requests.get(url, headers=headers)
    return resp.json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def api_call():\n\tresponse = requests.get(URL_API)\n\treturn response", "def call_api(url):\n\n req = requests.get(url)\n return req", "def call_api(url):\n\n req = requests.get(url)\n return req", "def requester(get_args: dict) -> dict:\n get_args.update(dict(apikey = apikey))\n response = requests.get(URL, params = get_args)\n return response.json()", "def _make_api_call(url, params=None):\n if params is None:\n params = {}\n\n # params['apikey'] = CONFIG.BIOPORTAL_API_KEY\n params['apikey'] = \"8316a8aa-ff8e-4d6e-aa95-faeabfc72d2a\"\n return requests.get(url, params=params)", "def api():\n\treturn \"The API call\"", "def api_endpoint():\n return 'localhost'", "def request_endpoints(self):\n\n endpoints_url = self.std[\"api\"]\n endpoints_paramd = {\n \"access_token\": self.std[\"access_token\"]\n }\n\n endpoints_response = requests.get(url=endpoints_url, params=endpoints_paramd)\n print endpoints_response\n self.endpointd = endpoints_response.json()[0]", "def request(host, path, api_key, url_params=None):\n url_params = url_params or {}\n url = '{0}{1}'.format(host, quote(path.encode('utf8')))\n headers = {\n 'Authorization': 'Bearer %s' % api_key,\n }\n\n response = requests.request('GET', url, headers=headers, params=url_params)\n\n return response.json()", "def get():\n return jsonify({'doctor': 'Doctor API'}), 200", "def get_rest_call(api_url, username, password):\n response = requests.get(api_url,\n auth=HTTPBasicAuth(username, password),\n verify=False,\n timeout=4)\n return response", "def _api_call(self, **kwargs):\n params = {\n 'format': 'json',\n }\n params.update(kwargs)\n r = requests.get(self.api_base_url, params=params)\n return r.json()", "def get_call_api(url, payload, headers):\n return requests.request(\"GET\", url, headers=headers, data=payload)", "def request(host=API_HOST, path=SEARCH_PATH, api_key=API_KEY, url_params=params):\n url_params = url_params or {}\n url = '{0}{1}'.format(host, quote(path.encode('utf8')))\n headers = {\n 'Authorization': 'Bearer %s' % api_key,\n }\n\n response = requests.request('GET', url, headers=headers, params=url_params)\n\n return response.json()", "def api_client(request):\n\n base_url = request.config.getoption(\"--url\")\n delay = request.config.getoption(\"--delay\")\n templates = CONF.ENDPOINT_TEMPLATES\n return client.APIClient(base_url=base_url, delay=delay, templates=templates)", "def request(host, path, api_key, url_params=None):\n\n url_params = url_params or {}\n\n url = '{0}{1}'.format(host, quote(path.encode('utf8')))\n\n headers = {\n\n 'Authorization': 'Bearer %s' % api_key,\n\n }\n\n\n print(u'Querying {0} ...'.format(url))\n\n\n response = requests.request('GET', url, headers=headers, params=url_params)\n\n\n return response.json()", "def request(host, path, api_key, url_params=None):\r\n url_params = url_params or {}\r\n url = '{0}{1}'.format(host, quote(path.encode('utf8')))\r\n headers = {\r\n 'Authorization': 'Bearer %s' % api_key,\r\n }\r\n\r\n print(u'Querying {0} ...'.format(url))\r\n \r\n response = requests.request('GET', url, headers=headers, params=url_params)\r\n\r\n return response.json()", "def api():\n try:\n data = json.loads(request.data)\n except:\n return jsonify({\"jsonrpc\": \"2.0\", \"error\": {\"code\": -32700, \"message\": \"Parse error\"}, \"id\": None}), 500\n return jsonify(hwi.jsonrpc(data))", "def ping_missing_api(request):\r\n return _api_response(request, {\r\n 'success': False,\r\n 'message': 'The API url should be /api/v1'\r\n })", "def get(self, request, format=None):\n 
an_apiview = [\n 'Uses HTTP methods as functions(get, post, patch, put, delete)',\n 'Is similar to traditional Django view',\n 'Give you the most control over your app logic',\n 'Is mapped manually to URLs'\n ]\n return Response({'message':'Hello!', 'an_apiview':an_apiview})", "def makeApiCall(url, endpointParams, type):\r\n\r\n if type == 'POST': # post request\r\n data = requests.post(url, endpointParams)\r\n else: # get request\r\n data = requests.get(url, endpointParams)\r\n\r\n response = dict() # hold response info\r\n response['url'] = url # url we are hitting\r\n response['endpoint_params'] = endpointParams # parameters for the endpoint\r\n response['endpoint_params_pretty'] = json.dumps(endpointParams, indent=4) # pretty print for cli\r\n response['json_data'] = json.loads(data.content) # response data from the api\r\n response['json_data_pretty'] = json.dumps(response['json_data'], indent=4) # pretty print for cli\r\n\r\n return response # get and return content\r", "def _execApiCall(headers, params, method_name,\r\n domain='ma.gnolia.com',\r\n urlhead='/api/rest/1/'):\r\n \r\n if 'api_key' not in params and method_name not in ['echo', 'get_key']:\r\n raise MagnoliaException('Required API Key parameter missing')\r\n conn = httplib.HTTPConnection(domain)\r\n conn.request('POST', urlhead + method_name, params, headers)\r\n return conn.getresponse()", "def api(self) -> str:", "def get(self,request,format = None):\n an_apiview = [\n 'Uses HTTP methods as function (get,post,patch,put,delete)',\n 'Is similar to a traditional Django View',\n 'Gives you the most control over your appliction logic',\n 'Is mapped manually to URLs'\n ]\n return Response({'message':'Hello!','an_apiview': an_apiview})", "def __request(self,endpoint):\n apiRequest = requests.get(\"%s/%s\" % (self.baseurl,endpoint), \n auth=requests.auth.HTTPBasicAuth(self.api_id, self.api_secret))\n try:\n json = apiRequest.json()\n return json\n except JSONDecodeError:\n print(\"Failed to download or failed to parse JSON.\")\n print(apiRequest)\n return None", "def request(host, path, api_key, url_params=None):\n url_params = url_params or {}\n url = '{0}{1}'.format(host, quote(path.encode('utf8')))\n headers = {\n 'Authorization': 'Bearer %s' % api_key,\n }\n\n print(u'Querying {0} ...'.format(url))\n\n response = requests.request('GET', url, headers=headers, params=url_params)\n\n return response.json()", "def request(host, path, api_key, url_params=None):\n url_params = url_params or {}\n url = '{0}{1}'.format(host, quote(path.encode('utf8')))\n headers = {\n 'Authorization': 'Bearer %s' % api_key,\n }\n\n print(u'Querying {0} ...'.format(url))\n\n response = requests.request('GET', url, headers=headers, params=url_params)\n\n return response.json()", "def call_api(payload):\n data = requests.get(weatherbit_url, params=payload)\n assert data.status_code == 200, f\"Something wrong. 
Error details: {data.json()['error']}\"\n assert 'error' not in data.json().keys(), f'Problem: {data.json()}'\n return data.json()", "def send_api_request(self, url, **kwargs):\n\n params = self._params.copy()\n dct = {k: kwargs[k] for k in kwargs if kwargs[k] is not None}\n params.update(dct)\n\n res = requests.get(url, params=params)\n if res.status_code != 200:\n try:\n error = res.json()['error']\n except ValueError:\n error = None\n raise SwrveApiException(error, res.status_code, url, params)\n\n return res.json()", "async def test_dev_fetch_api_key(client):\n params = [('username', 'iago@zulip.com')]\n headers = { \n 'Accept': 'application/json',\n }\n response = await client.request(\n method='POST',\n path='/api/v1/dev_fetch_api_key',\n headers=headers,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def httpapi_request(client, **params) -> 'Response':\n return requests.get(\n _HTTPAPI,\n params={\n 'client': client.name,\n 'clientver': client.version,\n 'protover': 1,\n **params\n })", "def view_get():\n\n return jsonify(get_dict(\"url\", \"args\", \"headers\", \"origin\"))", "def _query_api(\n master_url=settings.OPENSHIFT_API['NP']['OPENSHIFT_MASTER'],\n api_token=settings.OPENSHIFT_API['NP']['API_TOKEN'],\n endpoint='/oapi/v1/buildconfigs'):\n\n openshift_api_url = 'https://' + master_url\n openshift_api_get_endpoint = openshift_api_url + endpoint\n bearer_token_header = {'Authorization': 'Bearer ' + api_token }\n\n try:\n response = requests.get(openshift_api_get_endpoint,headers=bearer_token_header, timeout=2.0)\n except requests.ConnectTimeout as e:\n logger.error(e)\n return None\n except requests.ConnectionError as e:\n logger.error(e)\n return None\n\n if not response.ok:\n logger.error(response.status_code)\n return None\n else:\n return response", "def request(host, path, api_key, url_params=None):\n url_params = url_params or {}\n url = '{0}{1}'.format(host, quote(path.encode('utf8')))\n headers = {\n 'Authorization': 'Bearer %s' % api_key,\n }\n\n print(u'Querying {0} ...'.format(url))\n\n response = requests.request('GET', url, headers=headers, params=url_params)\n\n return response", "def base_request(url_path):\n response = requests.get(settings.URL_API + url_path)\n if response.status_code != 200:\n return response\n else:\n return response.json()", "def get(self, request, format=None):\n an_apiview = [\n \"User HTTp methods get, put, post, delete method\",\n \"very similar to previous Django view\",\n \"gives you more control on api logic\",\n 'Is mapped to manually to urls'\n ]\n\n return Response({'message':\"hello\", \"an_apiview\": an_apiview} )", "def _public_request(self, path, querystring):\n\n url = '{}/{}?{}'.format(self.api_server, path, querystring)\n return self._request(self.HTTP_GET, url, request_type=self.PUBLIC)", "def api(self, url, response_checker=default_checker.default_checker):\n return self._api_call(url, response_checker)", "def GET(self, path, params={}):\n request_url = 'https://{0}:{1}/rest/{2}'.format(\n self.settings.api_host,\n self.settings.api_port,\n path\n )\n\n # Make the API request\n response = requests.get(request_url,\n auth = (self.settings.api_user, self.settings.api_password),\n verify = self.settings.verify_ssl,\n headers = self.settings.headers,\n params = params\n )\n\n # Request failed\n if not int(response.status_code) == 200:\n raise Exception('Failed to GET {0}: {1}'.format(request_url, response.json()))\n return response.json()", "def 
cli(ctx, base_url, token, debug, watch):\n connection = ctx.obj.connection\n ctx.obj.debug = debug\n ctx.obj.watch = watch\n if debug:\n logging.basicConfig(level=logging.DEBUG)\n import httplib\n httplib.HTTPConnection.debuglevel = 1\n if base_url:\n connection.api_url = base_url\n\n if token:\n connection.credentials.token = token\n elif not connection.credentials.token:\n click.secho('Token not provided', fg='red')\n return\n if base_url:\n connection.api_url = base_url", "def _api_call(self, method: str, endpoint: str, data: dict = None) -> requests.Response:\n\n headers = {\n \"Content-Type\": \"text/plain; charset=uft-8\"\n }\n\n auth = {\n \"usr\": self.user,\n \"key\": self.api_key\n }\n\n payload = {\"auth\": auth, \"data\": data}\n\n response = requests.request(method=method,\n url=self.api_url + endpoint,\n headers=headers,\n data=json.dumps(payload))\n return response", "def api():\n api_routes = [\n \"/api/v1.0/beer\",\n \"/api/v1.0/breweries\",\n ]\n return render_template(\"api.html\", api_routes = api_routes)", "def __apiRequest(self, url, parms={}):\n authparms = self.__addAuthParms(parms);\n request = self.http.request('GET', url, fields=authparms)\n if request.status != 200:\n raise ApiCommunicationError('Failed to retrieve data from Marvel, HTTP Status {}'.format(request.status))\n else:\n return json.loads( request.data.decode('utf-8') )", "def protect_endpoint():\n pass", "def test_00_api_get(self):\r\n # GET as Anonymous\r\n url = '/api/'\r\n action = 'get'\r\n self.check_limit(url, action, 'app')", "def api_key(request):\r\n user_acct = request.user\r\n return _api_response(request, {\r\n 'api_key': user_acct.api_key,\r\n 'username': user_acct.username\r\n })", "def request(self, verb, address, params=None, data=None):\n return BWUser.bare_request(verb=verb, address_root=self.api_url,\n address_suffix=address,\n access_token=self.token,\n params=params or dict(),\n data=data or dict())", "def status_api(request):\n if request.method == 'GET':\n return JsonResponse({\n 'status': 'OK',\n 'version': __version__\n }, status=200)", "def do_GET(self):\n self.http_method = 'GET'\n self.response()", "def request_gateway_api(url, payload=None):\n if is_url_validated(url):\n response = requests.get(url, params=payload)\n return response\n # Logs\n logger.info('{} is not validated'.format(url))\n return None", "def api_call(endpoint, params, headers):\n\n api_response = get(BASE_URL.format(endpoint=endpoint), params=params,\n headers=headers)\n\n api_response.raise_for_status()\n json_resp = api_response.json()\n\n api_response.close()\n return json_resp", "def send_get(self, api_url, query=None):\n resp = requests.get(self.base_url + api_url, params=query)\n\n return resp", "def url(vmanage_host,vmanage_port,api):\r\n \"\"\" function to get the url provide api endpoint \"\"\"\r\n \r\n return f\"https://{vmanage_host}:{vmanage_port}{api}\"", "def _get_request(url_root,api_key,path,response_type,params, ssl_verify):\n url = _url_builder(url_root,api_key,path,params)\n content = _fetch(url, ssl_verify)\n response = _dispatch(response_type)(content)\n return response", "def index():\n response = jsonify(\n {'message':'Hello, RESTful API development!'}\n )\n \n return response, 200", "def index():\n # curl -k -X POST https://127.0.0.1:43210/api/v1.0 -H 'content-type: application/json' -d '{\"data\": \"exhaust\"}'\n return jsonify({'meta': {'success': True, 'code': 200}, 'result': {\"message\": request.get_json()}}), 200", "def bare_request(verb, address_root, address_suffix, 
access_token=\"\",\n params=None, data=None):\n params = params or dict()\n data = data or dict()\n url = \"%s%s\" % (address_root, address_suffix)\n\n if access_token:\n params[\"access_token\"] = access_token\n\n try:\n if data:\n response = verb(url,\n params=params,\n data=data,\n headers={\"Content-type\": \"application/json\"})\n else:\n response = verb(url, params=params)\n response_json = response.json()\n except Exception as e:\n logger.error(\"Something was wrong getting a response from \"\n \"URL %s\" % url)\n raise exc.BrandwatchApiException(str(e))\n else:\n errors = response_json.get('errors')\n if errors:\n _raise_bw_exception(response, data, errors)\n return response_json", "def api_request(method, url, **kwargs):\n if not settings.BLOCKSTORE_API_AUTH_TOKEN:\n raise ImproperlyConfigured(\"Cannot use Blockstore unless BLOCKSTORE_API_AUTH_TOKEN is set.\")\n kwargs.setdefault('headers', {})['Authorization'] = f\"Token {settings.BLOCKSTORE_API_AUTH_TOKEN}\"\n response = requests.request(method, url, **kwargs)\n if response.status_code == 404:\n raise NotFound\n response.raise_for_status()\n if response.status_code == 204:\n return None # No content\n return response.json()", "def test_doGet(self) -> None:\n\n status_code = apicall.doGet(URL, self._browserheader)\n print(\"in do get:\", status_code)\n assert status_code == API_SUCCESS", "def get(self, request, format=None):\n an_apiview = [\n 'Uses HTTP methods as functions (get,post,patch,put,delete)',\n 'Is similar to a traditional django view',\n 'Gives you the most control over the applicaton logic',\n 'Is mapped manually to the URLs',\n ]\n return Response({'message': 'get method', 'an_apiview': an_apiview})", "def get(self, request, format=None):\n\n an_apiview= [\n 'Uses HTTP methods as functions (get, post, patch, put, delete)',\n 'Is similar to a traditional Django View',\n 'Gives you the most control over your logic',\n 'Is mapped manually to URLs',\n 'Douki mohamed',\n ]\n\n return Response({'message': 'Hello Douki!', 'an_apiview': an_apiview})", "def test_GET_call_api_and_return_200Ok(client):\n\n url = '/api/v1/calls/'\n\n response = client.get(url)\n\n assert response.status_code == status.HTTP_200_OK", "def call_api(\n self,\n endpoint,\n method='GET',\n payload=None,\n params=None,\n deadline=None,\n version='v1',\n service='compute'):\n assert service in ('compute', 'replicapool')\n assert endpoint.startswith('/'), endpoint\n url = 'https://www.googleapis.com/%s/%s/projects/%s%s' % (\n service, version, self._project_id, endpoint)\n return net.json_request(\n url=url,\n method=method,\n payload=payload,\n params=params,\n scopes=AUTH_SCOPES,\n service_account_key=self._service_account_key,\n deadline=30 if deadline is None else deadline)", "def index():\n\n return redirect(api)", "def get(self, request, format=None):\n an_apiview = [\n 'Uses HTTP methos as function (get, post, patch, put, delete)',\n 'Is similar to a traditional Django View',\n 'Gives you the most control over your application logic',\n 'Is mapped manually to URLs',\n ]\n\n return Response({'message': 'Hello', 'an_apiview': an_apiview}) # Dictionary or List only", "def test_anything_else_is_accessible(api_client):\n\n assert api_client().get(\"/anything/else\").status_code == 200", "def do_GET(self): # pylint:disable=invalid-name\n if not self.is_log_path_valid():\n self.report_404()\n return\n scheme = \"https\" if self.server.cert is not None else \"http\"\n resp = '<html>'\n resp += '<head>\\n'\n resp += ' 
<title>{0}</title>\\n'.format(self.app_name)\n resp += '</head>\\n'\n resp += '<body>\\n'\n resp += ' <center>\\n'\n resp += ' <h2>{0} is working via {1}</h2>\\n'.format(self.app_name,\n scheme.upper())\n resp += ' </center>\\n'\n resp += ' <p>Please point your APIC at:<br /><br />'\n ip_add = [(s.connect((self.client_address[0], 80)), s.getsockname()[0],\n s.close()) for s in [socket.socket(socket.AF_INET,\n socket.SOCK_DGRAM)]][0][1]\n resp += ' {0}://{1}:{2}{3}</p>'.format(scheme, ip_add,\n self.server.server_address[\n 1],\n self.path)\n resp += '</body>\\n'\n resp += '</html>'\n self.send_200_resp(resp, \"text/html\")", "def ping(request):\r\n rdict = request.matchdict\r\n params = request.params\r\n username = rdict.get('username', None)\r\n api_key = params.get('api_key', None)\r\n user = UserMgr.get(username=username)\r\n # Check if user provided the correct api_key\r\n if api_key == user.api_key:\r\n return _api_response(request, {\r\n 'success': True,\r\n 'message': 'Looks good'\r\n })\r\n else:\r\n return _api_response(request, {\r\n 'success': False,\r\n 'message': 'API key is invalid.'\r\n })", "def get_admin(self, path):\n req_url = self.normalize_admin_url(path)\n headers = {\"user-agent\": self.u_agent}\n res = requests.get(req_url, headers=headers, auth=self.auth, verify=False)\n if res.status_code in [400, 401, 403, 404, 406]:\n return Response(res.status_code, res)\n try:\n return Response(0, res.json())\n except ValueError:\n # The API does not appear to return valid JSON\n # It is probably not a CDMI API - this will be a problem!\n return Response(500, \"Invalid response format\")", "def get_api(self):\n from geoffrey.utils import get_api\n return get_api(self.app.routes, prefix='/api')", "async def get(self) -> web.Response:\n response = {\n \"status\": \"success\",\n \"Data\": \"No current tools supported\",\n }\n\n return web.json_response(data=response, status=200)", "def get(self, request, format=None):\n an_apiview = [\n 'Uses HTTP methods as function (get, post, put, delete)',\n 'Is similar to a traditional Django view',\n 'Gives you the most control over you application logic',\n 'Is mapped manually to URLs',\n ]\n\n return Response({'message':'Hello', 'an_view':an_apiview})", "def getAPI(self):\n return self.api_url", "def probe_api():\n\n info = loads(get(url).text)\n return info", "def request_external(host, path, api_key, url_params=None) -> Dict:\n url_params = url_params or {}\n url = '{0}{1}'.format(host, quote(path.encode('utf8')))\n headers = {\n 'Authorization': 'Bearer %s' % api_key,\n }\n\n response = requests.request('GET', url, headers=headers, params=url_params)\n\n return response.json()", "def call_api(page_num=0):\n base_url = \"http://data.sfgov.org/resource/jjew-r69b.json\"\n query_string = SoQL_query(page_num=page_num).generate_query()\n url = base_url+query_string\n response = requests.get(url)\n return response", "def api_endpoint(self) -> str:\n return pulumi.get(self, \"api_endpoint\")", "def live_url_request(url, arg):\n if arg == \"-m\":\n json_response = requests.get(url)\n\n elif arg == \"-l\":\n live_token= get_live_token()\n json_response = requests.get(url, headers={\"PRIVATE-TOKEN\": live_token})\n return json_response", "def access_url(context, url):\n context.response = requests.get(context.coreapi_url + url)", "def get(self, *args, **kwargs):\n self.request(\"get\", *args, **kwargs)", "async def _api_request(self,\n method: str,\n path_url: str,\n params: Dict[str, Any] = {}) -> Dict[str, Any]:\n base_url = 
f\"https://{global_config_map['gateway_api_host'].value}:\" \\\n f\"{global_config_map['gateway_api_port'].value}\"\n url = f\"{base_url}/{path_url}\"\n client = await self._http_client()\n if method == \"get\":\n if len(params) > 0:\n response = await client.get(url, params=params)\n else:\n response = await client.get(url)\n elif method == \"post\":\n response = await client.post(url, data=params)\n\n parsed_response = json.loads(await response.text())\n if response.status != 200:\n err_msg = \"\"\n if \"error\" in parsed_response:\n err_msg = f\" Message: {parsed_response['error']}\"\n raise IOError(f\"Error fetching data from {url}. HTTP status is {response.status}.{err_msg}\")\n if \"error\" in parsed_response:\n raise Exception(f\"Error: {parsed_response['error']}\")\n\n return parsed_response", "def home(request):\n response_status = {'text': \"API is up\"}\n return JsonResponse(response_status, status=200)", "def opt_engine_rest_api():\n request_json = request.get_json()\n return process_request(request_json)", "def _do_call(cls, method, url, params={}):\n headers = {\n 'User-Agent': 'py-retain/' + __version__,\n 'content-type': 'application/json'\n }\n try:\n r = cls.request_map[method.lower()]\n except KeyError:\n raise ValueError(\"Unknow HTTP Method\")\n response = r(\n url,\n auth=(cls.app_id, cls.api_key),\n headers=headers,\n data=json.dumps(params),\n timeout=cls.timeout)\n return response.json()", "def api_call(url, method, debug, **kwargs):\n resp = None\n attempt = 0\n maxattempts = 3\n req = Request(method.upper(), url, **kwargs)\n\n if debug:\n print(\"DEBUG: Request ({}) {}\".format(method.upper(), url))\n\n while True:\n try:\n attempt += 1\n resp = Session().send(\n Session().prepare_request(req), verify=True)\n resp.raise_for_status()\n break\n except (HTTPError, ConnectionError, Timeout) as ex:\n if attempt >= maxattempts:\n abort(ex.message)\n else:\n time.sleep(1)\n continue\n except RequestException as ex:\n abort(ex.message)\n\n if resp is not None:\n return resp\n else:\n abort(\"Error making API call to URL: \" % url)", "def call_api_endpoint(url, data):\n try:\n \n req = urllib2.Request(url, data, {'Content-Type': 'application/json'})\n \n f = urllib2.urlopen(req)\n response = f.read()\n f.close()\n return response\n\n \n except urllib2.HTTPError, e:\n if e.code == 400:\n return -1", "def http_request(endpoint, data, method='POST'):\n url = BASE_API + endpoint\n data['authkey'] = AUTH_KEY\n\n response = requests.request(method, url=url, data=data, timeout=300, verify=VERIFY)\n if response.status_code == 200:\n try:\n return response.json()\n except Exception as e:\n return_error('Response JSON decoding failed due to {}'.format(str(e)))\n\n else:\n return_error('API Returned, {}:{}'.format(response.status_code, response.reason))", "def _issue_api_request(self, resource_url, method='get', body=None,\n sensitive=False, conflict_ok=False):\n host = self.configuration.san_ip\n port = self.configuration.datera_api_port\n api_token = self.datera_api_token\n api_version = self.configuration.datera_api_version\n\n payload = json.dumps(body, ensure_ascii=False)\n payload.encode('utf-8')\n\n header = {'Content-Type': 'application/json; charset=utf-8',\n 'Datera-Driver': 'OpenStack-Cinder-{}'.format(self.VERSION)}\n\n protocol = 'http'\n if self.configuration.driver_use_ssl:\n protocol = 'https'\n\n if api_token:\n header['Auth-Token'] = api_token\n\n client_cert = self.configuration.driver_client_cert\n client_cert_key = self.configuration.driver_client_cert_key\n 
cert_data = None\n\n if client_cert:\n protocol = 'https'\n cert_data = (client_cert, client_cert_key)\n\n connection_string = '%s://%s:%s/v%s/%s' % (protocol, host, port,\n api_version, resource_url)\n\n response = self._request(connection_string,\n method,\n payload,\n header,\n cert_data)\n\n data = response.json()\n\n if not response.ok:\n self._handle_bad_status(response,\n connection_string,\n method,\n payload,\n header,\n cert_data,\n conflict_ok=conflict_ok)\n\n return data", "def _api_call(self, api_call, method=\"GET\", payload=None):\n # type: (str, str, Dict[str, str]) -> requests.Response\n\n headers = {\n \"accept\" : \"application/json\",\n \"Authorization\" : f\"Bearer {self.access_token}\",\n \"x-ibm-client-id\" : self.client_id,\n }\n self.__log.debug(headers)\n api_url = f\"{self.base_url}/{api_call}\"\n\n self.__log.debug(f\"Calling {api_url} with method {method}\")\n if method == \"GET\":\n resp = requests.get(api_url, headers=headers)\n elif method == \"POST\":\n resp = requests.post(api_url, headers=header, data=payload)\n elif method == \"PUT\":\n resp = requests.put(api_url, headers=header, data=payload)\n elif method == \"DELETE\":\n resp = requests.delete(api_url, headers=headers)\n elif method == \"HEAD\":\n resp = requests.head(api_url, headers=headers)\n elif method == \"OPTIONS\":\n resp = requests.options(api_url, headers=headers)\n else:\n raise Exception(f\"The method {method} is unsupported\")\n \n if (resp.ok):\n return resp\n else:\n self.__log.debug(resp.status_code)\n self.__log.debug(resp.text)\n return resp", "def post(self):\n\n if config.logging:\n logfile.info(\"Request to /rest/broadsoft recieved\")\n if config.verbose:\n logconsole.info(\"Request to /rest/broadsoft recieved\")\n\n # Ensure that the user has sent a jwt to the endpoint.\n try:\n verify_jwt_in_request()\n except Exception as error:\n return make_response(\"<error>Unauthorized</error>\", 401)\n\n # Create a user object from the JWT identity object.\n user = User().from_identity(get_jwt_identity())\n\n # Check if a user was able to be created.\n if user is None:\n return \"<ErrorInfo><message>Not logged in</message><error>true</error></ErrorInfo>\", 401\n\n # Create a request parser to parse arguments\n parser = reqparse.RequestParser()\n\n # Configure endpoint arguments.\n parser.add_argument(\n name='endpoint',\n help='Missing the required broadsoft endpoint to connect to.',\n required=True)\n\n parser.add_argument(\n name='data',\n type=str,\n help='JSON data needs to be a string')\n\n parser.add_argument(\n name='method',\n help='Missing method type. 
ex) method:GET/PUT/POST...',\n required=True)\n\n # Check if the arguments passed were valid.\n try:\n args = parser.parse_args()\n except reqparse.exceptions.BadRequest as e:\n # If there are any errors, ensure that login=False is sent.\n message = \"<error>true</error>\"\n return message, 400\n\n # Get the data sent from the request.\n url = self.url + args['endpoint'].replace(\"<user>\", user.username)\n data = \"\"\n method = args['method']\n\n # Check if any data was sent\n if(args['data']):\n data = args['data']\n\n if config.logging:\n logfile.info(\"Sending data: \" + method + \" \" + url + \" \" + data)\n if config.verbose:\n logconsole.info(\"Sending data: \" + method + \" \" + url + \" \" + data)\n\n # Get the user's broadsoft token from the JWT and send a request to broadsoft.\n response = Proxy().to_broadsoft(method, url, data, user)\n\n # Check if a valid response was returned.\n if response.status_code == 200 or response.status_code == 201:\n\n # Output a response to the console and log files.\n if config.logging:\n logfile.info(\"Recieved:\" + str(response.status_code) + \" \" + str(response.content) if response.content else \"Recieved: \" + str(response.status_code))\n if config.verbose:\n logconsole.info(\"Recieved:\" + str(response.status_code) + \" \" + str(response.content) if response.content else \"Recieved: \" + str(response.status_code))\n\n # Format a response\n if response.content:\n return make_response(str(response.content.decode('ISO-8859-1')), 200)\n else:\n return make_response(\"\", 200)\n else:\n if config.logging:\n logfile.info(\"Recieved:\" + str(response.status_code) + \" \" + response.content.decode('ISO-8859-1') if response.content else \"\")\n if config.verbose:\n logconsole.info(\"Recieved:\" + str(response.status_code) + \" \" + response.content.decode('ISO-8859-1') if response.content else \"\")\n\n if response.content:\n return make_response(response.content.decode('ISO-8859-1'), response.status_code)\n else:\n return make_response(\"\", response.status_code)", "def get():\n return jsonify(baby='knight2'), 200", "def make_get_request(client, endpoint):\n return client.get(endpoint)", "def api_req(dev, api_call):\r\n import xmltodict\r\n import logging\r\n try:\r\n r = requests.get(dev + ':8060' + api_call, timeout=5)\r\n except Exception as exc:\r\n response = [\"ERR\", exc]\r\n return response[0]\r\n except ConnectionError as connerr:\r\n response = [\"ERR\", connerr]\r\n return response[0]\r\n except TimeoutError as toerr:\r\n response = [\"ERR\", toerr]\r\n return response[0], toerr\r\n r_code = r.status_code\r\n if r_code == 200:\r\n print(\"REQUEST WAS A SUCCESS. 
DEVICE RETURNED: {} \".format(str(r)))\r\n r2 = r.text\r\n response = xmltodict.parse(r2, xml_attribs=False)\r\n return response\r\n else:\r\n response = \"UnknownERR\"\r\n dev.state(DISABLED)\r\n return msg_box(response)", "def request(host, path, api_key, url_params=None):\n url_params = url_params or {}\n url = '{0}{1}'.format(host, path)\n headers = {\n 'x-rapidapi-host': \"us-restaurant-menus.p.rapidapi.com\",\n 'x-rapidapi-key': api_key,\n }\n\n print(u'Querying {0} ...'.format(url))\n\n response = requests.request('GET', url, headers=headers, params=url_params)\n\n data = json.loads(response.text)\n if 'error' in data:\n raise Exception(str(data))\n return data", "def cci_api():\n\n\n\t\t\treturn render_template( 'api.html' )", "def get_api(self):\n from geoffrey.utils import get_api\n return get_api(self.app.routes, prefix=\"/\")", "def test_GET(self):\n if not self.url:\n return\n response = self.client.get(self.url, {}, format='json')\n self.assertIn(response.status_code, [status.HTTP_405_METHOD_NOT_ALLOWED,\n status.HTTP_401_UNAUTHORIZED])", "def _get_good_request(self):\r\n prms = {\r\n 'url': u'http://google.com',\r\n 'description': u'This is my google desc',\r\n 'extended': u'And some extended notes about it in full form',\r\n 'tags': u'python search',\r\n }\r\n\r\n req_params = urllib.urlencode(prms)\r\n res = self.app.post(\r\n '/api/v1/admin/bmark?api_key={0}'.format(self.api_key),\r\n params=req_params,\r\n )\r\n return res", "def request(self, apiurl, caller, mode=\"get\", data=None):\n\n token = self._getToken(caller)\n # print(self._tokenstore)\n headers = {'Content-Type': 'application/json', 'Accept': 'application/json', 'Authorization': f'Bearer {token}'}\n if mode == \"get\":\n return self._oauth.getRequester().get(self._baseurl + apiurl, headers=headers)\n elif mode == \"post\":\n return self._oauth.getRequester().post(self._baseurl + apiurl, data=data, headers=headers)\n elif mode == \"delete\":\n return self._oauth.getRequester().delete(self._baseurl + apiurl, headers=headers)", "def api_get(self, name):\n try:\n r = self._get(['apis', name])\n except requests.HTTPError:\n return None\n else:\n return r" ]
[ "0.6853146", "0.6268339", "0.6268339", "0.62518954", "0.62271094", "0.605959", "0.60268193", "0.60259753", "0.59603226", "0.59556425", "0.59494823", "0.5872682", "0.5860177", "0.58262163", "0.57653207", "0.5758254", "0.57468194", "0.57456577", "0.5684184", "0.567575", "0.56635946", "0.5656675", "0.5651085", "0.5647359", "0.56392694", "0.56391686", "0.56391686", "0.5632862", "0.562787", "0.5627653", "0.5625221", "0.56197697", "0.56016237", "0.5599405", "0.5590814", "0.5586176", "0.55841845", "0.55772984", "0.55477506", "0.5545348", "0.55429727", "0.5537563", "0.5522704", "0.55185765", "0.5509818", "0.55092716", "0.55069166", "0.5506516", "0.5498339", "0.5483691", "0.54830843", "0.5474672", "0.5473069", "0.54709125", "0.5470688", "0.54578346", "0.54551625", "0.5443743", "0.54424244", "0.5430492", "0.5427966", "0.5427574", "0.54144806", "0.5408867", "0.5392876", "0.5388418", "0.5387187", "0.5385353", "0.5382121", "0.5382091", "0.53740984", "0.5364051", "0.5361647", "0.5360928", "0.5360908", "0.53561306", "0.5349043", "0.5347934", "0.5346842", "0.5344333", "0.5343733", "0.5342094", "0.53402144", "0.5338128", "0.53373086", "0.53369176", "0.53287756", "0.532161", "0.5318225", "0.531762", "0.53121775", "0.53059155", "0.53044295", "0.5299869", "0.52979577", "0.52901024", "0.52894956", "0.5284227", "0.52823293", "0.5281704" ]
0.55546147
38
Get list of Domains for this API key.
def get_domains() -> List[str]:
    ret = _call_endpoint("v1/domains")
    # Example response:
    # [{'createdAt': '2016-06-25T03:08:44.000Z',
    #   'domain': 'mydomain.com',
    #   'domainId': 12345678,
    #   'expirationProtected': False,
    #   'expires': '2020-06-25T03:08:44.000Z',
    #   'holdRegistrar': False,
    #   'locked': True,
    #   'nameServers': None,
    #   'privacy': False,
    #   'renewAuto': True,
    #   'renewDeadline': '2020-08-09T03:08:44.000Z',
    #   'renewable': True,
    #   'status': 'ACTIVE',
    #   'transferProtected': False},]
    domains = [d["domain"] for d in ret]
    return domains
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_domains(self):\n\n response = self.call(method='getDomains')\n domains = []\n for d in response:\n domain = self.domain(domain=d['domain'])\n domains.append(domain)\n return domains", "def listDomains(self):\n reply = self.rpc.getDomains(self.username,\n self.password)\n if reply[0] == 'UNKNOWN_ERROR':\n raise Exception(\"RPC returned error: \" + reply[0])\n return reply", "def domains(self):\n return DomainCollection(self.request)", "def list_domain_names(self) -> Dict:\n pass", "def tracking_domain_list(self):\r\n params = base.get_params(None, locals())\r\n return self._get('tracking_domain_list', params)", "def ListDomains(self, perPage=0, page=1):\n\n class Result(Model):\n domains = ListField(ModelField(Domain))\n\n if perPage != 0:\n headers = {\"perPage\": perPage, \"page\": page}\n response = self.client.http_get(\"/v4/domains\", headers)\n else:\n response = self.client.http_get(\"/v4/domains\")\n\n return parse_response(response, Result)", "def getDomains(self, company):\n return self.db.getDomains(company)", "def list_domain(self, feed_id=None):\n resources = self.list_resource(feed_id=feed_id, resource_type_id='Host Controller')\n domains = []\n if resources:\n for resource in resources:\n resource_data = self.get_config_data(\n feed_id=resource.path.feed_id, resource_id=resource.id)\n domain_data = resource_data.value\n domains.append(Domain(resource.id, resource.name, resource.path, domain_data))\n return domains", "def list_domain(self, feed_id=None):\n domains = self.list_resource(feed_id=feed_id,\n resource_type_id='Domain Host',\n cls=Domain,\n list_children=True,\n include_data=True)\n return domains", "def domains(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"domains\")", "def list_keystone_v3_domains(self):\n LOG_OBJ.debug(\"List the domains.\")\n\n _url = \"http://\" + self.host_ip + \":35357/v3/domains\"\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while creating domain\")\n print (\"No response from Server while creating domain\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\" Listing domains Failed with status %s \"\n \"and error : %s\" % response.status, response.data)\n print (\" Listing domains Failed with status %s and error : %s\" %\n response.status, response.data)\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Domains list : %s \" % output)\n print (\"Domains list : %s \" % output)\n return output['domains']", "def domains(cls):\n return [cls.domain]", "def list_zones(self, **kwargs):\r\n return self.client['Account'].getDomains(**kwargs)", "def domains(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"domains\")", "def domain_list_all(self):\n page = 1\n on_page = 100\n ret = []\n while True:\n r = self.domain_list(page=page, on_page=on_page)\n ret += r['domains']\n if len(ret) >= r['total']:\n break\n page += 1\n return ret", "def domain(self):\n return self.keys()", "def get_search_domains(self):\n\t\treturn handle_to_object(call_sdk_function('PrlSrvCfg_GetSearchDomains', self.handle))", "def domains(self) -> pulumi.Input[Sequence[pulumi.Input['ElastigroupIntegrationRoute53DomainArgs']]]:\n return pulumi.get(self, \"domains\")", "def list_domain_names():\n pass", "def list(self, domain):\n return request(\n 
API_LIST.DNS_LIST.value,\n {\n 'email': self.email,\n 'token': self.token,\n 'domain': domain\n }\n )", "def cb_listdomains(self, cmd):\n for cur in sorted(self.d.listDomains(),\n key=lambda x: _domreverse(x['domain'])):\n print \"%(domain)60s %(expiration_date)15s\" % cur", "def get_subdomains(self):\n\n response = self.call(method='getSubdomains', args=[self.domainname])\n subdomains = []\n for s in response:\n subdomain = self.subdomain(domain=self.domainname, subdomain=s)\n subdomains.append(subdomain)\n return subdomains", "def list_all():\n\n return (_conn.listDefinedDomains() +\n [_conn.lookupByID(id).name() for id in _conn.listDomainsID()])", "def get(self, api_key):\n\n try:\n mailgun.list_domains(api_key)\n return {\"api_key\": api_key, \"valid\": True}\n except:\n return {\"api_key\": api_key, \"valid\": False}", "def get_search_domains(self):\n\t\treturn handle_to_object(call_sdk_function('PrlSrvCfgNet_GetSearchDomains', self.handle))", "def AllowedDomains(self)->list:\n return self._allowedDomains", "def _get_domain(self):\n self.ensure_one()\n domain = []\n return domain", "def domains(cls):\n return (cls.domain, )", "def domains(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"domains\")", "def get_dns_list(self):\n return self.get_ipv4_dns_list()", "def get_search_domains(self):\n\t\treturn handle_to_object(call_sdk_function('PrlVmDevNet_GetSearchDomains', self.handle))", "def get_dns_records_from_godaddy(self) -> list:\n\n headers = {\"Authorization\": \"sso-key {}:{}\".format(self.api_key, self.secret_key)}\n dns_records = []\n for dns_record in self.dns_records:\n url = \"https://api.godaddy.com/v1/domains/{}/records/{}/{}\".format(dns_record[\"domain\"],\n dns_record[\"dns_record_type\"],\n dns_record[\"name\"])\n dns_records.append(get(url, headers=headers).text)\n return dns_records", "def get_search_domains(self):\n\t\treturn handle_to_object(call_sdk_function('PrlVmCfg_GetSearchDomains', self.handle))", "def check_domains(self, service_id, service_version):\n domain_list = self.fastly_cache[service_id]['domain_list']\n\n return domain_list", "def extract_domains(self, resp):\n return", "def get_delta_domains():\n url = os.getenv('DELTAS_URL')\n if url is None:\n raise Exception('Delta report URL configuration not set!')\n\n json = requests.get(url, timeout=10).json()\n return [domain\n for (domain,)\n in json['values']\n if dnstwist.is_valid_domain(domain)]", "def case_search_enabled_domains():\n return CaseSearchConfig.objects.filter(enabled=True).values_list('domain', flat=True)", "def list_zones(self):\n data = self._paginated_request(\"/v2/domains\", \"domains\")\n return list(map(self._to_zone, data))", "def get_domain(id):\n return query(WEB_EXAMPLE_BASE + f\"/classical/domain/{id}\")", "def get_domains(filename):\n with open(filename, 'r') as file:\n result = []\n for line in file.readlines():\n domain = line.strip()[1:]\n result.append(domain)\n return result", "def get_dns(self):\n dns = []\n for id, user in self.users_by_id.items():\n if not user.dns:\n continue\n for dn in user.dns:\n dns.append(dn)\n return dns", "def domainlist_reversewhois(self, response):\n data = response.json()\n for domain in data['response']['domains']:\n yield(domain.lower())", "def domains(self):\n ctx = stack.top\n if ctx is not None:\n if not hasattr(ctx, 'simple_domains'):\n ctx.simple_domains = {}\n for domain in self.app.config['SIMPLE_DOMAINS']:\n ctx.simple_domains[domain] = Domain(\n connection = self.connection,\n name = 
domain,\n )\n\n if not hasattr(ctx, 'simple_domain_%s' % domain):\n setattr(ctx, 'simple_domain_%s' % domain, ctx.simple_domains[domain])\n\n return ctx.simple_domains", "def getlist(self):\n self.__domainlist.sort()\n\n outstr = \"{ \"\n for index, domain in enumerate(self.__domainlist):\n outstr += domain + \" \"\n if (index % 50 == 0) and index > 0:\n outstr += \"}\\n{ \"\n\n outstr += \"}\"\n\n return outstr", "def get_domains_by_provider_url(self, provider_url):\n\n domains = \\\n self.storage_controller.get_domains_by_provider_url(provider_url)\n\n return domains", "def info(self):\n\n return self.call(method='getDomain', args=[self.domainname])", "def get_urls(self, queries):\n domains = defaultdict(list)\n for q in queries:\n q = \"\\\"\" + q + \"\\\"\"\n results = self.engine.search(q)\n\n for result in results: \n url = result.url\n domain = self.get_domain(url)\n domains[domain].append(q) \n return domains", "def get_domain_names(self, MaxResults: str = None, NextToken: str = None) -> Dict:\n pass", "def get_domain_dns_records(domain):\n url_suffix = \"v1/domains/{}/records\".format(domain)\n ret = _call_endpoint(url_suffix)\n if isinstance(ret, dict) and ret.get('code', None) == \"UNKNOWN_DOMAIN\":\n # e.g. {'code': 'UNKNOWN_DOMAIN', 'message': 'The given domain is not registered, or does not have a zone file'}\n raise Exception(f\"Can't find domain {domain}. Are you sure your API key and secret are correct?: {ret}\")\n return ret", "def domainlist_reversens(self, response):\n data = response.json()\n for domain in itertools.chain(data['response']['primary_domains'], data['response']['primary_domains']):\n yield(domain.lower())", "def _list(self, account, page):\n response = self.client.get(self.get_url(account), data={\"page\": page})\n return [\n DomainResource(**item) for item in response['data']\n ], response['pagination']", "def domain(self):\n return self._get('domain', '/domain/', self.DOMAIN_DATA)", "def show_domains(self):\n show_domains(self.system.cavity_gri)", "def items_by_domain(self, domain: str) -> List[dict]:\n if not self.connected:\n raise NotConnected(\"Please call connect first.\")\n return [value for key, value in self._states.items() if key.startswith(domain)]", "def cookieDomains(self):\n domains = []\n for cookie in self.cookies():\n domain = cookie.domain()\n if domain not in domains:\n domains.append(domain)\n \n return domains", "def get_storage_domains(cohesity_client):\n storage_domain_list = cohesity_client.view_boxes.get_view_boxes()\n for domain in storage_domain_list:\n exported_res_dict[\"Storage Domains\"].append(domain.name)\n return storage_domain_list", "def allowed_domains(self):\n if self._allowed_domains is None:\n uri = \"/loadbalancers/alloweddomains\"\n resp, body = self.method_get(uri)\n dom_list = body[\"allowedDomains\"]\n self._allowed_domains = [itm[\"allowedDomain\"][\"name\"]\n for itm in dom_list]\n return self._allowed_domains", "def allowed_domains(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"allowed_domains\")", "def public_domain_names(self) -> pulumi.Output[Optional[Sequence['outputs.ContainerPublicDomainName']]]:\n return pulumi.get(self, \"public_domain_names\")", "def get_botnet_domains():\n\n fw = \"<HTTPS://YOUR_FORTIGATE_IP:YOUR_FORTIGATE_PORT>\"\n\n path = \"/api/v2/monitor/system/botnet-domains/hits/?access_token=\"\n\n token = \"<YOUR_API_KEY>\"\n\n content_filter = \"\"\n\n if content_filter != \"\":\n url = fw + path + token + content_filter\n else:\n url = fw + path + token\n\n 
with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n \n try:\n r = requests.get(url, verify=False).json()\n except Exception:\n print(\"Something went wrong. Is the url correct? Exiting...\")\n sys.exit()\n\n for key in r['results']:\n print()\n for k,v in key.items():\n print(\"{0:6} : {1}\".format(k.upper(), str(v)))", "def get_domains(filters: Optional[Sequence[pulumi.InputType['GetDomainsFilterArgs']]] = None,\n sorts: Optional[Sequence[pulumi.InputType['GetDomainsSortArgs']]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDomainsResult:\n __args__ = dict()\n __args__['filters'] = filters\n __args__['sorts'] = sorts\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('digitalocean:index/getDomains:getDomains', __args__, opts=opts, typ=GetDomainsResult).value\n\n return AwaitableGetDomainsResult(\n domains=pulumi.get(__ret__, 'domains'),\n filters=pulumi.get(__ret__, 'filters'),\n id=pulumi.get(__ret__, 'id'),\n sorts=pulumi.get(__ret__, 'sorts'))", "def parse(domains):\n subdomains = []\n for domain in domains:\n url = 'https://urlscan.io/api/v1/search/?q=domain:{}'.format(domain)\n json_resp = json.loads(requests.get(url).text)\n subdomains += list(set(find('domain', json_resp)))\n return list(set(subdomains))", "def get_dns_servers(self):\n\t\treturn handle_to_object(call_sdk_function('PrlSrvCfg_GetDnsServers', self.handle))", "def load_private_domains(self):\n url = self.lookup(\"private_domains_url\")\n p_domains = self._fetcher.get_entities(url)\n domains = []\n for domain in p_domains:\n if 'name' in domain:\n domains.append({'name': domain['name']})\n self._domains_private = domains", "def getIpv4DnsList(self):\n with self.status._dhcp_status_mutex:\n if self.status.ipv4_lease_valid is None:\n return [None]\n else:\n return self.status.ipv4_dnslist", "def get_all_courses(self) -> List[str]:\n\n print(\"Downloading all Courses from all Domains...\")\n all_courses_url = []\n for domain in DOMAINS:\n print(\"Selected Domain: \", domain)\n selected_domain_url = ROOT_URL + \"/\" + domain\n courses, courses_url = self.get_courses(selected_domain_url)\n all_courses_url += courses_url\n\n return all_courses_url", "def get_ipv4_dns_list(self):\n \n ipv4_dns_list = self._dhcp_client_ctrl.getIpv4DnsList()\n if ipv4_dns_list is None:\n return [None]\n else:\n return map(unicode, ipv4_dns_list)", "def get_domain_whitelist(self):\n whitelist = self.charm_config[\"federation-domain-whitelist\"]\n return list(filter(None, whitelist.split(\",\")))", "def domains(cls) -> Set[str]:\n return set(cls.langs.values())", "def listRR(self):\n reply = self.rpc.getSubdomains(self.username,\n self.password,\n self.domain)\n\n if len(reply) and reply[0] in ('UNKNOWN_ERROR',\n 'RATE_LIMITED'):\n raise Exception(\"RPC returned error: \" + reply[0])\n return reply", "def get_domains(self, ID, domain_type):\n \n try:\n record = self.database[ID]\n except KeyError:\n return -1\n domain_type = domain_type.lower()\n if domain_type == 'pfam':\n doms = record[\"pfam_domains\"]\n elif domain_type == 'uniprot':\n doms = record[\"uniprot_domains\"]\n else:\n print \"%s is an unrecognized domain type. 
Use 'pfam' or 'uniprot'\"%(domain_type)\n return -2\n \n doms_raw=doms.split(\";\")\n doms_clean =[]\n for i in doms_raw:\n if i:\n tmp = i.strip()\n tmp = tmp.split(\":\")\n if len(tmp)>=2:\n name, sites = tmp\n tmp = sites.split(\"-\")\n doms_clean.append((name, tmp[0], tmp[1]))\n else:\n print \"ERROR: the domain did not match expected %s\"%(i)\n #doms_clean.append((tmp, -1, -1))\n return doms_clean", "def __parse_domains(self, lines):\n domain_list = []\n for line in lines:\n if len(line) == 0:\n continue\n if line[0] == \"!\":\n continue\n if line[0] == \"|\":\n continue\n if line[0] == \"@\":\n continue\n if line[0] == \"[\":\n continue\n if line.find('zh.wikipedia.org') == 0:\n continue\n line = string.replace(line, \"||\", \"\").lstrip(\".\")\n # strip everything from \"/\" to the end\n if line.find(\"/\") != -1:\n line = line[0:line.find(\"/\")]\n if line.find(\"*\") != -1:\n continue\n if line.find(\".\") == -1:\n continue\n # if line in oklist:\n # \tcontinue\n domain_list.append(line)\n\n return domain_list", "def disable_all_domains_always_in_scope(self, apikey=''):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/action/disableAllDomainsAlwaysInScope/', {'apikey': apikey})))", "def enable_all_domains_always_in_scope(self, apikey=''):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/action/enableAllDomainsAlwaysInScope/', {'apikey': apikey})))", "def domainlist_reverseip(self, response):\n data = response.json()\n for ip in data['response']['ip_addresses']:\n for domain in ip['domain_names']:\n yield(domain.lower())", "def domain(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"domain\")", "def test_client_get_domains(mocker, client_all_domains_input):\n mocker.patch(\"tracker_client.client.get_auth_token\")\n mocker.patch(\"tracker_client.client.create_client\")\n test_client = Client()\n test_client.execute_query = mocker.MagicMock(return_value=client_all_domains_input)\n\n domain_list = test_client.get_domains()\n\n test_client.execute_query.assert_called_once_with(\n queries.GET_ALL_DOMAINS, {\"after\": \"abc\", \"search\": \"\"}\n )\n assert domain_list[0].domain_name == \"foo.bar\"\n assert domain_list[1].dmarc_phase == \"not implemented\"\n assert domain_list[2].last_ran == \"2021-01-27 23:24:26.911236\"\n assert domain_list[0].dkim_selectors == []", "def fetch_domain_certs(domain):\n url = BASE_URL.format(domain)\n result = requests.get(url)\n if result.status_code != 200:\n result.raise_for_status()\n return result.json()", "def get_dns_servers(self):\n\t\treturn handle_to_object(call_sdk_function('PrlSrvCfgNet_GetDnsServers', self.handle))", "def domain(self, id_or_name):\n return DomainCollection(self.request).find(id_or_name)", "def domain(self):\n return self['domain']", "def list_zones(self):\n action = \"/api_dns_list.asp\"\n if self.reseller_id is not None:\n action = \"/api_dns_list_reseller.asp\"\n zones = self.connection.request(action)\n if len(zones.body) == 0:\n return []\n else:\n return self._to_zones(zones.body)", "def dict_of_domains(fc):\r\n # need to find root database (GDB or SDE)\r\n db_root = os.path.dirname(fc)\r\n while db_root[-4:].lower() != '.gdb' and db_root[-4:].lower() != '.sde':\r\n old_db_root = db_root # protect against infinite loop\r\n db_root = os.path.dirname(db_root)\r\n if old_db_root == db_root: # protect against infinite loop\r\n break\r\n arcpy.AddMessage(\"Retrieving Domains from \" + str(db_root))\r\n return {domain.name: domain.codedValues for domain in 
arcpy.da.ListDomains(db_root)}", "def list_availability_domains(self, compartment_id, **kwargs):\n resource_path = \"/availabilityDomains\"\n method = \"GET\"\n\n expected_kwargs = [\"retry_strategy\"]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_availability_domains got unknown kwargs: {!r}\".format(extra_kwargs))\n\n query_params = {\n \"compartmentId\": compartment_id\n }\n query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[AvailabilityDomain]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[AvailabilityDomain]\")", "def get_dns(self) -> Set:\n if self.dn_set.should_update():\n contacts_data = self.get_contacts_data()\n self.dn_set.update(set(contacts_data.get_dns()))\n return self.dn_set.data", "def public_domain_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ContainerPublicDomainNameArgs']]]]:\n return pulumi.get(self, \"public_domain_names\")", "def getIPs(self, domain = \"localhost\"):\n # convert 'domain' to string, in case of erroneous type being passed\n domain = str(domain)\n\n # Kind warning for those who entered an IP address instead of a domain\n try: \n inet_aton(domain)\n print(\"Warning: an IP address was given instead of a domain name.\")\n except:\n pass\n\n # Try to query DNS records to populate A-Record IP list\n # Prints errors and returns None if exceptions found\n try:\n iplist = gethost(domain)[2]\n except gaierror as ge:\n if ge.errno == -2:\n print(\"Error: Domain '{}' invalid, or unknown. \"\\\n \"Please check proper spelling and format.\\n\"\\\n \"(e.g.: python dns_get_A_record_IPs.py google.com )\".format(domain))\n elif ge.errno == -3:\n print(\"Error: Domain '{}' unreachable. Please check your connection.\".format(domain))\n return None\n except timeout:\n print(\"Error: Connection to {} timed out.\".format(domain))\n return None\n\n return iplist", "def domain(self) -> str:\n return pulumi.get(self, \"domain\")", "def domain(self) -> str:\n return pulumi.get(self, \"domain\")", "def print_all_dns_records():\n for domain in sorted(get_domains()):\n dns_records = get_domain_dns_records(domain)\n print(domain)\n pprint(dns_records)\n print(\"*\" * 50)\n # TODO: poor man's rate limiter. 
improve?\n time.sleep(2)", "def get_layer_urls(self):\n urls = []\n\n if getattr(self, 'additional_domains'):\n map(urls.append, (domain for domain in self.additional_domains.split(\";\") if domain))\n\n return urls", "def get_dns_servers(self):\n\t\treturn handle_to_object(call_sdk_function('PrlVmDevNet_GetDnsServers', self.handle))", "def get_input_domains():\n df = pandas.read_excel(\"AutoScrapy/files/EBE21 - Top 100 Onlineshops to scrapp.ods\", engine=\"odf\")\n list_of_addresses = df['Domain'].to_list()\n list_of_addresses = [(\"http://\" + address) for address in list_of_addresses]\n print(list_of_addresses)\n return list_of_addresses", "def get(self):\r\n url = '{0}/subdomains/{1}'.format(self.parent.get_url(),\r\n port.to_u(self.object_id))\r\n return http.Request('GET', url), parsers.parse_json", "def fusion_api_get_login_domains_login_certificates(self, api=None, headers=None, param=''):\n return self.login_certificates.get(api, headers, param)", "def option_domains_always_in_scope(self):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionDomainsAlwaysInScope/')))", "def domains_v2():\n # Is this public?\n configs = get_configs()\n if configs['api_requests'] == 'auth':\n # Auth token in headers\n try:\n auth_token = Token.query.filter_by(auth_token=request.headers.get('Authorization')).first()\n except:\n return {\"alternatives\" : \"Database Error with token!\"}\n if not auth_token:\n return {\"alternatives\": \"Unauthorized!\"}\n\n req_data = request.get_json()\n url = req_data['url']\n if not url:\n return {\"alternatives\" : 'None'}\n \n domain_data = check(url)\n alternatives = {\"alternatives\": domain_data['available_alternatives']}\n return alternatives", "def by_domains(self):\n\t\t\n\t\t# TODO: use urllib instead\n\t\turl_format = r'^\\s*(?:(?P<protocol>\\w+)://)?(?P<domain>[\\w\\d\\-\\.]+)(?::(?P<port>\\d+))?/?(?P<everything_else>.*)$'\n\t\tsites = {}\n\t\tfor line in self.source.lines:\n\t\t\ttry:\n\t\t\t\tif self.filter(line):\n\t\t\t\t\tresult = re.match(url_format, line.content.url)\n\t\t\t\t\tif result.group('domain') not in sites.keys():\n\t\t\t\t\t\tsites[result.group('domain')] = 0\n\t\t\t\t\tsites[result.group('domain')] += int(line.content.size)\n\t\t\texcept AttributeError:\n\t\t\t\tpass\n\t\t\n\t\t# TODO: sort; convert to lists is even better\n\t\t\n\t\treturn sites", "def list_websites(self):\r\n\r\n # Fetch websites\r\n self.fetch_website_list()\r\n\r\n # Print website data\r\n for website in self.website_list:\r\n print(\"ID: {0} | Domain: {1} | Name: {2}\".format(\r\n website['id'], website['domain'], website['name']))", "def get_domains_from_cert(cert_file):\n proc = subprocess.Popen([\"openssl\", \"x509\", \"-in\", cert_file, \"-noout\", \"-text\"],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = proc.communicate()\n if proc.returncode != 0:\n raise IOError(\"Error loading {0}: {1}\".format(cert_file, err))\n return ACMEClient._parse_domains_from_openssl_output(out.decode('utf8'))" ]
[ "0.82744664", "0.76725835", "0.7275466", "0.72130734", "0.7188468", "0.7123102", "0.7106527", "0.71019924", "0.7034431", "0.70254606", "0.7023339", "0.69961786", "0.6931258", "0.69296205", "0.6853687", "0.6843187", "0.6822089", "0.6772705", "0.6753809", "0.6742991", "0.6693973", "0.6681079", "0.66689324", "0.66550237", "0.66511244", "0.6635103", "0.6612865", "0.658361", "0.6566474", "0.65224487", "0.6464172", "0.64491", "0.6447973", "0.64409953", "0.63741535", "0.6354096", "0.6333439", "0.62408364", "0.62126094", "0.60852724", "0.6071372", "0.6067067", "0.60492945", "0.6028298", "0.5996481", "0.5976128", "0.5975336", "0.5962553", "0.5946239", "0.591592", "0.5915199", "0.5886879", "0.5875029", "0.58622384", "0.585843", "0.58525467", "0.582552", "0.58240294", "0.58129066", "0.5797633", "0.5789385", "0.5778386", "0.5741338", "0.57096255", "0.5698289", "0.56958705", "0.5689746", "0.56873685", "0.56569403", "0.5650599", "0.5649958", "0.56422234", "0.5630903", "0.5630386", "0.5624595", "0.5622027", "0.5592967", "0.5583474", "0.5580943", "0.5561862", "0.55403227", "0.5525581", "0.5509804", "0.5509188", "0.5505259", "0.55012614", "0.5500372", "0.5471805", "0.5471805", "0.54666525", "0.54577273", "0.54464424", "0.5421184", "0.5420839", "0.54205704", "0.54135036", "0.5413111", "0.5404056", "0.54006815", "0.5400229" ]
0.78274804
1